diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 7047cf96a87fb..d926f9724a22e 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -16,43 +16,19 @@ add_compile_definitions($<$>:CROSS_CO add_compile_definitions($<$>:FEATURE_NATIVE_IMAGE_GENERATION>) add_compile_definitions($<$>:SELF_NO_HOST>) -if (CLR_CMAKE_TARGET_ARCH_AMD64) - if (CLR_CMAKE_TARGET_UNIX) - add_definitions(-DDBG_TARGET_AMD64_UNIX) - endif() - add_definitions(-D_TARGET_AMD64_) - add_definitions(-D_TARGET_64BIT_) - add_definitions(-DDBG_TARGET_64BIT) - add_definitions(-DDBG_TARGET_AMD64) -elseif (CLR_CMAKE_TARGET_ARCH_ARM64) +if (CLR_CMAKE_TARGET_ARCH_ARM64) if (CLR_CMAKE_TARGET_UNIX) - add_definitions(-DDBG_TARGET_ARM64_UNIX) add_definitions(-DFEATURE_EMULATE_SINGLESTEP) endif() - add_definitions(-D_TARGET_ARM64_) - add_definitions(-D_TARGET_64BIT_) - add_definitions(-DDBG_TARGET_64BIT) - add_definitions(-DDBG_TARGET_ARM64) add_definitions(-DFEATURE_MULTIREG_RETURN) elseif (CLR_CMAKE_TARGET_ARCH_ARM) - if (CLR_CMAKE_TARGET_UNIX) - add_definitions(-DDBG_TARGET_ARM_UNIX) - elseif (WIN32 AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD) + if (WIN32 AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD) # Set this to ensure we can use Arm SDK for Desktop binary linkage when doing native (Arm32) build add_definitions(-D_ARM_WINAPI_PARTITION_DESKTOP_SDK_AVAILABLE) add_definitions(-D_ARM_WORKAROUND_) - endif (CLR_CMAKE_TARGET_UNIX) - add_definitions(-D_TARGET_ARM_) - add_definitions(-DDBG_TARGET_32BIT) - add_definitions(-DDBG_TARGET_ARM) + endif (WIN32 AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD) add_definitions(-DFEATURE_EMULATE_SINGLESTEP) -elseif (CLR_CMAKE_TARGET_ARCH_I386) - add_definitions(-D_TARGET_X86_) - add_definitions(-DDBG_TARGET_32BIT) - add_definitions(-DDBG_TARGET_X86) -else () - clr_unknown_arch() -endif (CLR_CMAKE_TARGET_ARCH_AMD64) +endif (CLR_CMAKE_TARGET_ARCH_ARM64) if (CLR_CMAKE_TARGET_UNIX) @@ -179,7 +155,6 @@ if(FEATURE_MERGE_JIT_AND_ENGINE) endif(FEATURE_MERGE_JIT_AND_ENGINE) add_compile_definitions($<$>>:FEATURE_MULTICOREJIT>) if(CLR_CMAKE_TARGET_UNIX) - add_definitions(-DFEATURE_PAL) add_definitions(-DFEATURE_PAL_ANSI) endif(CLR_CMAKE_TARGET_UNIX) if(CLR_CMAKE_TARGET_LINUX AND CLR_CMAKE_HOST_LINUX) diff --git a/src/coreclr/configurecompiler.cmake b/src/coreclr/configurecompiler.cmake index 6950be03d965d..e08eb3c30da9e 100644 --- a/src/coreclr/configurecompiler.cmake +++ b/src/coreclr/configurecompiler.cmake @@ -190,17 +190,15 @@ endif(CLR_CMAKE_HOST_FREEBSD) # Definitions (for platform) #----------------------------------- if (CLR_CMAKE_HOST_ARCH_AMD64) - add_definitions(-D_AMD64_) - add_definitions(-DBIT64) + add_definitions(-DHOST_AMD64) + add_definitions(-DHOST_64BIT) elseif (CLR_CMAKE_HOST_ARCH_I386) - add_definitions(-D_X86_) - add_definitions(-DBIT32) + add_definitions(-DHOST_X86) elseif (CLR_CMAKE_HOST_ARCH_ARM) - add_definitions(-D_ARM_) - add_definitions(-DBIT32) + add_definitions(-DHOST_ARM) elseif (CLR_CMAKE_HOST_ARCH_ARM64) - add_definitions(-D_ARM64_) - add_definitions(-DBIT64) + add_definitions(-DHOST_ARM64) + add_definitions(-DHOST_64BIT) else () clr_unknown_arch() endif () @@ -222,7 +220,7 @@ if (CLR_CMAKE_HOST_UNIX) endif(CLR_CMAKE_HOST_UNIX) if (CLR_CMAKE_HOST_UNIX) - add_definitions(-DPLATFORM_UNIX) + add_definitions(-DHOST_UNIX) if(CLR_CMAKE_HOST_DARWIN) message("Detected OSX x86_64") @@ -238,7 +236,7 @@ if (CLR_CMAKE_HOST_UNIX) endif(CLR_CMAKE_HOST_UNIX) if (WIN32) - add_definitions(-DPLATFORM_WINDOWS) + 
add_definitions(-DHOST_WINDOWS) # Define the CRT lib references that link into Desktop imports set(STATIC_MT_CRT_LIB "libcmt$<$,$>:d>.lib") @@ -249,12 +247,18 @@ endif(WIN32) # Architecture specific files folder name if (CLR_CMAKE_TARGET_ARCH_AMD64) set(ARCH_SOURCES_DIR amd64) + add_definitions(-DTARGET_AMD64) + add_definitions(-DTARGET_64BIT) elseif (CLR_CMAKE_TARGET_ARCH_ARM64) set(ARCH_SOURCES_DIR arm64) + add_definitions(-DTARGET_ARM64) + add_definitions(-DTARGET_64BIT) elseif (CLR_CMAKE_TARGET_ARCH_ARM) set(ARCH_SOURCES_DIR arm) + add_definitions(-DTARGET_ARM) elseif (CLR_CMAKE_TARGET_ARCH_I386) set(ARCH_SOURCES_DIR i386) + add_definitions(-DTARGET_X86) else () clr_unknown_arch() endif () @@ -348,8 +352,11 @@ if (CLR_CMAKE_HOST_UNIX) endif(CLR_CMAKE_HOST_UNIX) if(CLR_CMAKE_TARGET_UNIX) + add_definitions(-DTARGET_UNIX) # Contracts are disabled on UNIX. add_definitions(-DDISABLE_CONTRACTS) +else(CLR_CMAKE_TARGET_UNIX) + add_definitions(-DTARGET_WINDOWS) endif(CLR_CMAKE_TARGET_UNIX) if(CLR_CMAKE_HOST_UNIX_ARM) diff --git a/src/coreclr/src/ToolBox/superpmi/mcs/commandline.cpp b/src/coreclr/src/ToolBox/superpmi/mcs/commandline.cpp index 161122815a621..a6e74a9934a78 100644 --- a/src/coreclr/src/ToolBox/superpmi/mcs/commandline.cpp +++ b/src/coreclr/src/ToolBox/superpmi/mcs/commandline.cpp @@ -152,12 +152,12 @@ bool CommandLine::Parse(int argc, char* argv[], /* OUT */ Options* o) for (int i = 1; i < argc; i++) { bool isASwitch = (argv[i][0] == '-'); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (argv[i][0] == '/') // Also accept "/" on Windows { isASwitch = true; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Process a switch if (isASwitch) diff --git a/src/coreclr/src/ToolBox/superpmi/mcs/mcs.cpp b/src/coreclr/src/ToolBox/superpmi/mcs/mcs.cpp index 4ed72cac2770d..c1815341e55fe 100644 --- a/src/coreclr/src/ToolBox/superpmi/mcs/mcs.cpp +++ b/src/coreclr/src/ToolBox/superpmi/mcs/mcs.cpp @@ -23,13 +23,13 @@ int __cdecl main(int argc, char* argv[]) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (0 != PAL_Initialize(argc, argv)) { fprintf(stderr, "Error: Fail to PAL_Initialize\n"); exit(1); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX Logger::Initialize(); diff --git a/src/coreclr/src/ToolBox/superpmi/mcs/verbmerge.cpp b/src/coreclr/src/ToolBox/superpmi/mcs/verbmerge.cpp index 029b3b7d983b0..c1cb225485a7f 100644 --- a/src/coreclr/src/ToolBox/superpmi/mcs/verbmerge.cpp +++ b/src/coreclr/src/ToolBox/superpmi/mcs/verbmerge.cpp @@ -119,10 +119,10 @@ bool verbMerge::DirectoryFilterDirectories(WIN32_FIND_DATAW* findData) // 3. hidden directories // 4. "." or ".." -#ifndef FEATURE_PAL // FILE_ATTRIBUTE_REPARSE_POINT is not defined in the PAL +#ifndef TARGET_UNIX // FILE_ATTRIBUTE_REPARSE_POINT is not defined in the PAL if ((findData->dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0) return false; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if ((findData->dwFileAttributes & FILE_ATTRIBUTE_SYSTEM) != 0) return false; if ((findData->dwFileAttributes & FILE_ATTRIBUTE_HIDDEN) != 0) @@ -204,18 +204,18 @@ int verbMerge::FilterDirectory(LPCWSTR searchPattern, // NOTE: this function only works on Windows 7 and later. WIN32_FIND_DATAW findData; HANDLE hSearch; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // PAL doesn't have FindFirstFileEx(). So just use FindFirstFile(). The only reason we use // the Ex version is potentially better performance (don't populate short name; use large fetch), // not functionality. 
hSearch = FindFirstFileW(searchPattern, &findData); -#else // !FEATURE_PAL +#else // !TARGET_UNIX hSearch = FindFirstFileExW(searchPattern, FindExInfoBasic, // We don't care about the short names &findData, FindExSearchNameMatch, // standard name matching NULL, FIND_FIRST_EX_LARGE_FETCH); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (hSearch == INVALID_HANDLE_VALUE) { diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/asmdumper.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/asmdumper.cpp index d74902c624e5c..61cfd7edfa3d1 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/asmdumper.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/asmdumper.cpp @@ -44,9 +44,9 @@ void ASMDumper::DumpToFile(HANDLE hFile, MethodContext* mc, CompileResult* cr) #ifdef USE_MSVCDIS -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 DIS* disasm = DIS::PdisNew(DIS::distX8664); -#elif _TARGET_X86_ +#elif TARGET_X86 DIS* disasm = DIS::PdisNew(DIS::distX86); #endif size_t offset = 0; diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/callutils.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/callutils.cpp index d071e8a24c26e..9f13c50b963b2 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/callutils.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/callutils.cpp @@ -224,7 +224,7 @@ bool CallUtils::HasRetBuffArg(MethodContext* mc, CORINFO_SIG_INFO args) return false; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // We don't need a return buffer if: // i) TYP_STRUCT argument that can fit into a single register and // ii) Power of two sized TYP_STRUCT on AMD64. diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp index d8ce7689da44c..b8c006040c4f2 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp @@ -44,10 +44,10 @@ CompileResult::~CompileResult() if (CallTargetTypes != nullptr) delete CallTargetTypes; -#ifndef FEATURE_PAL // PAL doesn't have HeapDestroy() +#ifndef TARGET_UNIX // PAL doesn't have HeapDestroy() if (codeHeap != nullptr) ::HeapDestroy(codeHeap); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // Is the CompileResult empty? Define this as whether all the maps that store information given by the JIT are empty. @@ -721,7 +721,7 @@ void CompileResult::applyRelocs(unsigned char* block1, ULONG blocksize1, void* o switch (tmp.fRelocType) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) case IMAGE_REL_BASED_HIGHLOW: { DWORDLONG fixupLocation = tmp.location; @@ -735,9 +735,9 @@ void CompileResult::applyRelocs(unsigned char* block1, ULONG blocksize1, void* o } } break; -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) case IMAGE_REL_BASED_REL32: { DWORDLONG target = tmp.target + tmp.addlDelta; @@ -745,7 +745,7 @@ void CompileResult::applyRelocs(unsigned char* block1, ULONG blocksize1, void* o DWORDLONG baseAddr = fixupLocation + sizeof(INT32); INT64 delta = (INT64)((BYTE*)target - baseAddr); -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) if (delta != (INT64)(int)delta) { // This isn't going to fit in a signed 32-bit address. 
Use something that will fit, @@ -759,11 +759,11 @@ void CompileResult::applyRelocs(unsigned char* block1, ULONG blocksize1, void* o delta = newdelta; } -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) if (delta != (INT64)(int)delta) { -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) LogError("REL32 relocation overflows field! delta=0x%016llX", delta); #else LogError("REL32 relocation overflows field! delta=0x%08X", delta); @@ -780,9 +780,9 @@ void CompileResult::applyRelocs(unsigned char* block1, ULONG blocksize1, void* o } } break; -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) case IMAGE_REL_BASED_DIR64: { DWORDLONG fixupLocation = tmp.location + tmp.slotNum; @@ -797,15 +797,15 @@ void CompileResult::applyRelocs(unsigned char* block1, ULONG blocksize1, void* o } } break; -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case IMAGE_REL_ARM64_BRANCH26: // 26 bit offset << 2 & sign ext, for B and BL case IMAGE_REL_ARM64_PAGEBASE_REL21: case IMAGE_REL_ARM64_PAGEOFFSET_12A: LogError("Unimplemented reloc type %u", tmp.fRelocType); break; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 default: LogError("Unknown reloc type %u", tmp.fRelocType); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/logging.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/logging.cpp index bd7de08da136b..3a71df168ccf8 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/logging.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/logging.cpp @@ -235,7 +235,7 @@ void Logger::LogVprintf( if (s_logFile != INVALID_HANDLE_VALUE) { -#ifndef FEATURE_PAL // TODO: no localtime_s() or strftime() in PAL +#ifndef TARGET_UNIX // TODO: no localtime_s() or strftime() in PAL tm timeInfo; errno_t err = localtime_s(&timeInfo, &timestamp); if (err != 0) @@ -254,9 +254,9 @@ void Logger::LogVprintf( timeStrBuffSize *= 2; timeStr = (char*)realloc(timeStr, timeStrBuffSize); } -#else // FEATURE_PAL +#else // TARGET_UNIX const char* timeStr = ""; -#endif // FEATURE_PAL +#endif // TARGET_UNIX const char logEntryFmtStr[] = "%s - %s [%s:%d] - %s - %s\r\n"; size_t logEntryBuffSize = sizeof(logEntryFmtStr) + strlen(timeStr) + strlen(function) + strlen(file) + 10 + @@ -277,15 +277,15 @@ void Logger::LogVprintf( delete[] logEntry; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX free((void*)timeStr); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CleanUp: -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX LeaveCriticalSection(&s_critSec); delete[] fullMsg; diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp index 12686514eb593..0523e77d9c81c 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp @@ -3988,7 +3988,7 @@ void MethodContext::repGetEEInfo(CORINFO_EE_INFO* pEEInfoOut) pEEInfoOut->osPageSize = (size_t)0x1000; pEEInfoOut->maxUncheckedOffsetForNullObject = (size_t)((32 *
1024) - 1); pEEInfoOut->targetAbi = CORINFO_DESKTOP_ABI; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX pEEInfoOut->osType = CORINFO_UNIX; #else pEEInfoOut->osType = CORINFO_WINNT; @@ -6400,7 +6400,7 @@ int MethodContext::dumpMethodMD5HashToBuffer(char* buff, int len, bool ignoreMet int MethodContext::dumpMD5HashToBuffer(BYTE* pBuffer, int bufLen, char* hash, int hashLen) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX MD5HASHDATA md5_hashdata; MD5 md5_hasher; @@ -6420,7 +6420,7 @@ int MethodContext::dumpMD5HashToBuffer(BYTE* pBuffer, int bufLen, char* hash, in return MD5_HASH_BUFFER_SIZE; // if we had success we wrote MD5_HASH_BUFFER_SIZE bytes to the buffer -#else // !FEATURE_PAL +#else // !TARGET_UNIX HCRYPTPROV hProv = NULL; // CryptoProvider HCRYPTHASH hHash = NULL; @@ -6467,7 +6467,7 @@ int MethodContext::dumpMD5HashToBuffer(BYTE* pBuffer, int bufLen, char* hash, in CryptReleaseContext(hProv, 0); return -1; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } MethodContext::Environment MethodContext::cloneEnvironment() diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h index a21c8e988af4d..cacd2c4e1fd1f 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h @@ -12,11 +12,11 @@ // Our little collection of enough of the CLR data to get the JIT up and working... #define FEATURE_CLRSQM -#if !defined(_TARGET_AMD64_) && !defined(_TARGET_X86_) && !defined(_TARGET_ARM64_) && !defined(_TARGET_ARM_) +#if !defined(TARGET_AMD64) && !defined(TARGET_X86) && !defined(TARGET_ARM64) && !defined(TARGET_ARM) #if defined(_M_X64) -#define _TARGET_AMD64_ 1 +#define TARGET_AMD64 1 #elif defined(_M_IX86) -#define _TARGET_X86_ 1 +#define TARGET_X86 1 #endif #endif // _TARGET_* not previously defined diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.cpp index 79caf6020d8a7..80a552f8fc45c 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.cpp @@ -86,7 +86,7 @@ WCHAR* GetEnvironmentVariableWithDefaultW(const WCHAR* envVarName, const WCHAR* return retString; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // For some reason, the PAL doesn't have GetCommandLineA(). So write it. 
LPSTR GetCommandLineA() { @@ -116,7 +116,7 @@ LPSTR GetCommandLineA() return pCmdLine; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX bool LoadRealJitLib(HMODULE& jitLib, WCHAR* jitLibPath) { @@ -214,11 +214,11 @@ WCHAR* GetResultFileName(const WCHAR* folderPath, const WCHAR* fileName, const W { unsigned randomNumber = 0; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_Random(&randomNumber, sizeof(randomNumber)); -#else // !FEATURE_PAL +#else // !TARGET_UNIX rand_s(&randomNumber); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX WCHAR randomString[randomStringLength + 1]; swprintf_s(randomString, randomStringLength + 1, W("%08X"), randomNumber); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.h index 5ae422ffc48fb..c66e69f0fd6ab 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.h +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/spmiutil.h @@ -19,9 +19,9 @@ char* GetEnvironmentVariableWithDefaultA(const char* envVarName, const char* def WCHAR* GetEnvironmentVariableWithDefaultW(const WCHAR* envVarName, const WCHAR* defaultValue = nullptr); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX LPSTR GetCommandLineA(); -#endif // FEATURE_PAL +#endif // TARGET_UNIX bool LoadRealJitLib(HMODULE& realJit, WCHAR* realJitPath); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/standardpch.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/standardpch.h index 65a4f321f9469..8aa6a1acbbddc 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/standardpch.h +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/standardpch.h @@ -64,16 +64,16 @@ #include // Getting STL to work with PAL is difficult, so reimplement STL functionality to not require it. -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "clr_std/string" #include "clr_std/algorithm" -#else // !FEATURE_PAL +#else // !TARGET_UNIX #ifndef USE_STL #define USE_STL #endif // USE_STL #include #include -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef USE_MSVCDIS #define DISLIB @@ -90,22 +90,22 @@ #endif #ifndef W -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #define W(str) u##str -#else // PLATFORM_UNIX +#else // TARGET_UNIX #define W(str) L##str -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX #endif // !W #ifndef DIRECTORY_SEPARATOR_STR_W #define DIRECTORY_SEPARATOR_STR_W W("\\") #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_A PAL_SHLIB_SUFFIX -#else // !FEATURE_PAL +#else // !TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_A ".dll" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #define DEFAULT_REAL_JIT_NAME_A MAKEDLLNAME_A("clrjit2") #define DEFAULT_REAL_JIT_NAME_W MAKEDLLNAME_W("clrjit2") diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/typeutils.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/typeutils.cpp index 9b3ad4c88a068..fc6d763f709f4 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/typeutils.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/typeutils.cpp @@ -71,7 +71,7 @@ const char* TypeUtils::GetCorInfoTypeName(CorInfoType type) case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: // Emulates the JIT's concept of TYP_I_IMPL -#if defined(_TARGET_AMD64_) // TODO: should be _TARGET_64BIT_ +#if defined(TARGET_AMD64) // TODO: should be TARGET_64BIT return "long"; #else return "int"; @@ -108,7 +108,7 @@ bool TypeUtils::IsValueClass(CorInfoType type) // by reference (i.e. it cannot be stuffed as-is into a register or stack slot). 
bool TypeUtils::ValueClassRequiresByref(MethodContext* mc, CORINFO_CLASS_HANDLE clsHnd) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) size_t size = mc->repGetClassSize(clsHnd); return ((size > sizeof(void*)) || ((size & (size - 1)) != 0)); #else diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp index cad8ac1cfbffa..1a94cdb1509ab 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp @@ -83,7 +83,7 @@ void SetLogFilePath() } extern "C" -#ifdef FEATURE_PAL +#ifdef HOST_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL @@ -92,13 +92,13 @@ extern "C" switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: -#ifdef FEATURE_PAL +#ifdef HOST_UNIX if (0 != PAL_InitializeDLL()) { fprintf(stderr, "Error: Fail to PAL_InitializeDLL\n"); exit(1); } -#endif // FEATURE_PAL +#endif // HOST_UNIX Logger::Initialize(); SetLogFilePath(); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp index 37fe7252ac68e..6bc7d3f4b7f1f 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/superpmi-shim-counter.cpp @@ -69,7 +69,7 @@ void SetLogFilePath() } extern "C" -#ifdef FEATURE_PAL +#ifdef HOST_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL @@ -78,13 +78,13 @@ extern "C" switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: -#ifdef FEATURE_PAL +#ifdef HOST_UNIX if (0 != PAL_InitializeDLL()) { fprintf(stderr, "Error: Fail to PAL_InitializeDLL\n"); exit(1); } -#endif // FEATURE_PAL +#endif // HOST_UNIX Logger::Initialize(); SetLogFilePath(); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp index 897880aef7cb0..a2fbaf0ef9773 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/superpmi-shim-simple.cpp @@ -59,7 +59,7 @@ void SetLogFilePath() } extern "C" -#ifdef FEATURE_PAL +#ifdef HOST_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL @@ -68,13 +68,13 @@ extern "C" switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: -#ifdef FEATURE_PAL +#ifdef HOST_UNIX if (0 != PAL_InitializeDLL()) { fprintf(stderr, "Error: Fail to PAL_InitializeDLL\n"); exit(1); } -#endif // FEATURE_PAL +#endif // HOST_UNIX Logger::Initialize(); SetLogFilePath(); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/commandline.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/commandline.cpp index 0b4f0f9ce83de..a504729abb27c 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/commandline.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/commandline.cpp @@ -201,12 +201,12 @@ bool CommandLine::Parse(int argc, char* argv[], /* OUT */ Options* o) for (int i = 1; i < argc; i++) { bool isASwitch = (argv[i][0] == '-'); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (argv[i][0] == '/') // Also accept "/" on Windows { isASwitch = true; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Process a switch if (isASwitch) @@ -596,12 +596,12 @@ bool CommandLine::Parse(int argc, char* argv[], 
/* OUT */ Options* o) if (o->targetArchitecture != nullptr) { const char* errorMessage = nullptr; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (0 != _stricmp(o->targetArchitecture, "arm64")) { errorMessage = "Illegal target architecture specified with -target (only arm64 is supported on x64 host)."; } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) if (0 != _stricmp(o->targetArchitecture, "arm")) { errorMessage = "Illegal target architecture specified with -target (only arm is supported on x86 host)."; diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp index 3441850ed2eae..630ca18c69b63 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp @@ -1880,13 +1880,13 @@ void MyICJI::getModuleNativeEntryPointRange(void** pStart, /* OUT */ // DWORD MyICJI::getExpectedTargetArchitecture() { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return IMAGE_FILE_MACHINE_I386; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return IMAGE_FILE_MACHINE_AMD64; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return IMAGE_FILE_MACHINE_ARMNT; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return IMAGE_FILE_MACHINE_ARM64; #else return IMAGE_FILE_MACHINE_UNKNOWN; diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/ieememorymanager.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/ieememorymanager.cpp index b8d5f152af766..b12735019e354 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/ieememorymanager.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/ieememorymanager.cpp @@ -98,9 +98,9 @@ HANDLE STDMETHODCALLTYPE MyIEEMM::ClrGetProcessExecutableHeap() if (processHeap == INVALID_HANDLE_VALUE) { DWORD flOptions = 0; -#ifndef FEATURE_PAL // TODO-Review: PAL doesn't have HEAP_CREATE_ENABLE_EXECUTE. Is this ok? +#ifndef TARGET_UNIX // TODO-Review: PAL doesn't have HEAP_CREATE_ENABLE_EXECUTE. Is this ok? flOptions = HEAP_CREATE_ENABLE_EXECUTE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX processHeap = HeapCreate(flOptions, 10000, 0); } return processHeap; diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/jitdebugger.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/jitdebugger.cpp index 9de6b9cc16d11..75afd87b3aeca 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/jitdebugger.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/jitdebugger.cpp @@ -18,9 +18,9 @@ // JIT debugging is broken due to utilcode changes to support LongFile. We need to re-copy // or adjust the implementation of the below functions so they link properly. 
#if 0 -#ifndef FEATURE_PAL // No just-in-time debugger under PAL +#ifndef TARGET_UNIX // No just-in-time debugger under PAL #define FEATURE_JIT_DEBUGGING -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // 0 #ifndef FEATURE_JIT_DEBUGGING diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/jitinstance.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/jitinstance.cpp index 669e459553528..36bfda31d20e5 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/jitinstance.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/jitinstance.cpp @@ -131,7 +131,7 @@ HRESULT JitInstance::StartUp(char* PathToJit, bool copyJit, bool breakOnDebugBre else PathToTempJit = PathToOriginalJit; -#ifndef FEATURE_PAL // No file version APIs in the PAL +#ifndef TARGET_UNIX // No file version APIs in the PAL // Do a quick version check DWORD dwHandle = 0; DWORD fviSize = GetFileVersionInfoSizeA(PathToTempJit, &dwHandle); @@ -156,7 +156,7 @@ HRESULT JitInstance::StartUp(char* PathToJit, bool copyJit, bool breakOnDebugBre } delete[] fviData; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Load Library hLib = ::LoadLibraryA(PathToTempJit); @@ -320,12 +320,12 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i if (jitResult == CORJIT_SKIPPED) { // For altjit, treat SKIPPED as OK -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (SpmiTargetArchitecture == SPMI_TARGET_ARCHITECTURE_ARM64) { jitResult = CORJIT_OK; } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) if (SpmiTargetArchitecture == SPMI_TARGET_ARCHITECTURE_ARM) { jitResult = CORJIT_OK; diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/neardiffer.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/neardiffer.cpp index a94e1e1df72ec..d5d8586f7e375 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/neardiffer.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/neardiffer.cpp @@ -68,7 +68,7 @@ bool NearDiffer::InitAsmDiff() if (UseCoreDisTools) { const WCHAR* coreDisToolsLibrary = MAKEDLLNAME_W("coredistools"); -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX // Unix will require the full path to coredistools. Assume that the // location is next to the full path to the superpmi.so. 
@@ -90,7 +90,7 @@ bool NearDiffer::InitAsmDiff() const WCHAR* coreDisToolsLibraryName = MAKEDLLNAME_W("coredistools"); ::wcscpy_s(ptr, &coreCLRLoadedPath[MAX_LONGPATH] - ptr, coreDisToolsLibraryName); coreDisToolsLibrary = coreCLRLoadedPath; -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX HMODULE hCoreDisToolsLib = ::LoadLibraryW(coreDisToolsLibrary); if (hCoreDisToolsLib == 0) @@ -125,12 +125,12 @@ bool NearDiffer::InitAsmDiff() } TargetArch coreDisTargetArchitecture = Target_Host; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ((TargetArchitecture != nullptr) && (0 == _stricmp(TargetArchitecture, "arm64"))) { coreDisTargetArchitecture = Target_Arm64; } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) if ((TargetArchitecture != nullptr) && (0 == _stricmp(TargetArchitecture, "arm"))) { coreDisTargetArchitecture = Target_Thumb; @@ -190,7 +190,7 @@ DIS* NearDiffer::GetMsVcDis() { DIS* disasm; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ((TargetArchitecture != nullptr) && (0 == _stricmp(TargetArchitecture, "arm64"))) { disasm = DIS::PdisNew(DIS::distArm64); @@ -199,7 +199,7 @@ DIS* NearDiffer::GetMsVcDis() { disasm = DIS::PdisNew(DIS::distX8664); } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) disasm = DIS::PdisNew(DIS::distX86); #endif @@ -665,11 +665,11 @@ bool NearDiffer::compareCodeSection(MethodContext* mc, size_t gOffset2 = (size_t)originalBlock2 + offset + (size_t)ops_2[i].dwl; LogVerbose("operand %d dwl is different", i); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 LogVerbose("gOffset1 %016llX", gOffset1); LogVerbose("gOffset2 %016llX", gOffset2); LogVerbose("gOffset1 - gOffset2 %016llX", gOffset1 - gOffset2); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) LogVerbose("gOffset1 %08X", gOffset1); LogVerbose("gOffset2 %08X", gOffset2); LogVerbose("gOffset1 - gOffset2 %08X", gOffset1 - gOffset2); @@ -702,11 +702,11 @@ bool NearDiffer::compareCodeSection(MethodContext* mc, LogVerbose("otherCodeBlockSize1 %08X", otherCodeBlockSize1); LogVerbose("otherCodeBlockSize2 %08X", otherCodeBlockSize2); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 LogVerbose("offset %016llX", offset); LogVerbose("addr1 %016llX", (size_t)originalBlock1 + offset); LogVerbose("addr2 %016llX", (size_t)originalBlock2 + offset); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) LogVerbose("offset %08X", offset); LogVerbose("addr1 %08X", (size_t)originalBlock1 + offset); LogVerbose("addr2 %08X", (size_t)originalBlock2 + offset); diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/parallelsuperpmi.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/parallelsuperpmi.cpp index cd7559f42f336..88dd861ee48ec 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/parallelsuperpmi.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/parallelsuperpmi.cpp @@ -277,7 +277,7 @@ void ProcessChildStdOut(const CommandLine::Options& o, } } -#ifndef FEATURE_PAL // TODO-Porting: handle Ctrl-C signals gracefully on Unix +#ifndef TARGET_UNIX // TODO-Porting: handle Ctrl-C signals gracefully on Unix BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) { // Since the child SuperPMI.exe processes share the same console @@ -286,7 +286,7 @@ BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) closeRequested = true; // set a flag to indicate we need to quit return TRUE; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX int __cdecl compareInt(const void* arg1, const void* arg2) { @@ -410,14 +410,14 @@ int doParallelSuperPMI(CommandLine::Options& o) SimpleTimer st; st.Start(); -#ifndef FEATURE_PAL // TODO-Porting: handle Ctrl-C signals 
gracefully on Unix +#ifndef TARGET_UNIX // TODO-Porting: handle Ctrl-C signals gracefully on Unix // Register a ConsoleCtrlHandler if (!SetConsoleCtrlHandler(CtrlHandler, TRUE)) { LogError("Failed to set control handler."); return 1; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX char tempPath[MAX_PATH]; if (!GetTempPath(MAX_PATH, tempPath)) @@ -469,11 +469,11 @@ int doParallelSuperPMI(CommandLine::Options& o) // Add a random number to the temporary file names to allow multiple parallel SuperPMI to happen at once. unsigned int randNumber = 0; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_Random(&randNumber, sizeof(randNumber)); -#else // !FEATURE_PAL +#else // !TARGET_UNIX rand_s(&randNumber); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX for (int i = 0; i < o.workerCount; i++) { diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/superpmi.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/superpmi.cpp index 514ae5f9928b0..d52d9da4999c1 100644 --- a/src/coreclr/src/ToolBox/superpmi/superpmi/superpmi.cpp +++ b/src/coreclr/src/ToolBox/superpmi/superpmi/superpmi.cpp @@ -35,7 +35,7 @@ SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture; void SetSuperPmiTargetArchitecture(const char* targetArchitecture) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ((targetArchitecture != nullptr) && (0 == _stricmp(targetArchitecture, "arm64"))) { SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM64; @@ -44,7 +44,7 @@ void SetSuperPmiTargetArchitecture(const char* targetArchitecture) { SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_AMD64; } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) if ((targetArchitecture != nullptr) && (0 == _stricmp(targetArchitecture, "arm"))) { SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM; @@ -53,9 +53,9 @@ void SetSuperPmiTargetArchitecture(const char* targetArchitecture) { SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_X86; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM64; #else #error Unsupported architecture @@ -136,13 +136,13 @@ void InvokeNearDiffer(NearDiffer* nearDiffer, // 3 : there were missing values in method context int __cdecl main(int argc, char* argv[]) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (0 != PAL_Initialize(argc, argv)) { fprintf(stderr, "Error: Fail to PAL_Initialize\n"); return (int)SpmiResult::GeneralFailure; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX Logger::Initialize(); diff --git a/src/coreclr/src/binder/assembly.cpp b/src/coreclr/src/binder/assembly.cpp index 3cefbc726d65f..9000e9095b3a0 100644 --- a/src/coreclr/src/binder/assembly.cpp +++ b/src/coreclr/src/binder/assembly.cpp @@ -152,13 +152,13 @@ namespace BINDER_SPACE /* static */ PEKIND Assembly::GetSystemArchitecture() { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return peI386; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return peAMD64; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return peARM; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return peARM64; #else PORTABILITY_ASSERT("Assembly::GetSystemArchitecture"); diff --git a/src/coreclr/src/binder/utils.cpp b/src/coreclr/src/binder/utils.cpp index 25610ce86fa85..27f1d4c6b3f6d 100644 --- a/src/coreclr/src/binder/utils.cpp +++ b/src/coreclr/src/binder/utils.cpp @@ -23,11 +23,11 @@ namespace BINDER_SPACE { inline const WCHAR *GetPlatformPathSeparator() { -#ifdef PLATFORM_UNIX +#ifdef 
TARGET_UNIX return W("/"); #else return W("\\"); -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX } } @@ -49,7 +49,7 @@ namespace BINDER_SPACE } i = urlOrPath.Begin(); -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) if (i[0] == W('/')) { // Disk path file:/// diff --git a/src/coreclr/src/classlibnative/bcltype/arraynative.cpp b/src/coreclr/src/classlibnative/bcltype/arraynative.cpp index 65b8b677796b5..56d85cebe2bb6 100644 --- a/src/coreclr/src/classlibnative/bcltype/arraynative.cpp +++ b/src/coreclr/src/classlibnative/bcltype/arraynative.cpp @@ -60,7 +60,7 @@ void ArrayInitializeWorker(ARRAYBASEREF * arrayRef, PCODE ctorFtn = pCanonMT->GetSlot(slot); -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) BEGIN_CALL_TO_MANAGED(); @@ -91,7 +91,7 @@ void ArrayInitializeWorker(ARRAYBASEREF * arrayRef, } END_CALL_TO_MANAGED(); -#else // _TARGET_X86_ && !FEATURE_PAL +#else // TARGET_X86 && !TARGET_UNIX // // This is quite a bit slower, but it is portable. // @@ -115,7 +115,7 @@ void ArrayInitializeWorker(ARRAYBASEREF * arrayRef, offset += size; } -#endif // !_TARGET_X86_ || FEATURE_PAL +#endif // !TARGET_X86 || TARGET_UNIX } diff --git a/src/coreclr/src/classlibnative/bcltype/arraynative.inl b/src/coreclr/src/classlibnative/bcltype/arraynative.inl index 5928f9d6e0fa6..516eb9235f0d7 100644 --- a/src/coreclr/src/classlibnative/bcltype/arraynative.inl +++ b/src/coreclr/src/classlibnative/bcltype/arraynative.inl @@ -58,7 +58,7 @@ FORCEINLINE void InlinedForwardGCSafeCopyHelper(void *dest, const void *src, siz ++dptr; } -#if defined(_AMD64_) && (defined(_MSC_VER) || defined(__GNUC__)) +#if defined(HOST_AMD64) && (defined(_MSC_VER) || defined(__GNUC__)) if ((len & (2 * sizeof(SIZE_T))) != 0) { __m128 v = _mm_loadu_ps((float *)sptr); @@ -104,7 +104,7 @@ FORCEINLINE void InlinedForwardGCSafeCopyHelper(void *dest, const void *src, siz { return; } -#else // !(defined(_AMD64_) && (defined(_MSC_VER) || defined(__GNUC__))) +#else // !(defined(HOST_AMD64) && (defined(_MSC_VER) || defined(__GNUC__))) if ((len & (2 * sizeof(SIZE_T))) != 0) { // Read two values and write two values to hint the use of wide loads and stores @@ -144,7 +144,7 @@ FORCEINLINE void InlinedForwardGCSafeCopyHelper(void *dest, const void *src, siz sptr += 4; dptr += 4; } -#endif // defined(_AMD64_) && (defined(_MSC_VER) || defined(__GNUC__)) +#endif // defined(HOST_AMD64) && (defined(_MSC_VER) || defined(__GNUC__)) } } @@ -193,7 +193,7 @@ FORCEINLINE void InlinedBackwardGCSafeCopyHelper(void *dest, const void *src, si } } -#if defined(_AMD64_) && (defined(_MSC_VER) || defined(__GNUC__)) +#if defined(HOST_AMD64) && (defined(_MSC_VER) || defined(__GNUC__)) if ((len & (2 * sizeof(SIZE_T))) != 0) { sptr -= 2; @@ -242,7 +242,7 @@ FORCEINLINE void InlinedBackwardGCSafeCopyHelper(void *dest, const void *src, si { return; } -#else // !(defined(_AMD64_) && (defined(_MSC_VER) || defined(__GNUC__))) +#else // !(defined(HOST_AMD64) && (defined(_MSC_VER) || defined(__GNUC__))) if ((len & (2 * sizeof(SIZE_T))) != 0) { sptr -= 2; @@ -281,7 +281,7 @@ FORCEINLINE void InlinedBackwardGCSafeCopyHelper(void *dest, const void *src, si len -= 4 * sizeof(SIZE_T); } while (len != 0); return; -#endif // defined(_AMD64_) && (defined(_MSC_VER) || defined(__GNUC__)) +#endif // defined(HOST_AMD64) && (defined(_MSC_VER) || defined(__GNUC__)) } } diff --git a/src/coreclr/src/classlibnative/bcltype/stringnative.cpp b/src/coreclr/src/classlibnative/bcltype/stringnative.cpp index 8f3946a3155d3..9e093d43a497c 100644 
--- a/src/coreclr/src/classlibnative/bcltype/stringnative.cpp +++ b/src/coreclr/src/classlibnative/bcltype/stringnative.cpp @@ -26,7 +26,7 @@ // Compile the string functionality with these pragma flags (equivalent of the command line /Ox flag) // Compiling this functionality differently gives us significant throughout gain in some cases. -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("tgy", on) #endif @@ -95,6 +95,6 @@ FCIMPL2(VOID, COMString::FCSetTrailByte, StringObject* thisRefUNSAFE, UINT8 bDat FCIMPLEND // Revert to command line compilation flags -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize ("", on) #endif diff --git a/src/coreclr/src/classlibnative/bcltype/stringnative.h b/src/coreclr/src/classlibnative/bcltype/stringnative.h index dfd439127bf02..6aea696e41af7 100644 --- a/src/coreclr/src/classlibnative/bcltype/stringnative.h +++ b/src/coreclr/src/classlibnative/bcltype/stringnative.h @@ -35,7 +35,7 @@ // Compile the string functionality with these pragma flags (equivalent of the command line /Ox flag) // Compiling this functionality differently gives us significant throughout gain in some cases. -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("tgy", on) #endif @@ -57,7 +57,7 @@ class COMString { }; // Revert to command line compilation flags -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize ("", on) #endif diff --git a/src/coreclr/src/classlibnative/bcltype/system.cpp b/src/coreclr/src/classlibnative/bcltype/system.cpp index 18621145cfb91..631a61d9462b2 100644 --- a/src/coreclr/src/classlibnative/bcltype/system.cpp +++ b/src/coreclr/src/classlibnative/bcltype/system.cpp @@ -31,7 +31,7 @@ #include "array.h" #include "eepolicy.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX typedef void(WINAPI *pfnGetSystemTimeAsFileTime)(LPFILETIME lpSystemTimeAsFileTime); extern pfnGetSystemTimeAsFileTime g_pfnGetSystemTimeAsFileTime; @@ -82,14 +82,14 @@ void WINAPI InitializeGetSystemTimeAsFileTime(LPFILETIME lpSystemTimeAsFileTime) } pfnGetSystemTimeAsFileTime g_pfnGetSystemTimeAsFileTime = &InitializeGetSystemTimeAsFileTime; -#endif // FEATURE_PAL +#endif // TARGET_UNIX FCIMPL0(INT64, SystemNative::__GetSystemTimeAsFileTime) { FCALL_CONTRACT; INT64 timestamp; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX g_pfnGetSystemTimeAsFileTime((FILETIME*)&timestamp); #else GetSystemTimeAsFileTime((FILETIME*)&timestamp); @@ -104,7 +104,7 @@ FCIMPL0(INT64, SystemNative::__GetSystemTimeAsFileTime) FCIMPLEND; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX FCIMPL1(VOID, SystemNative::GetSystemTimeWithLeapSecondsHandling, FullSystemTime *time) { @@ -181,7 +181,7 @@ FCIMPL2(FC_BOOL_RET, SystemNative::SystemTimeToFileTime, SYSTEMTIME *time, INT64 FC_RETURN_BOOL(ret); } FCIMPLEND; -#endif // FEATURE_PAL +#endif // TARGET_UNIX FCIMPL0(UINT32, SystemNative::GetTickCount) @@ -333,14 +333,14 @@ INT32 QCALLTYPE SystemNative::GetProcessorCount() BEGIN_QCALL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CPUGroupInfo::EnsureInitialized(); if(CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) { processorCount = CPUGroupInfo::GetNumActiveProcessors(); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Processor count will be 0 if CPU groups are disabled/not supported if(processorCount == 0) { @@ -352,7 +352,7 @@ INT32 QCALLTYPE SystemNative::GetProcessorCount() processorCount =
systemInfo.dwNumberOfProcessors; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX uint32_t cpuLimit; if (PAL_GetCpuLimit(&cpuLimit) && cpuLimit < (uint32_t)processorCount) @@ -475,7 +475,7 @@ void SystemNative::GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExce Thread *pThread = GetThread(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // If we have the exception object, then try to setup // the watson bucket if it has any details. // On CoreCLR, Watson may not be enabled. Thus, we should @@ -494,7 +494,7 @@ void SystemNative::GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExce } } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // stash the user-provided exception object. this will be used as // the inner exception object to the FatalExecutionEngineException. diff --git a/src/coreclr/src/classlibnative/bcltype/system.h b/src/coreclr/src/classlibnative/bcltype/system.h index 83dde4b9346b3..d2132e22109f6 100644 --- a/src/coreclr/src/classlibnative/bcltype/system.h +++ b/src/coreclr/src/classlibnative/bcltype/system.h @@ -44,12 +44,12 @@ class SystemNative public: // Functions on the System.Environment class -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static FCDECL1(VOID, GetSystemTimeWithLeapSecondsHandling, FullSystemTime *time); static FCDECL2(FC_BOOL_RET, ValidateSystemTime, SYSTEMTIME *time, CLR_BOOL localTime); static FCDECL2(FC_BOOL_RET, FileTimeToSystemTime, INT64 fileTime, FullSystemTime *time); static FCDECL2(FC_BOOL_RET, SystemTimeToFileTime, SYSTEMTIME *time, INT64 *pFileTime); -#endif // FEATURE_PAL +#endif // TARGET_UNIX static FCDECL0(INT64, __GetSystemTimeAsFileTime); static FCDECL0(UINT32, GetTickCount); static FCDECL0(UINT64, GetTickCount64); diff --git a/src/coreclr/src/classlibnative/bcltype/varargsnative.cpp b/src/coreclr/src/classlibnative/bcltype/varargsnative.cpp index 463179d0d2c06..23ba5e7ec9018 100644 --- a/src/coreclr/src/classlibnative/bcltype/varargsnative.cpp +++ b/src/coreclr/src/classlibnative/bcltype/varargsnative.cpp @@ -24,7 +24,7 @@ // so if you change this implementation be sure to update the debugger's version as well. static void AdjustArgPtrForAlignment(VARARGS *pData, size_t cbArg) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Only 64-bit primitives or value types with embedded 64-bit primitives are aligned on 64-bit boundaries. if (cbArg < 8) return; @@ -50,7 +50,7 @@ static void AdjustArgPtrForAlignment(VARARGS *pData, size_t cbArg) // One of the primitive 64-bit types } pData->ArgPtr = (BYTE*)ALIGN_UP(pData->ArgPtr, 8); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } //////////////////////////////////////////////////////////////////////////////// @@ -83,7 +83,7 @@ static void InitCommon(VARARGS *data, VASigCookie** cookie) // Get a pointer to the cookie arg. 
data->ArgPtr = (BYTE *) cookie; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK // ;; lower memory @@ -530,7 +530,7 @@ VarArgsNative::GetNextArgHelper( case ELEMENT_TYPE_U8: case ELEMENT_TYPE_R8: value->type = MscorlibBinder::GetElementType(elemType); -#if !defined(BIT64) && (DATA_ALIGNMENT > 4) +#if !defined(HOST_64BIT) && (DATA_ALIGNMENT > 4) if ( fData && origArgPtr == value->data ) { // allocate an aligned copy of the value value->data = value->type.AsMethodTable()->Box(origArgPtr, FALSE)->UnBox(); diff --git a/src/coreclr/src/classlibnative/float/floatdouble.cpp b/src/coreclr/src/classlibnative/float/floatdouble.cpp index e0d0fd95e6765..cd38648bab4c9 100644 --- a/src/coreclr/src/classlibnative/float/floatdouble.cpp +++ b/src/coreclr/src/classlibnative/float/floatdouble.cpp @@ -124,7 +124,7 @@ FCIMPL1_V(double, COMDouble::Cbrt, double x) return (double)cbrt(x); FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_AMD64_) +#if defined(_MSC_VER) && defined(TARGET_AMD64) // The /fp:fast form of `ceil` for AMD64 does not correctly handle: `-1.0 < value <= -0.0` // https://github.com/dotnet/coreclr/issues/19739 #pragma float_control(push) @@ -140,7 +140,7 @@ FCIMPL1_V(double, COMDouble::Ceil, double x) return (double)ceil(x); FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_AMD64_) +#if defined(_MSC_VER) && defined(TARGET_AMD64) #pragma float_control(pop) #endif @@ -171,7 +171,7 @@ FCIMPL1_V(double, COMDouble::Exp, double x) return (double)exp(x); FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) // The /fp:fast form of `floor` for x86 does not correctly handle: `-0.0` // https://github.com/dotnet/coreclr/issues/19739 #pragma float_control(push) @@ -187,7 +187,7 @@ FCIMPL1_V(double, COMDouble::Floor, double x) return (double)floor(x); FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma float_control(pop) #endif diff --git a/src/coreclr/src/classlibnative/float/floatsingle.cpp b/src/coreclr/src/classlibnative/float/floatsingle.cpp index 39b9fd6b6fe37..f6c949f03f36d 100644 --- a/src/coreclr/src/classlibnative/float/floatsingle.cpp +++ b/src/coreclr/src/classlibnative/float/floatsingle.cpp @@ -13,7 +13,7 @@ // define _isnan() and _copysign(). We will redirect the macros to these other functions if // the macro is not defined for the platform. This has the side effect of a possible implicit // upcasting for arguments passed in and an explicit downcasting for the _copysign() call. 
-#if (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL) +#if (defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_ARM64)) && !defined(TARGET_UNIX) #if !defined(_copysignf) #define _copysignf (float)_copysign @@ -122,7 +122,7 @@ FCIMPL1_V(float, COMSingle::Cbrt, float x) return (float)cbrtf(x); FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_AMD64_) +#if defined(_MSC_VER) && defined(TARGET_AMD64) // The /fp:fast form of `ceilf` for AMD64 does not correctly handle: `-1.0 < value <= -0.0` // https://github.com/dotnet/coreclr/issues/19739 #pragma float_control(push) @@ -138,7 +138,7 @@ FCIMPL1_V(float, COMSingle::Ceil, float x) return (float)ceilf(x); FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_AMD64_) +#if defined(_MSC_VER) && defined(TARGET_AMD64) #pragma float_control(pop) #endif diff --git a/src/coreclr/src/debug/createdump/crashinfo.cpp b/src/coreclr/src/debug/createdump/crashinfo.cpp index 7b5c20bf64f4f..ec2ad71e053a0 100644 --- a/src/coreclr/src/debug/createdump/crashinfo.cpp +++ b/src/coreclr/src/debug/createdump/crashinfo.cpp @@ -520,7 +520,7 @@ CrashInfo::GetELFInfo(uint64_t baseAddress) int phnum = ehdr.e_phnum; assert(phnum != PN_XNUM); assert(ehdr.e_phentsize == sizeof(Phdr)); -#ifdef BIT64 +#ifdef HOST_64BIT assert(ehdr.e_ident[EI_CLASS] == ELFCLASS64); #else assert(ehdr.e_ident[EI_CLASS] == ELFCLASS32); diff --git a/src/coreclr/src/debug/createdump/datatarget.cpp b/src/coreclr/src/debug/createdump/datatarget.cpp index c160b870c3075..13df2e7e7eaca 100644 --- a/src/coreclr/src/debug/createdump/datatarget.cpp +++ b/src/coreclr/src/debug/createdump/datatarget.cpp @@ -87,13 +87,13 @@ HRESULT STDMETHODCALLTYPE DumpDataTarget::GetMachineType( /* [out] */ ULONG32 *machine) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 *machine = IMAGE_FILE_MACHINE_AMD64; -#elif _ARM_ +#elif HOST_ARM *machine = IMAGE_FILE_MACHINE_ARMNT; -#elif _ARM64_ +#elif HOST_ARM64 *machine = IMAGE_FILE_MACHINE_ARM64; -#elif _X86_ +#elif HOST_X86 *machine = IMAGE_FILE_MACHINE_I386; #else #error Unsupported architecture @@ -105,9 +105,9 @@ HRESULT STDMETHODCALLTYPE DumpDataTarget::GetPointerSize( /* [out] */ ULONG32 *size) { -#if defined(_AMD64_) || defined(_ARM64_) +#if defined(HOST_AMD64) || defined(HOST_ARM64) *size = 8; -#elif defined(_ARM_) || defined(_X86_) +#elif defined(HOST_ARM) || defined(HOST_X86) *size = 4; #else #error Unsupported architecture diff --git a/src/coreclr/src/debug/createdump/dumpwriter.h b/src/coreclr/src/debug/createdump/dumpwriter.h index a4d40a28bf2f4..01d1c50c16ae7 100644 --- a/src/coreclr/src/debug/createdump/dumpwriter.h +++ b/src/coreclr/src/debug/createdump/dumpwriter.h @@ -2,7 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
-#ifdef BIT64 +#ifdef HOST_64BIT #define ELF_CLASS ELFCLASS64 #else #define ELF_CLASS ELFCLASS32 diff --git a/src/coreclr/src/debug/createdump/memoryregion.h b/src/coreclr/src/debug/createdump/memoryregion.h index bd5536e3c226c..234af4ff77949 100644 --- a/src/coreclr/src/debug/createdump/memoryregion.h +++ b/src/coreclr/src/debug/createdump/memoryregion.h @@ -7,7 +7,7 @@ #define PAGE_MASK (~(PAGE_SIZE-1)) #endif -#ifdef BIT64 +#ifdef HOST_64BIT #define PRIA "016" #else #define PRIA "08" diff --git a/src/coreclr/src/debug/daccess/daccess.cpp b/src/coreclr/src/debug/daccess/daccess.cpp index b1c6572a55f95..1b60d238c7f94 100644 --- a/src/coreclr/src/debug/daccess/daccess.cpp +++ b/src/coreclr/src/debug/daccess/daccess.cpp @@ -24,7 +24,7 @@ #include "dwreport.h" #include "primitives.h" #include "dbgutil.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include #endif @@ -39,7 +39,7 @@ ClrDataAccess* g_dacImpl; HINSTANCE g_thisModule; EXTERN_C -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL WINAPI DllMain(HANDLE instance, DWORD reason, LPVOID reserved) @@ -52,7 +52,7 @@ BOOL WINAPI DllMain(HANDLE instance, DWORD reason, LPVOID reserved) { if (g_procInitialized) { -#ifdef FEATURE_PAL +#ifdef HOST_UNIX // Double initialization can happen on Unix // in case of manual load of DAC shared lib and calling DllMain // not a big deal, we just ignore it. @@ -62,7 +62,7 @@ BOOL WINAPI DllMain(HANDLE instance, DWORD reason, LPVOID reserved) #endif } -#ifdef FEATURE_PAL +#ifdef HOST_UNIX int err = PAL_InitializeDLL(); if(err != 0) { @@ -2329,7 +2329,7 @@ namespace serialization { namespace bin { static const size_t ErrOverflow = (size_t)(-1); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Template class is_blittable template @@ -2380,11 +2380,11 @@ namespace serialization { namespace bin { template class Traits::value>::type> { -#else // FEATURE_PAL +#else // TARGET_UNIX template class Traits { -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX public: // // raw_size() returns the size in bytes of the binary representation of a @@ -2546,7 +2546,7 @@ namespace serialization { namespace bin { }; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // // Specialization for SString-derived classes (like SStrings) // @@ -2555,7 +2555,7 @@ namespace serialization { namespace bin { : public Traits { }; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // // Convenience functions to allow argument type deduction @@ -3702,7 +3702,7 @@ ClrDataAccess::GetRuntimeNameByAddress( EX_TRY { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM address &= ~THUMB_CODE; //workaround for windbg passing in addresses with the THUMB mode bit set #endif status = RawGetMethodName(address, flags, bufLen, symbolLen, symbolBuf, @@ -5527,26 +5527,26 @@ ClrDataAccess::Initialize(void) // Determine our platform based on the pre-processor macros set when we were built -#ifdef FEATURE_PAL - #if defined(DBG_TARGET_X86) +#ifdef TARGET_UNIX + #if defined(TARGET_X86) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_X86; - #elif defined(DBG_TARGET_AMD64) + #elif defined(TARGET_AMD64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_AMD64; - #elif defined(DBG_TARGET_ARM) + #elif defined(TARGET_ARM) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM; - #elif defined(DBG_TARGET_ARM64) + #elif defined(TARGET_ARM64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM64; #else #error Unknown Processor. 
#endif #else - #if defined(DBG_TARGET_X86) + #if defined(TARGET_X86) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_X86; - #elif defined(DBG_TARGET_AMD64) + #elif defined(TARGET_AMD64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_AMD64; - #elif defined(DBG_TARGET_ARM) + #elif defined(TARGET_ARM) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM; - #elif defined(DBG_TARGET_ARM64) + #elif defined(TARGET_ARM64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_WINDOWS_ARM64; #else #error Unknown Processor. @@ -5724,12 +5724,12 @@ ClrDataAccess::GetJitHelperName( }; static_assert_no_msg(COUNTOF(s_rgHelperNames) == CORINFO_HELP_COUNT); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (!dynamicHelpersOnly) #else if (!dynamicHelpersOnly && g_runtimeLoadedBaseAddress <= address && address < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize) -#endif // FEATURE_PAL +#endif // TARGET_UNIX { // Read the whole table from the target in one shot for better performance VMHELPDEF * pTable = static_cast( @@ -5772,7 +5772,7 @@ ClrDataAccess::RawGetMethodName( /* [size_is][out] */ __out_ecount_opt(bufLen) WCHAR symbolBuf[ ], /* [out] */ CLRDATA_ADDRESS* displacement) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM _ASSERTE((address & THUMB_CODE) == 0); address &= ~THUMB_CODE; #endif @@ -5848,7 +5848,7 @@ ClrDataAccess::RawGetMethodName( #endif PCODE alignedAddress = AlignDown(TO_TADDR(address), PRECODE_ALIGNMENT); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM alignedAddress += THUMB_CODE; #endif @@ -7028,7 +7028,7 @@ bool ClrDataAccess::TargetConsistencyAssertsEnabled() // HRESULT ClrDataAccess::VerifyDlls() { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Provide a knob for disabling this check if we really want to try and proceed anyway with a // DAC mismatch. DAC behavior may be arbitrarily bad - globals probably won't be at the same // address, data structures may be laid out differently, etc. 
@@ -7149,7 +7149,7 @@ HRESULT ClrDataAccess::VerifyDlls() // Return a specific hresult indicating this problem return CORDBG_E_MISMATCHED_CORWKS_AND_DACWKS_DLLS; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return S_OK; } @@ -7249,7 +7249,7 @@ bool ClrDataAccess::MdCacheGetEEName(TADDR taEEStruct, SString & eeName) HRESULT ClrDataAccess::GetDacGlobals() { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #ifdef DAC_TABLE_SIZE if (DAC_TABLE_SIZE != sizeof(g_dacGlobals)) { @@ -7404,7 +7404,7 @@ BOOL ClrDataAccess::IsExceptionFromManagedCode(EXCEPTION_RECORD* pExceptionRecor return flag; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //---------------------------------------------------------------------------- // @@ -7444,7 +7444,7 @@ HRESULT ClrDataAccess::GetWatsonBuckets(DWORD dwThreadId, GenericModeBlock * pGM return hr; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX //---------------------------------------------------------------------------- // @@ -7548,7 +7548,7 @@ BOOL OutOfProcessExceptionEventGetProcessIdAndThreadId(HANDLE hProcess, HANDLE h { _ASSERTE((pPId != NULL) && (pThreadId != NULL)); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // UNIXTODO: mikem 1/13/15 Need appropriate PAL functions for getting ids *pPId = (DWORD)(SIZE_T)hProcess; *pThreadId = (DWORD)(SIZE_T)hThread; @@ -7581,7 +7581,7 @@ BOOL OutOfProcessExceptionEventGetProcessIdAndThreadId(HANDLE hProcess, HANDLE h *pPId = (*pGetProcessIdOfThread)(hThread); *pThreadId = (*pGetThreadId)(hThread); -#endif // FEATURE_PAL +#endif // TARGET_UNIX return TRUE; } @@ -7598,7 +7598,7 @@ typedef struct _WER_RUNTIME_EXCEPTION_INFORMATION #endif // !defined(WER_RUNTIME_EXCEPTION_INFORMATION) -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //---------------------------------------------------------------------------- // @@ -7885,7 +7885,7 @@ STDAPI OutOfProcessExceptionEventSignatureCallback(__in PDWORD pContext, return S_OK; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX //---------------------------------------------------------------------------- // diff --git a/src/coreclr/src/debug/daccess/dacdbiimpl.cpp b/src/coreclr/src/debug/daccess/dacdbiimpl.cpp index 2bf8efaee3c19..43c0ed6dcc6cb 100644 --- a/src/coreclr/src/debug/daccess/dacdbiimpl.cpp +++ b/src/coreclr/src/debug/daccess/dacdbiimpl.cpp @@ -475,14 +475,14 @@ BOOL DacDbiInterfaceImpl::IsTransitionStub(CORDB_ADDRESS address) BOOL fIsStub = FALSE; -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) // Currently IsIPInModule() is not implemented in the PAL. Rather than skipping the check, we should // either E_NOTIMPL this API or implement IsIPInModule() in the PAL. Since ICDProcess::IsTransitionStub() // is only called by VS in mixed-mode debugging scenarios, and mixed-mode debugging is not supported on // POSIX systems, there is really no incentive to implement this API at this point. ThrowHR(E_NOTIMPL); -#else // !FEATURE_PAL +#else // !TARGET_UNIX TADDR ip = (TADDR)address; @@ -501,7 +501,7 @@ BOOL DacDbiInterfaceImpl::IsTransitionStub(CORDB_ADDRESS address) fIsStub = IsIPInModule(m_globalBase, ip); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return fIsStub; } @@ -1408,7 +1408,7 @@ void DacDbiInterfaceImpl::GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMe IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0}; TADDR codeAddr = CORDB_ADDRESS_TO_TADDR(hotCodeStartAddr); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // TADDR should not have the thumb code bit set. 
_ASSERTE((codeAddr & THUMB_CODE) == 0); codeAddr &= ~THUMB_CODE; @@ -5077,7 +5077,7 @@ void DacDbiInterfaceImpl::AlignStackPointer(CORDB_ADDRESS * pEsp) SUPPORTS_DAC; // Nop on x86. -#if defined(BIT64) +#if defined(HOST_64BIT) // on 64-bit, stack pointer must be 16-byte aligned. // Stacks grown down, so round down to nearest 0xF bits. *pEsp &= ~((CORDB_ADDRESS) 0xF); @@ -5174,7 +5174,7 @@ void DacDbiInterfaceImpl::Hijack( // (The hijack function already has the context) _ASSERTE((pOriginalContext == NULL) == (cbSizeContext == 0)); _ASSERTE(EHijackReason::IsValid(reason)); -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX _ASSERTE(!"Not supported on this platform"); #endif @@ -5305,14 +5305,14 @@ void DacDbiInterfaceImpl::Hijack( // Regarding stack overflow: We could do an explicit check against the thread's stack base limit. // However, we don't need an explicit overflow check because if the stack does overflow, // the hijack will just hit a regular stack-overflow exception. -#if defined(_TARGET_X86_) // TARGET +#if defined(TARGET_X86) // TARGET // X86 calling convention is to push args on the stack in reverse order. // If we fail here, the stack is written, but esp hasn't been committed yet so it shouldn't matter. PushHelper(&esp, &pData, TRUE); PushHelper(&esp, &reason, TRUE); PushHelper(&esp, &espRecord, TRUE); PushHelper(&esp, &espContext, TRUE); -#elif defined (_TARGET_AMD64_) // TARGET +#elif defined (TARGET_AMD64) // TARGET // AMD64 calling convention is to place first 4 parameters in: rcx, rdx, r8 and r9 ctx.Rcx = (DWORD64) espContext; ctx.Rdx = (DWORD64) espRecord; @@ -5326,12 +5326,12 @@ void DacDbiInterfaceImpl::Hijack( PushHelper(&esp, reinterpret_cast(&(ctx.R8)), FALSE); PushHelper(&esp, reinterpret_cast(&(ctx.Rdx)), FALSE); PushHelper(&esp, reinterpret_cast(&(ctx.Rcx)), FALSE); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) ctx.R0 = (DWORD)espContext; ctx.R1 = (DWORD)espRecord; ctx.R2 = (DWORD)reason; ctx.R3 = (DWORD)pData; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) ctx.X0 = (DWORD64)espContext; ctx.X1 = (DWORD64)espRecord; ctx.X2 = (DWORD64)reason; @@ -5404,11 +5404,11 @@ TargetBuffer DacDbiInterfaceImpl::GetVarArgSig(CORDB_ADDRESS VASigCookieAddr, VASigCookie * pVACookie = PTR_VASigCookie(taVASigCookie); // Figure out where the first argument is. -#if defined(_TARGET_X86_) // (STACK_GROWS_DOWN_ON_ARGS_WALK) +#if defined(TARGET_X86) // (STACK_GROWS_DOWN_ON_ARGS_WALK) *pArgBase = VASigCookieAddr + pVACookie->sizeOfArgs; -#else // !_TARGET_X86_ (STACK_GROWS_UP_ON_ARGS_WALK) +#else // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK) *pArgBase = VASigCookieAddr + sizeof(VASigCookie *); -#endif // !_TARGET_X86_ (STACK_GROWS_UP_ON_ARGS_WALK) +#endif // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK) return TargetBuffer(PTR_TO_CORDB_ADDRESS(pVACookie->signature.GetRawSig()), pVACookie->signature.GetRawSigLen()); diff --git a/src/coreclr/src/debug/daccess/dacdbiimplstackwalk.cpp b/src/coreclr/src/debug/daccess/dacdbiimplstackwalk.cpp index 15abd7d2d5ebe..29dce45d6480c 100644 --- a/src/coreclr/src/debug/daccess/dacdbiimplstackwalk.cpp +++ b/src/coreclr/src/debug/daccess/dacdbiimplstackwalk.cpp @@ -1014,7 +1014,7 @@ void DacDbiInterfaceImpl::AdjustRegDisplayForStackParameter(REGDISPLAY * BOOL fIsActiveFrame, StackAdjustmentDirection direction) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // If the CONTEXT is active then no adjustment is needed. 
if (!fIsActiveFrame) { @@ -1033,7 +1033,7 @@ void DacDbiInterfaceImpl::AdjustRegDisplayForStackParameter(REGDISPLAY * } SetRegdisplaySP(pRD, reinterpret_cast(sp)); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } //--------------------------------------------------------------------------------------- @@ -1149,7 +1149,7 @@ CorDebugInternalFrameType DacDbiInterfaceImpl::GetInternalFrameType(Frame * pFra void DacDbiInterfaceImpl::UpdateContextFromRegDisp(REGDISPLAY * pRegDisp, T_CONTEXT * pContext) { -#if defined(_TARGET_X86_) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) // Do a partial copy first. pContext->ContextFlags = (CONTEXT_INTEGER | CONTEXT_CONTROL); @@ -1171,9 +1171,9 @@ void DacDbiInterfaceImpl::UpdateContextFromRegDisp(REGDISPLAY * pRegDisp, { *pContext = *pRegDisp->pContext; } -#else // _TARGET_X86_ && !FEATURE_EH_FUNCLETS +#else // TARGET_X86 && !FEATURE_EH_FUNCLETS *pContext = *pRegDisp->pCurrentContext; -#endif // !_TARGET_X86_ || FEATURE_EH_FUNCLETS +#endif // !TARGET_X86 || FEATURE_EH_FUNCLETS } //--------------------------------------------------------------------------------------- @@ -1216,7 +1216,7 @@ PTR_CONTEXT DacDbiInterfaceImpl::RetrieveHijackedContext(REGDISPLAY * pRD) // Convert the REGDISPLAY to a CONTEXT; T_CONTEXT * pContext = NULL; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) T_CONTEXT ctx; pContext = &ctx; UpdateContextFromRegDisp(pRD, pContext); diff --git a/src/coreclr/src/debug/daccess/dacfn.cpp b/src/coreclr/src/debug/daccess/dacfn.cpp index a8f3483a64c5c..cf1428498f00f 100644 --- a/src/coreclr/src/debug/daccess/dacfn.cpp +++ b/src/coreclr/src/debug/daccess/dacfn.cpp @@ -216,7 +216,7 @@ DacWriteAll(TADDR addr, PVOID buffer, ULONG32 size, bool throwEx) return S_OK; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static BOOL DacReadAllAdapter(PVOID address, PVOID buffer, SIZE_T size) { @@ -282,7 +282,7 @@ DacVirtualUnwind(ULONG32 threadId, PT_CONTEXT context, PT_KNONVOLATILE_CONTEXT_P return hr; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // DacAllocVirtual - Allocate memory from the target process // Note: this is only available to clients supporting the legacy @@ -1451,7 +1451,7 @@ void DacEnumCodeForStackwalk(TADDR taCallEnd) // DacEnumMemoryRegion(taCallEnd - MAX_INSTRUCTION_LENGTH, MAX_INSTRUCTION_LENGTH * 2, false); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // If it was an indirect call we also need to save the data indirected through. 
// Note that this only handles absolute indirect calls (ModR/M byte of 0x15), all the other forms of // indirect calls are register-relative, and so we'd have to do a much more complicated decoding based @@ -1469,7 +1469,7 @@ void DacEnumCodeForStackwalk(TADDR taCallEnd) { DacEnumMemoryRegion(*callInd, sizeof(TADDR), false); } -#endif // #ifdef _TARGET_X86_ +#endif // #ifdef TARGET_X86 } // ---------------------------------------------------------------------------- diff --git a/src/coreclr/src/debug/daccess/dacimpl.h b/src/coreclr/src/debug/daccess/dacimpl.h index 21deb2986db2c..958adb1fd9178 100644 --- a/src/coreclr/src/debug/daccess/dacimpl.h +++ b/src/coreclr/src/debug/daccess/dacimpl.h @@ -16,13 +16,13 @@ #include "gcinterface.dac.h" -#if defined(_TARGET_ARM_) || defined(FEATURE_CORESYSTEM) // @ARMTODO: STL breaks the build with current VC headers +#if defined(TARGET_ARM) || defined(FEATURE_CORESYSTEM) // @ARMTODO: STL breaks the build with current VC headers //--------------------------------------------------------------------------------------- // Setting DAC_HASHTABLE tells the DAC to use the hand rolled hashtable for // storing code:DAC_INSTANCE . Otherwise, the DAC uses the STL unordered_map to. #define DAC_HASHTABLE -#endif // _TARGET_ARM_|| FEATURE_CORESYSTEM +#endif // TARGET_ARM|| FEATURE_CORESYSTEM #ifndef DAC_HASHTABLE #pragma push_macro("return") @@ -59,7 +59,7 @@ extern CRITICAL_SECTION g_dacCritSec; inline TADDR CLRDATA_ADDRESS_TO_TADDR(CLRDATA_ADDRESS cdAddr) { SUPPORTS_DAC; -#ifndef BIT64 +#ifndef HOST_64BIT static_assert_no_msg(sizeof(TADDR)==sizeof(UINT)); INT64 iSignedAddr = (INT64)cdAddr; if (iSignedAddr > INT_MAX || iSignedAddr < INT_MIN) @@ -76,7 +76,7 @@ inline TADDR CLRDATA_ADDRESS_TO_TADDR(CLRDATA_ADDRESS cdAddr) inline HRESULT TRY_CLRDATA_ADDRESS_TO_TADDR(CLRDATA_ADDRESS cdAddr, TADDR* pOutTaddr) { SUPPORTS_DAC; -#ifndef BIT64 +#ifndef HOST_64BIT static_assert_no_msg(sizeof(TADDR)==sizeof(UINT)); INT64 iSignedAddr = (INT64)cdAddr; if (iSignedAddr > INT_MAX || iSignedAddr < INT_MIN) @@ -93,7 +93,7 @@ inline HRESULT TRY_CLRDATA_ADDRESS_TO_TADDR(CLRDATA_ADDRESS cdAddr, TADDR* pOutT inline TADDR CORDB_ADDRESS_TO_TADDR(CORDB_ADDRESS cdbAddr) { SUPPORTS_DAC; -#ifndef BIT64 +#ifndef HOST_64BIT static_assert_no_msg(sizeof(TADDR)==sizeof(UINT)); if (cdbAddr > UINT_MAX) { @@ -635,7 +635,7 @@ struct DAC_INSTANCE // a method descriptor ULONG32 MDEnumed:1; -#ifdef BIT64 +#ifdef HOST_64BIT // Keep DAC_INSTANCE a multiple of DAC_INSTANCE_ALIGN // bytes in size. 
ULONG32 pad[2]; @@ -1202,9 +1202,9 @@ class ClrDataAccess HRESULT Initialize(void); BOOL IsExceptionFromManagedCode(EXCEPTION_RECORD * pExceptionRecord); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT GetWatsonBuckets(DWORD dwThreadId, GenericModeBlock * pGM); -#endif // FEATURE_PAL +#endif // TARGET_UNIX Thread* FindClrThreadByTaskId(ULONG64 taskId); @@ -1266,9 +1266,9 @@ class ClrDataAccess // Get the MethodDesc for a function MethodDesc * FindLoadedMethodRefOrDef(Module* pModule, mdToken memberRef); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT GetClrWatsonBucketsWorker(Thread * pThread, GenericModeBlock * pGM); -#endif // FEATURE_PAL +#endif // TARGET_UNIX HRESULT ServerGCHeapDetails(CLRDATA_ADDRESS heapAddr, DacpGcHeapDetails *detailsData); @@ -3948,7 +3948,7 @@ HRESULT GetServerHeaps(CLRDATA_ADDRESS pGCHeaps[], ICorDebugDataTarget* pTarget) #if defined(DAC_MEASURE_PERF) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Assume Pentium CPU @@ -3977,7 +3977,7 @@ __asm pop EDX #pragma warning( default: 4035 ) -#else // #if defined(_TARGET_X86_) +#else // #if defined(TARGET_X86) #define CCNT_OVERHEAD 0 // Don't know @@ -3990,7 +3990,7 @@ __inline unsigned __int64 GetCycleCount() return qwTmp.QuadPart; } -#endif // #if defined(_TARGET_X86_) +#endif // #if defined(TARGET_X86) extern unsigned __int64 g_nTotalTime; extern unsigned __int64 g_nStackTotalTime; diff --git a/src/coreclr/src/debug/daccess/datatargetadapter.cpp b/src/coreclr/src/debug/daccess/datatargetadapter.cpp index df8fda4d210a8..be176ff75f1dd 100644 --- a/src/coreclr/src/debug/daccess/datatargetadapter.cpp +++ b/src/coreclr/src/debug/daccess/datatargetadapter.cpp @@ -111,7 +111,7 @@ DataTargetAdapter::GetPlatform( switch(ulMachineType) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX case IMAGE_FILE_MACHINE_I386: ulExpectedPointerSize = 4; platform = CORDB_PLATFORM_POSIX_X86; @@ -136,7 +136,7 @@ DataTargetAdapter::GetPlatform( _ASSERTE_MSG(false, "Not supported platform."); return E_NOTIMPL; -#else // FEATURE_PAL +#else // TARGET_UNIX case IMAGE_FILE_MACHINE_I386: ulExpectedPointerSize = 4; platform = CORDB_PLATFORM_WINDOWS_X86; @@ -161,7 +161,7 @@ DataTargetAdapter::GetPlatform( ulExpectedPointerSize = 8; platform = CORDB_PLATFORM_WINDOWS_ARM64; break; -#endif // FEATURE_PAL +#endif // TARGET_UNIX default: // No other platforms are current supported diff --git a/src/coreclr/src/debug/daccess/enummem.cpp b/src/coreclr/src/debug/daccess/enummem.cpp index cc4b9b229b131..1a5cff87ab0ce 100644 --- a/src/coreclr/src/debug/daccess/enummem.cpp +++ b/src/coreclr/src/debug/daccess/enummem.cpp @@ -21,7 +21,7 @@ #include "binder.h" #include "win32threadpool.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include #endif @@ -195,7 +195,7 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags) #define DEFINE_DACVAR(id_type, size_type, id, var) \ ReportMem(m_globalBase + g_dacGlobals.id, sizeof(size_type)); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Add the dac table memory in coreclr CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED ( ReportMem(m_globalBase + DAC_TABLE_RVA, sizeof(g_dacGlobals)); ) #endif @@ -241,9 +241,9 @@ HRESULT ClrDataAccess::EnumMemCLRStatic(IN CLRDataEnumMemoryFlags flags) } EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED( g_runtimeLoadedBaseAddress.EnumMem(); ) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // These are the structures that are pointed by global pointers and we care. 
// Some may reside in heap and some may reside as a static byte array in mscorwks.dll @@ -1921,7 +1921,7 @@ ClrDataAccess::EnumMemoryRegions(IN ICLRDataEnumMemoryRegionsCallback* callback, status = EnumMemoryRegionsWrapper(CLRDATA_ENUM_MEM_MINI); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // For all dump types, we need to capture the chain to the IMAGE_DIRECTORY_ENTRY_DEBUG // contents, so that DAC can validate against the TimeDateStamp even if the // debugger can't find the main CLR module on disk. diff --git a/src/coreclr/src/debug/daccess/fntableaccess.cpp b/src/coreclr/src/debug/daccess/fntableaccess.cpp index 14854bde933bb..57e68298de4e7 100644 --- a/src/coreclr/src/debug/daccess/fntableaccess.cpp +++ b/src/coreclr/src/debug/daccess/fntableaccess.cpp @@ -11,8 +11,8 @@ #include "stdafx.h" -#ifndef FEATURE_PAL -#ifndef _TARGET_X86_ +#ifndef TARGET_UNIX +#ifndef TARGET_X86 // // @@ -308,7 +308,7 @@ static NTSTATUS OutOfProcessFunctionTableCallback_Stub(IN ReadMemoryFunction { FakeStubUnwindInfoHeader unwindInfoHeader; move(unwindInfoHeader, pHeader); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Consistency checks to detect corrupted process state if (unwindInfoHeader.FunctionEntry.BeginAddress > unwindInfoHeader.FunctionEntry.EndAddress || unwindInfoHeader.FunctionEntry.EndAddress > stubHeapSegment.cbSegment) @@ -323,11 +323,11 @@ static NTSTATUS OutOfProcessFunctionTableCallback_Stub(IN ReadMemoryFunction _ASSERTE(1 == pass); return STATUS_UNSUCCESSFUL; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Skip checking the corrupted process stateon ARM -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Compute the function length ULONG64 functionLength = 0; ULONG64 unwindData = unwindInfoHeader.FunctionEntry.UnwindData; @@ -457,5 +457,5 @@ extern "C" NTSTATUS OutOfProcessFunctionTableCallbackEx() return STATUS_UNSUCCESSFUL; } -#endif // !_TARGET_X86_ -#endif // !FEATURE_PAL +#endif // !TARGET_X86 +#endif // !TARGET_UNIX diff --git a/src/coreclr/src/debug/daccess/fntableaccess.h b/src/coreclr/src/debug/daccess/fntableaccess.h index b1d6f8a4ba7f6..4a6992b23aa46 100644 --- a/src/coreclr/src/debug/daccess/fntableaccess.h +++ b/src/coreclr/src/debug/daccess/fntableaccess.h @@ -12,11 +12,11 @@ #define _FN_TABLE_ACCESS_H -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define DEBUGSUPPORT_STUBS_HAVE_UNWIND_INFO -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifndef USE_INDIRECT_CODEHEADER #define USE_INDIRECT_CODEHEADER @@ -106,11 +106,11 @@ struct FakeStub #ifdef _DEBUG UINT32 m_signature; #else -#ifdef BIT64 +#ifdef HOST_64BIT //README ALIGNEMENT: in retail mode UINT m_numCodeBytes does not align to 16byte for the code // after the Stub struct. 
This is to pad properly UINT m_pad_code_bytes; -#endif // BIT64 +#endif // HOST_64BIT #endif // _DEBUG }; @@ -143,10 +143,10 @@ class CheckDuplicatedStructLayouts CHECK_OFFSET(HeapList, mapBase); CHECK_OFFSET(HeapList, pHdrMap); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) CHECK_OFFSET(RealCodeHeader, nUnwindInfos); CHECK_OFFSET(RealCodeHeader, unwindInfos); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #ifdef DEBUGSUPPORT_STUBS_HAVE_UNWIND_INFO CHECK_OFFSET(StubUnwindInfoHeader, pNext); diff --git a/src/coreclr/src/debug/daccess/nidump.cpp b/src/coreclr/src/debug/daccess/nidump.cpp index 3e93f1746e64a..1f0c91e5faa82 100644 --- a/src/coreclr/src/debug/daccess/nidump.cpp +++ b/src/coreclr/src/debug/daccess/nidump.cpp @@ -3051,13 +3051,13 @@ void NativeImageDumper::DumpCompleteMethod(PTR_Module module, MethodIterator& mi g_holdStringOutData.Clear(); GCDump gcDump(gcInfoToken.Version); gcDump.gcPrintf = stringOutFn; -#if !defined(_TARGET_X86_) && defined(USE_GC_INFO_DECODER) +#if !defined(TARGET_X86) && defined(USE_GC_INFO_DECODER) GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_CODE_LENGTH); methodSize = gcInfoDecoder.GetCodeLength(); #endif //dump the data to a string first so we can get the gcinfo size. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 InfoHdr hdr; stringOutFn( "method info Block:\n" ); curGCInfoPtr += gcDump.DumpInfoHdr(curGCInfoPtr, &hdr, &methodSize, 0); @@ -3066,7 +3066,7 @@ void NativeImageDumper::DumpCompleteMethod(PTR_Module module, MethodIterator& mi IF_OPT(METHODS) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 stringOutFn( "PointerTable:\n" ); curGCInfoPtr += gcDump.DumpGCTable( curGCInfoPtr, hdr, @@ -3350,7 +3350,7 @@ SIZE_T NativeImageDumper::TranslateFixupCallback(IXCLRDisassemblySupport *dis, case sizeof(void*): targetOffset = *PTR_SIZE_T(taddr); break; -#ifdef BIT64 +#ifdef HOST_64BIT case sizeof(INT32): targetOffset = *PTR_INT32(taddr); break; @@ -6234,7 +6234,7 @@ void NativeImageDumper::DoDumpComPlusCallInfo( PTR_ComPlusCallInfo compluscall ) compluscall->m_pStubMD.GetValueMaybeNull(PTR_HOST_MEMBER_TADDR(ComPlusCallInfo, compluscall, m_pStubMD)), ComPlusCallInfo, ALWAYS ); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 DisplayWriteFieldInt( m_cbStackArgumentSize, compluscall->m_cbStackArgumentSize, ComPlusCallInfo, ALWAYS ); @@ -7723,7 +7723,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module ) CoverageRead(TO_TADDR(ssmd->GetSigRVA()), ssmd->m_cSig); DisplayWriteFieldInt( m_cSig, ssmd->m_cSig, StoredSigMethodDesc, METHODDESCS ); -#ifdef BIT64 +#ifdef HOST_64BIT DisplayWriteFieldEnumerated( m_dwExtendedFlags, ssmd->m_dwExtendedFlags, StoredSigMethodDesc, @@ -7743,7 +7743,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module ) DisplayWriteFieldPointer( m_pResolver, DPtrToPreferredAddr(dmd->m_pResolver), DynamicMethodDesc, METHODDESCS ); -#ifndef BIT64 +#ifndef HOST_64BIT DisplayWriteFieldEnumerated( m_dwExtendedFlags, dmd->m_dwExtendedFlags, DynamicMethodDesc, @@ -7866,7 +7866,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module ) } #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 DisplayWriteFieldInt( m_cbStackArgumentSize, nd->m_cbStackArgumentSize, NDirectMethodDesc::temp1, METHODDESCS ); @@ -8985,13 +8985,13 @@ void NativeImageDumper::DumpReadyToRunMethod(PCODE pEntryPoint, PTR_RUNTIME_FUNC UINT32 gcInfoVersion = GCInfoToken::ReadyToRunVersionToGcInfoVersion(r2rversion); GCInfoToken gcInfoToken = { curGCInfoPtr, gcInfoVersion }; -#if !defined(_TARGET_X86_) && 
defined(USE_GC_INFO_DECODER) +#if !defined(TARGET_X86) && defined(USE_GC_INFO_DECODER) GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_CODE_LENGTH); methodSize = gcInfoDecoder.GetCodeLength(); #endif //dump the data to a string first so we can get the gcinfo size. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 InfoHdr hdr; stringOutFn("method info Block:\n"); curGCInfoPtr += gcDump.DumpInfoHdr(curGCInfoPtr, &hdr, &methodSize, 0); @@ -9000,7 +9000,7 @@ void NativeImageDumper::DumpReadyToRunMethod(PCODE pEntryPoint, PTR_RUNTIME_FUNC IF_OPT(METHODS) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 stringOutFn("PointerTable:\n"); curGCInfoPtr += gcDump.DumpGCTable(curGCInfoPtr, hdr, @@ -9095,7 +9095,7 @@ HRESULT ClrDataAccess::DumpNativeImage(CLRDATA_ADDRESS loadedBase, #undef NOTHROW #undef GC_NOTRIGGER -#if defined _DEBUG && defined _TARGET_X86_ +#if defined _DEBUG && defined TARGET_X86 #ifdef _MSC_VER // disable FPO for checked build #pragma optimize("y", off) @@ -9104,18 +9104,18 @@ HRESULT ClrDataAccess::DumpNativeImage(CLRDATA_ADDRESS loadedBase, #undef _ASSERTE #define _ASSERTE(a) do {} while (0) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #include #endif #undef LIMITED_METHOD_CONTRACT #undef WRAPPER_NO_CONTRACT -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #include -#else // !_TARGET_X86_ +#else // !TARGET_X86 #undef PREGDISPLAY #include -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #ifdef __MSC_VER #pragma warning(default:4244) diff --git a/src/coreclr/src/debug/daccess/request.cpp b/src/coreclr/src/debug/daccess/request.cpp index 7a88e4cc6d83a..11fb716c786c9 100644 --- a/src/coreclr/src/debug/daccess/request.cpp +++ b/src/coreclr/src/debug/daccess/request.cpp @@ -20,10 +20,10 @@ #include #endif // FEATURE_COMINTEROP -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // It is unfortunate having to include this header just to get the definition of GenericModeBlock #include -#endif // FEATURE_PAL +#endif // TARGET_UNIX // To include definiton of IsThrowableThreadAbortException #include @@ -577,13 +577,13 @@ ClrDataAccess::GetRegisterName(int regNum, unsigned int count, __out_z __inout_e if (!buffer && !pNeeded) return E_POINTER; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 static const WCHAR *regs[] = { W("rax"), W("rcx"), W("rdx"), W("rbx"), W("rsp"), W("rbp"), W("rsi"), W("rdi"), W("r8"), W("r9"), W("r10"), W("r11"), W("r12"), W("r13"), W("r14"), W("r15"), }; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) static const WCHAR *regs[] = { W("r0"), @@ -596,7 +596,7 @@ ClrDataAccess::GetRegisterName(int regNum, unsigned int count, __out_z __inout_e W("r7"), W("r8"), W("r9"), W("r10"), W("r11"), W("r12"), W("sp"), W("lr") }; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) static const WCHAR *regs[] = { W("X0"), @@ -611,7 +611,7 @@ ClrDataAccess::GetRegisterName(int regNum, unsigned int count, __out_z __inout_e W("X18"), W("X19"), W("X20"), W("X21"), W("X22"), W("X23"), W("X24"), W("X25"), W("X26"), W("X27"), W("X28"), W("Fp"), W("Lr"), W("Sp") }; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) static const WCHAR *regs[] = { W("eax"), W("ecx"), W("edx"), W("ebx"), W("esp"), W("ebp"), W("esi"), W("edi"), @@ -764,7 +764,7 @@ ClrDataAccess::GetThreadData(CLRDATA_ADDRESS threadAddr, struct DacpThreadData * threadData->context = PTR_CDADDR(thread->m_pDomain); threadData->domain = PTR_CDADDR(thread->m_pDomain); threadData->lockCount = thread->m_dwLockCount; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX threadData->teb = TO_CDADDR(thread->m_pTEB); #else threadData->teb = NULL; @@ -3655,7 +3655,7 @@ 
ClrDataAccess::GetJumpThunkTarget(T_CONTEXT *ctx, CLRDATA_ADDRESS *targetIP, CLR if (ctx == NULL || targetIP == NULL || targetMD == NULL) return E_INVALIDARG; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 SOSDacEnter(); if (!GetAnyThunkTarget(ctx, targetIP, targetMD)) @@ -3665,7 +3665,7 @@ ClrDataAccess::GetJumpThunkTarget(T_CONTEXT *ctx, CLRDATA_ADDRESS *targetIP, CLR return hr; #else return E_FAIL; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } @@ -3764,10 +3764,10 @@ ClrDataAccess::EnumWksGlobalMemoryRegions(CLRDataEnumMemoryFlags flags) HRESULT ClrDataAccess::GetClrWatsonBuckets(CLRDATA_ADDRESS thread, void *pGenericModeBlock) { -#ifdef FEATURE_PAL - // This API is not available under FEATURE_PAL +#ifdef TARGET_UNIX + // This API is not available under TARGET_UNIX return E_FAIL; -#else // FEATURE_PAL +#else // TARGET_UNIX if (thread == 0 || pGenericModeBlock == NULL) return E_INVALIDARG; @@ -3778,10 +3778,10 @@ ClrDataAccess::GetClrWatsonBuckets(CLRDATA_ADDRESS thread, void *pGenericModeBlo SOSDacLeave(); return hr; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT ClrDataAccess::GetClrWatsonBucketsWorker(Thread * pThread, GenericModeBlock * pGM) { @@ -3854,7 +3854,7 @@ HRESULT ClrDataAccess::GetClrWatsonBucketsWorker(Thread * pThread, GenericModeBl } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX HRESULT ClrDataAccess::GetTLSIndex(ULONG *pIndex) { @@ -4016,7 +4016,7 @@ PTR_ComCallWrapper ClrDataAccess::DACGetCCWFromAddress(CLRDATA_ADDRESS addr) returned == sizeof(TADDR)) { -#ifdef DBG_TARGET_ARM +#ifdef TARGET_ARM // clear the THUMB bit on pPtr before comparing with known vtable entry pPtr &= ~THUMB_CODE; #endif diff --git a/src/coreclr/src/debug/daccess/task.cpp b/src/coreclr/src/debug/daccess/task.cpp index ac87333e6f1ef..565b8eb106933 100644 --- a/src/coreclr/src/debug/daccess/task.cpp +++ b/src/coreclr/src/debug/daccess/task.cpp @@ -4074,7 +4074,7 @@ ClrDataMethodInstance::GetILOffsetsByAddress( ULONG32 codeOffset; ULONG32 hits = 0; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM address &= ~THUMB_CODE; // on ARM windbg passes in an address with the mode flag set... need to workaround #endif if ((status = m_dac->GetMethodNativeMap(m_methodDesc, diff --git a/src/coreclr/src/debug/debug-pal/CMakeLists.txt b/src/coreclr/src/debug/debug-pal/CMakeLists.txt index 0208b6b028ed0..032ff26b7a2c8 100644 --- a/src/coreclr/src/debug/debug-pal/CMakeLists.txt +++ b/src/coreclr/src/debug/debug-pal/CMakeLists.txt @@ -17,7 +17,6 @@ endif(WIN32) if(CLR_CMAKE_HOST_UNIX) - add_definitions(-DFEATURE_PAL) add_definitions(-DPAL_IMPLEMENTATION) add_definitions(-D_POSIX_C_SOURCE=200809L) diff --git a/src/coreclr/src/debug/di/cordb.cpp b/src/coreclr/src/debug/di/cordb.cpp index aed8be3287e83..ad304fbf23382 100644 --- a/src/coreclr/src/debug/di/cordb.cpp +++ b/src/coreclr/src/debug/di/cordb.cpp @@ -20,7 +20,7 @@ #include "dbgtransportmanager.h" #endif // FEATURE_DBGIPC_TRANSPORT_DI -#if defined(PLATFORM_UNIX) || defined(__ANDROID__) +#if defined(TARGET_UNIX) || defined(__ANDROID__) // Local (in-process) debugging is not supported for UNIX and Android. #define SUPPORT_LOCAL_DEBUGGING 0 #else @@ -28,7 +28,7 @@ #endif //********** Globals. ********************************************************* -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HINSTANCE g_hInst; // Instance handle to this piece of code. 
#endif @@ -201,7 +201,7 @@ BOOL WINAPI DbgDllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved) case DLL_PROCESS_ATTACH: { -#ifndef FEATURE_PAL +#ifndef HOST_UNIX g_hInst = hInstance; #else int err = PAL_InitializeDLL(); @@ -442,7 +442,7 @@ HRESULT STDMETHODCALLTYPE CClassFactory::LockServer( //***************************************************************************** // This helper provides access to the instance handle of the loaded image. //***************************************************************************** -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HINSTANCE GetModuleInst() { return g_hInst; @@ -507,13 +507,13 @@ CLRRuntimeHostInternal_GetImageVersionString( } // CLRRuntimeHostInternal_GetImageVersionString -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM BOOL DbiGetThreadContext(HANDLE hThread, DT_CONTEXT *lpContext) { // if we aren't local debugging this isn't going to work -#if !defined(_ARM_) || defined(FEATURE_DBGIPC_TRANSPORT_DI) || !SUPPORT_LOCAL_DEBUGGING +#if !defined(HOST_ARM) || defined(FEATURE_DBGIPC_TRANSPORT_DI) || !SUPPORT_LOCAL_DEBUGGING _ASSERTE(!"Can't use local GetThreadContext remotely, this needed to go to datatarget"); return FALSE; #else @@ -552,7 +552,7 @@ BOOL DbiSetThreadContext(HANDLE hThread, const DT_CONTEXT *lpContext) { -#if !defined(_ARM_) || defined(FEATURE_DBGIPC_TRANSPORT_DI) || !SUPPORT_LOCAL_DEBUGGING +#if !defined(HOST_ARM) || defined(FEATURE_DBGIPC_TRANSPORT_DI) || !SUPPORT_LOCAL_DEBUGGING _ASSERTE(!"Can't use local GetThreadContext remotely, this needed to go to datatarget"); return FALSE; #else diff --git a/src/coreclr/src/debug/di/dbgtransportpipeline.cpp b/src/coreclr/src/debug/di/dbgtransportpipeline.cpp index 1231dbfc4ac0f..b305dcf060610 100644 --- a/src/coreclr/src/debug/di/dbgtransportpipeline.cpp +++ b/src/coreclr/src/debug/di/dbgtransportpipeline.cpp @@ -111,7 +111,7 @@ class DbgTransportPipeline : // Terminate the debuggee process. virtual BOOL TerminateProcess(UINT32 exitCode); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX virtual void CleanupTargetProcess() { m_pTransport->CleanupTargetProcess(); diff --git a/src/coreclr/src/debug/di/module.cpp b/src/coreclr/src/debug/di/module.cpp index c11b730c8fbf8..bcf11d206540e 100644 --- a/src/coreclr/src/debug/di/module.cpp +++ b/src/coreclr/src/debug/di/module.cpp @@ -807,7 +807,7 @@ HRESULT CordbModule::InitPublicMetaDataFromFile() fDebuggeeLoadedNgen = true; fDebuggerLoadingNgen = true; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // NGEN images are large and we shouldn't load them if they won't be shared, therefore fail the NGEN mapping and // fallback to IL image if the debugger doesn't have the image loaded already. // Its possible that the debugger would still load the NGEN image sometime in the future and we will miss a sharing @@ -880,7 +880,7 @@ HRESULT CordbModule::InitPublicMetaDataFromFile(const WCHAR * pszFullPathName, DWORD dwOpenFlags, bool validateFileInfo) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // UNIXTODO: Some intricate details of file mapping don't work on Linux as on Windows. // We have to revisit this and try to fix it for POSIX system. 
return E_FAIL; @@ -993,7 +993,7 @@ HRESULT CordbModule::InitPublicMetaDataFromFile(const WCHAR * pszFullPathName, } return hr; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } //--------------------------------------------------------------------------------------- @@ -2566,7 +2566,7 @@ HRESULT CordbModule::CreateReaderForInMemorySymbols(REFIID riid, void** ppObj) ReleaseHolder pBinder; if (symFormat == IDacDbiInterface::kSymbolFormatPDB) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // PDB format - use diasymreader.dll with COM activation InlineSString<_MAX_PATH> ssBuf; IfFailThrow(GetHModuleDirectory(GetModuleInst(), ssBuf)); @@ -4522,14 +4522,14 @@ HRESULT CordbNativeCode::EnumerateVariableHomes(ICorDebugVariableHomeEnum **ppEn int CordbNativeCode::GetCallInstructionLength(BYTE *ip, ULONG32 count) { -#if defined(DBG_TARGET_ARM) +#if defined(TARGET_ARM) if (Is32BitInstruction(*(WORD*)ip)) return 4; else return 2; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) return MAX_INSTRUCTION_LENGTH; -#elif defined(DBG_TARGET_X86) +#elif defined(TARGET_X86) if (count < 2) return -1; @@ -4680,7 +4680,7 @@ int CordbNativeCode::GetCallInstructionLength(BYTE *ip, ULONG32 count) _ASSERTE(!"Unhandled opcode!"); return -1; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) BYTE rex = NULL; BYTE prefix = *ip; BOOL fContainsPrefix = FALSE; @@ -5191,7 +5191,7 @@ HRESULT CordbNativeCode::GetReturnValueLiveOffsetImpl(Instantiation *currentInst int skipBytes = 0; -#if defined(DBG_TARGET_X86) && defined(FEATURE_CORESYSTEM) +#if defined(TARGET_X86) && defined(FEATURE_CORESYSTEM) // Skip nop sleds on x86 coresystem. The JIT adds these instructions as a security measure, // and incorrectly reports to us the wrong offset of the call instruction. const BYTE nop_opcode = 0x90; diff --git a/src/coreclr/src/debug/di/nativepipeline.h b/src/coreclr/src/debug/di/nativepipeline.h index 8e0fafa865d8a..442262142fd4c 100644 --- a/src/coreclr/src/debug/di/nativepipeline.h +++ b/src/coreclr/src/debug/di/nativepipeline.h @@ -169,7 +169,7 @@ class INativeEventPipeline return S_FALSE; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Used by debugger side (RS) to cleanup the target (LS) named pipes // and semaphores when the debugger detects the debuggee process exited. virtual void CleanupTargetProcess() diff --git a/src/coreclr/src/debug/di/platformspecific.cpp b/src/coreclr/src/debug/di/platformspecific.cpp index c4e5b1bddef09..ad5122bc346c0 100644 --- a/src/coreclr/src/debug/di/platformspecific.cpp +++ b/src/coreclr/src/debug/di/platformspecific.cpp @@ -23,16 +23,16 @@ #include "LocalEventChannel.cpp" #endif -#if DBG_TARGET_X86 +#if TARGET_X86 #include "i386/cordbregisterset.cpp" #include "i386/primitives.cpp" -#elif DBG_TARGET_AMD64 +#elif TARGET_AMD64 #include "amd64/cordbregisterset.cpp" #include "amd64/primitives.cpp" -#elif DBG_TARGET_ARM +#elif TARGET_ARM #include "arm/cordbregisterset.cpp" #include "arm/primitives.cpp" -#elif DBG_TARGET_ARM64 +#elif TARGET_ARM64 #include "arm64/cordbregisterset.cpp" #include "arm64/primitives.cpp" #else diff --git a/src/coreclr/src/debug/di/process.cpp b/src/coreclr/src/debug/di/process.cpp index 8b9e702de574a..9c5048eb300fc 100644 --- a/src/coreclr/src/debug/di/process.cpp +++ b/src/coreclr/src/debug/di/process.cpp @@ -6387,7 +6387,7 @@ HRESULT CordbProcess::SafeWriteThreadContext(LSPTR_CONTEXT pContext, const DT_CO // can think of these members as not being part of the context, ie they don't represent something // which gets saved or restored on context switches. 
They are just space we shouldn't overwrite. // See issue 630276 for more details. -#if defined DBG_TARGET_AMD64 +#if defined TARGET_AMD64 pRemoteContext += offsetof(CONTEXT, ContextFlags); // immediately follows the 6 parameters P1-P6 pCtxSource += offsetof(CONTEXT, ContextFlags); sizeToWrite -= offsetof(CONTEXT, ContextFlags); @@ -7108,7 +7108,7 @@ HRESULT CordbProcess::FindPatchByAddress(CORDB_ADDRESS address, bool *pfPatchFou if (*pfPatchFound == false) { // Read one instruction from the faulting address... -#if defined(DBG_TARGET_ARM) || defined(DBG_TARGET_ARM64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) PRD_TYPE TrapCheck = 0; #else BYTE TrapCheck = 0; @@ -7153,7 +7153,7 @@ HRESULT CordbProcess::WriteMemory(CORDB_ADDRESS address, DWORD size, DWORD fCheckInt3 = configCheckInt3.val(CLRConfig::INTERNAL_DbgCheckInt3); if (fCheckInt3) { -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) if (size == 1 && buffer[0] == 0xCC) { CONSISTENCY_CHECK_MSGF(false, @@ -7162,7 +7162,7 @@ HRESULT CordbProcess::WriteMemory(CORDB_ADDRESS address, DWORD size, "(This assert is only enabled under the COM+ knob DbgCheckInt3.)\n", CORDB_ADDRESS_TO_PTR(address))); } -#endif // DBG_TARGET_X86 || DBG_TARGET_AMD64 +#endif // TARGET_X86 || TARGET_AMD64 // check if we're replaced an opcode. if (size == 1) @@ -7625,7 +7625,7 @@ HRESULT CordbProcess::GetRuntimeOffsets() m_hHelperThread = pfnOpenThread(SYNCHRONIZE, FALSE, dwHelperTid); CONSISTENCY_CHECK_MSGF(m_hHelperThread != NULL, ("Failed to get helper-thread handle. tid=0x%x\n", dwHelperTid)); } -#elif FEATURE_PAL +#elif TARGET_UNIX m_hHelperThread = NULL; //RS is supposed to be able to live without a helper thread handle. #else m_hHelperThread = OpenThread(SYNCHRONIZE, FALSE, dwHelperTid); @@ -9033,9 +9033,9 @@ bool CordbProcess::IsBreakOpcodeAtAddress(const void * address) { // There should have been an int3 there already. Since we already put it in there, // we should be able to safely read it out. -#if defined(DBG_TARGET_ARM) || defined(DBG_TARGET_ARM64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) PRD_TYPE opcodeTest = 0; -#elif defined(DBG_TARGET_AMD64) || defined(DBG_TARGET_X86) +#elif defined(TARGET_AMD64) || defined(TARGET_X86) BYTE opcodeTest = 0; #else PORTABILITY_ASSERT("NYI: Architecture specific opcode type to read"); @@ -9098,10 +9098,10 @@ CordbProcess::SetUnmanagedBreakpointInternal(CORDB_ADDRESS address, ULONG32 bufs HRESULT hr = S_OK; NativePatch * p = NULL; -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) const BYTE patch = CORDbg_BREAK_INSTRUCTION; BYTE opcode; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) const PRD_TYPE patch = CORDbg_BREAK_INSTRUCTION; PRD_TYPE opcode; #else @@ -9140,10 +9140,10 @@ CordbProcess::SetUnmanagedBreakpointInternal(CORDB_ADDRESS address, ULONG32 bufs goto ErrExit; // It's all successful, so now update our out-params & internal bookkeaping. -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) opcode = (BYTE)p->opcode; buffer[0] = opcode; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) opcode = p->opcode; memcpy_s(buffer, bufsize, &opcode, sizeof(opcode)); #else @@ -9728,7 +9728,7 @@ void CordbProcess::MarshalManagedEvent(DebuggerIPCEvent * pManagedEvent) // The event still needs to be Marshaled before being used. 
(see code:CordbProcess::MarshalManagedEvent) // //--------------------------------------------------------------------------------------- -#if defined(_MSC_VER) && defined(_TARGET_ARM_) +#if defined(_MSC_VER) && defined(TARGET_ARM) // This is a temporary workaround for an ARM specific MS C++ compiler bug (internal LKG build 18.1). // Branch < if (ptrRemoteManagedEvent == NULL) > was always taken and the function always returned false. // TODO: It should be removed once the bug is fixed. @@ -9780,7 +9780,7 @@ bool CordbProcess::CopyManagedEventFromTarget( return true; } -#if defined(_MSC_VER) && defined(_TARGET_ARM_) +#if defined(_MSC_VER) && defined(TARGET_ARM) #pragma optimize("", on) #endif @@ -11246,7 +11246,7 @@ const EXCEPTION_RECORD * CordbProcess::ValidateExceptionRecord( // // @dbgtodo - , cross-plat: Once we do cross-plat, these should be based off target-architecture not host's. -#if defined(BIT64) +#if defined(HOST_64BIT) if (format != FORMAT_WINDOWS_EXCEPTIONRECORD64) { ThrowHR(E_INVALIDARG); @@ -12790,9 +12790,9 @@ void CordbProcess::HandleDebugEventForInteropDebugging(const DEBUG_EVENT * pEven tempDebugContext.ContextFlags = DT_CONTEXT_FULL; DbiGetThreadContext(pUnmanagedThread->m_handle, &tempDebugContext); CordbUnmanagedThread::LogContext(&tempDebugContext); -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) const ULONG_PTR breakpointOpcodeSize = 1; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) const ULONG_PTR breakpointOpcodeSize = 4; #else const ULONG_PTR breakpointOpcodeSize = 1; @@ -13009,7 +13009,7 @@ void CordbProcess::HandleDebugEventForInteropDebugging(const DEBUG_EVENT * pEven // Because hijacks don't return normally they might have pushed handlers without poping them // back off. To take care of that we explicitly restore the old SEH chain. - #ifdef DBG_TARGET_X86 + #ifdef TARGET_X86 hr = pUnmanagedThread->RestoreLeafSeh(); _ASSERTE(SUCCEEDED(hr)); #endif @@ -13382,7 +13382,7 @@ void EnableDebugTrace(CordbUnmanagedThread *ut) return; // Give us a nop so that we can setip in the optimized case. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 __asm { nop } @@ -14572,7 +14572,7 @@ void CordbWin32EventThread::ExitProcess(bool fDetach) // and dispatch it inband w/the other callbacks. if (!fDetach) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Cleanup the transport pipe and semaphore files that might be left by the target (LS) process. m_pNativePipeline->CleanupTargetProcess(); #endif diff --git a/src/coreclr/src/debug/di/rsmain.cpp b/src/coreclr/src/debug/di/rsmain.cpp index 83997541ad8ec..f58026a889ed9 100644 --- a/src/coreclr/src/debug/di/rsmain.cpp +++ b/src/coreclr/src/debug/di/rsmain.cpp @@ -489,7 +489,7 @@ void CordbCommonBase::InitializeCommon() unsigned level = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_LogLevel, LL_INFO1000); unsigned bytesPerThread = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_StressLogSize, STRESSLOG_CHUNK_SIZE * 2); unsigned totalBytes = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_TotalStressLogSize, STRESSLOG_CHUNK_SIZE * 1024); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX StressLog::Initialize(facilities, level, bytesPerThread, totalBytes, GetModuleInst()); #else StressLog::Initialize(facilities, level, bytesPerThread, totalBytes, NULL); @@ -519,7 +519,7 @@ void CordbCommonBase::InitializeCommon() // setting this since V1.0 and removing it may be a breaking change. 
void CordbCommonBase::AddDebugPrivilege() { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HANDLE hToken; TOKEN_PRIVILEGES Privileges; BOOL fSucc; @@ -2013,7 +2013,7 @@ HRESULT Cordb::EnumerateProcesses(ICorDebugProcessEnum **ppProcesses) // typedef LONG NTSTATUS; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX typedef BOOL (*NTQUERYSYSTEMINFORMATION)(SYSTEM_INFORMATION_CLASS SystemInformationClass, PVOID SystemInformation, ULONG SystemInformationLength, diff --git a/src/coreclr/src/debug/di/rspriv.h b/src/coreclr/src/debug/di/rspriv.h index 573e4ea8fa868..7b4ff27f62606 100644 --- a/src/coreclr/src/debug/di/rspriv.h +++ b/src/coreclr/src/debug/di/rspriv.h @@ -136,7 +136,7 @@ class DbgTransportSession; class ShimProcess; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX extern HINSTANCE GetModuleInst(); #endif @@ -3969,8 +3969,8 @@ class CordbProcess : // This has m_cPatch elements. PRD_TYPE *m_rgUncommitedOpcode; - // CORDB_ADDRESS's are UINT_PTR's (64 bit under BIT64, 32 bit otherwise) -#if defined(DBG_TARGET_64BIT) + // CORDB_ADDRESS's are UINT_PTR's (64 bit under HOST_64BIT, 32 bit otherwise) +#if defined(TARGET_64BIT) #define MAX_ADDRESS (_UI64_MAX) #else #define MAX_ADDRESS (_UI32_MAX) @@ -6142,15 +6142,15 @@ class CordbThread : public CordbBase, public ICorDebugThread, void MarkStackFramesDirty(); -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) // Converts the values in the floating point register area of the context to real number values. void Get32bitFPRegisters(CONTEXT * pContext); -#elif defined(DBG_TARGET_AMD64) || defined(DBG_TARGET_ARM64) || defined(DBG_TARGET_ARM) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Converts the values in the floating point register area of the context to real number values. void Get64bitFPRegisters(FPRegister64 * rgContextFPRegisters, int start, int nRegisters); -#endif // DBG_TARGET_X86 +#endif // TARGET_X86 // Initializes the float state members of this instance of CordbThread. This function gets the context and // converts the floating point values from their context representation to real number values. 
@@ -10554,7 +10554,7 @@ class CordbUnmanagedThread : public CordbBase return (DWORD) this->m_id; } -#ifdef DBG_TARGET_X86 +#ifdef TARGET_X86 // Stores the thread's current leaf SEH handler HRESULT SaveCurrentLeafSeh(); // Restores the thread's leaf SEH handler from the previously saved value @@ -10602,7 +10602,7 @@ class CordbUnmanagedThread : public CordbBase ULONG_PTR m_raiseExceptionExceptionInformation[EXCEPTION_MAXIMUM_PARAMETERS]; -#ifdef DBG_TARGET_X86 +#ifdef TARGET_X86 // the SEH handler which was the leaf when SaveCurrentSeh was called (prior to hijack) REMOTE_PTR m_pSavedLeafSeh; #endif @@ -11743,7 +11743,7 @@ inline void ValidateOrThrow(const void * p) // aligns argBase on platforms that require it else it's a no-op inline void AlignAddressForType(CordbType* pArgType, CORDB_ADDRESS& argBase) { -#ifdef DBG_TARGET_ARM +#ifdef TARGET_ARM // TODO: review the following #ifdef FEATURE_64BIT_ALIGNMENT BOOL align = FALSE; @@ -11753,7 +11753,7 @@ inline void AlignAddressForType(CordbType* pArgType, CORDB_ADDRESS& argBase) if (align) argBase = ALIGN_ADDRESS(argBase, 8); #endif // FEATURE_64BIT_ALIGNMENT -#endif // DBG_TARGET_ARM +#endif // TARGET_ARM } //----------------------------------------------------------------------------- diff --git a/src/coreclr/src/debug/di/rsstackwalk.cpp b/src/coreclr/src/debug/di/rsstackwalk.cpp index 83b3de28b8f38..63f555f7e1303 100644 --- a/src/coreclr/src/debug/di/rsstackwalk.cpp +++ b/src/coreclr/src/debug/di/rsstackwalk.cpp @@ -617,9 +617,9 @@ HRESULT CordbStackWalk::GetFrameWorker(ICorDebugFrame ** ppFrame) _ASSERTE(fSuccess); m_fIsOneFrameAhead = true; -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) frameData.fp = pDAC->GetFramePointer(m_pSFIHandle); -#endif // DBG_TARGET_X86 +#endif // TARGET_X86 // currentFuncData contains general information about the method. // It has no information about any particular jitted instance of the method. @@ -797,9 +797,9 @@ HRESULT CordbStackWalk::GetFrameWorker(ICorDebugFrame ** ppFrame) _ASSERTE(fSuccess); m_fIsOneFrameAhead = true; -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) frameData.fp = pDAC->GetFramePointer(m_pSFIHandle); -#endif // DBG_TARGET_X86 +#endif // TARGET_X86 // Lookup the appdomain that the thread was in when it was executing code for this frame. We pass this // to the frame when we create it so we can properly resolve locals in that frame later. diff --git a/src/coreclr/src/debug/di/rsthread.cpp b/src/coreclr/src/debug/di/rsthread.cpp index 596ae22b64c4c..3645cc785aaf3 100644 --- a/src/coreclr/src/debug/di/rsthread.cpp +++ b/src/coreclr/src/debug/di/rsthread.cpp @@ -1410,13 +1410,13 @@ HRESULT CordbThread::FindFrame(ICorDebugFrame ** ppFrame, FramePointer fp) ICorDebugFrame * pIFrame = pSSW->GetFrame(i); CordbFrame * pCFrame = CordbFrame::GetCordbFrameFromInterface(pIFrame); -#if defined(BIT64) +#if defined(HOST_64BIT) // On 64-bit we can simply compare the FramePointer. if (pCFrame->GetFramePointer() == fp) -#else // !BIT64 +#else // !HOST_64BIT // On other platforms, we need to do a more elaborate check. 
if (pCFrame->IsContainedInFrame(fp)) -#endif // BIT64 +#endif // HOST_64BIT { *ppFrame = pIFrame; (*ppFrame)->AddRef(); @@ -1430,18 +1430,18 @@ HRESULT CordbThread::FindFrame(ICorDebugFrame ** ppFrame, FramePointer fp) -#if defined(CROSS_COMPILE) && (defined(_TARGET_ARM64_) || defined(_TARGET_ARM_)) +#if defined(CROSS_COMPILE) && (defined(TARGET_ARM64) || defined(TARGET_ARM)) extern "C" double FPFillR8(void* pFillSlot) { _ASSERTE(!"nyi for platform"); return 0; } -#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) extern "C" double FPFillR8(void* pFillSlot); #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // CordbThread::Get32bitFPRegisters // Converts the values in the floating point register area of the context to real number values. See @@ -1531,7 +1531,7 @@ void CordbThread::Get32bitFPRegisters(CONTEXT * pContext) m_floatStackTop = floatStackTop; } // CordbThread::Get32bitFPRegisters -#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // CordbThread::Get64bitFPRegisters // Converts the values in the floating point register area of the context to real number values. See @@ -1559,7 +1559,7 @@ void CordbThread::Get64bitFPRegisters(FPRegister64 * rgContextFPRegisters, int s } } // CordbThread::Get64bitFPRegisters -#endif // _TARGET_X86_ +#endif // TARGET_X86 // CordbThread::LoadFloatState // Initializes the float state members of this instance of CordbThread. This function gets the context and @@ -1584,18 +1584,18 @@ void CordbThread::LoadFloatState() DT_CONTEXT tempContext; GetProcess()->GetDAC()->GetContext(m_vmThreadToken, &tempContext); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) Get32bitFPRegisters((CONTEXT*) &tempContext); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // we have no fixed-value registers, so we begin with the first one and initialize all 16 Get64bitFPRegisters((FPRegister64*) &(tempContext.Xmm0), 0, 16); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) Get64bitFPRegisters((FPRegister64*) &(tempContext.V), 0, 32); -#elif defined (_TARGET_ARM_) +#elif defined (TARGET_ARM) Get64bitFPRegisters((FPRegister64*) &(tempContext.D), 0, 32); #else _ASSERTE(!"nyi for platform"); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 m_fFloatStateValid = true; } // CordbThread::LoadFloatState @@ -2785,7 +2785,7 @@ CordbUnmanagedThread::CordbUnmanagedThread(CordbProcess *pProcess, DWORD dwThrea m_pTLSExtendedArray(NULL), m_state(CUTS_None), m_originalHandler(NULL), -#ifdef DBG_TARGET_X86 +#ifdef TARGET_X86 m_pSavedLeafSeh(NULL), #endif m_stackBase(0), @@ -2852,13 +2852,13 @@ HRESULT CordbUnmanagedThread::LoadTLSArrayPtr(void) // Just simple math on NT with a small tls index. // The TLS slots for 0-63 are embedded in the TIB. 
-#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) m_pTLSArray = (BYTE*) m_threadLocalBase + WINNT_TLS_OFFSET_X86; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) m_pTLSArray = (BYTE*) m_threadLocalBase + WINNT_TLS_OFFSET_AMD64; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) m_pTLSArray = (BYTE*) m_threadLocalBase + WINNT_TLS_OFFSET_ARM; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) m_pTLSArray = (BYTE*) m_threadLocalBase + WINNT_TLS_OFFSET_ARM64; #else PORTABILITY_ASSERT("Implement OOP TLS on your platform"); @@ -2873,13 +2873,13 @@ HRESULT CordbUnmanagedThread::LoadTLSArrayPtr(void) // never move once we find it for a given thread, so we // cache it here so we don't always have to perform two // ReadProcessMemory's. -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) void *ppTLSArray = (BYTE*) m_threadLocalBase + WINNT5_TLSEXPANSIONPTR_OFFSET_X86; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) void *ppTLSArray = (BYTE*) m_threadLocalBase + WINNT5_TLSEXPANSIONPTR_OFFSET_AMD64; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) void *ppTLSArray = (BYTE*) m_threadLocalBase + WINNT5_TLSEXPANSIONPTR_OFFSET_ARM; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) void *ppTLSArray = (BYTE*) m_threadLocalBase + WINNT5_TLSEXPANSIONPTR_OFFSET_ARM64; #else PORTABILITY_ASSERT("Implement OOP TLS on your platform"); @@ -2895,7 +2895,7 @@ HRESULT CordbUnmanagedThread::LoadTLSArrayPtr(void) /* VOID CordbUnmanagedThread::VerifyFSChain() { -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) DT_CONTEXT temp; temp.ContextFlags = DT_CONTEXT_FULL; DbiGetThreadContext(m_handle, &temp); @@ -2956,7 +2956,7 @@ VOID CordbUnmanagedThread::VerifyFSChain() return; }*/ -#ifdef DBG_TARGET_X86 +#ifdef TARGET_X86 HRESULT CordbUnmanagedThread::SaveCurrentLeafSeh() { _ASSERTE(m_pSavedLeafSeh == NULL); @@ -3763,26 +3763,26 @@ VOID CordbUnmanagedThread::EndStepping() // Writes some details of the given context into the debugger log VOID CordbUnmanagedThread::LogContext(DT_CONTEXT* pContext) { -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) LOG((LF_CORDB, LL_INFO10000, "CUT::LC: Eip=0x%08x, Esp=0x%08x, Eflags=0x%08x\n", pContext->Eip, pContext->Esp, pContext->EFlags)); -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) LOG((LF_CORDB, LL_INFO10000, "CUT::LC: Rip=" FMT_ADDR ", Rsp=" FMT_ADDR ", Eflags=0x%08x\n", DBG_ADDR(pContext->Rip), DBG_ADDR(pContext->Rsp), pContext->EFlags)); // EFlags is still 32bits on AMD64 -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) LOG((LF_CORDB, LL_INFO10000, "CUT::LC: Pc=" FMT_ADDR ", Sp=" FMT_ADDR ", Lr=" FMT_ADDR ", Cpsr=" FMT_ADDR "\n", DBG_ADDR(pContext->Pc), DBG_ADDR(pContext->Sp), DBG_ADDR(pContext->Lr), DBG_ADDR(pContext->Cpsr))); -#else // DBG_TARGET_X86 +#else // TARGET_X86 PORTABILITY_ASSERT("LogContext needs a PC and stack pointer."); -#endif // DBG_TARGET_X86 +#endif // TARGET_X86 } // Hijacks this thread using the FirstChanceSuspend hijack @@ -3914,7 +3914,7 @@ HRESULT CordbUnmanagedThread::SetupFirstChanceHijack(EHijackReason::EHijackReaso { // We save off the SEH handler on X86 to make sure we restore it properly after the hijack is complete // The hijacks don't return normally and the SEH chain might have handlers added that don't get removed by default -#ifdef DBG_TARGET_X86 +#ifdef TARGET_X86 hr = SaveCurrentLeafSeh(); if(FAILED(hr)) ThrowHR(hr); @@ -3970,7 +3970,7 @@ HRESULT CordbUnmanagedThread::SetupGenericHijack(DWORD eventCode, const EXCEPTIO return 
HRESULT_FROM_WIN32(GetLastError()); } -#if defined(DBG_TARGET_AMD64) || defined(DBG_TARGET_ARM64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // On X86 Debugger::GenericHijackFunc() ensures the stack is walkable // by simply using the EBP chain, therefore we can execute the hijack @@ -4018,7 +4018,7 @@ HRESULT CordbUnmanagedThread::SetupGenericHijack(DWORD eventCode, const EXCEPTIO } // else (non-threadstore threads) fallthrough -#endif // DBG_TARGET_AMD64 || defined(DBG_TARGET_ARM64) +#endif // TARGET_AMD64 || defined(TARGET_ARM64) // Remember that we've hijacked the thread. SetState(CUTS_GenericHijacked); @@ -4183,7 +4183,7 @@ void CordbUnmanagedThread::SetupForSkipBreakpoint(NativePatch * pNativePatch) fTrapOnSkip = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgTrapOnSkip); } #endif -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) STRESS_LOG2(LF_CORDB, LL_INFO100, "CUT::SetupSkip. addr=%p. Opcode=%x\n", pNativePatch->pAddress, (DWORD) pNativePatch->opcode); #endif @@ -4236,11 +4236,11 @@ void CordbUnmanagedThread::FixupForSkipBreakpoint() inline TADDR GetSP(DT_CONTEXT* context) { -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) return (TADDR)context->Esp; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) return (TADDR)context->Rsp; -#elif defined(DBG_TARGET_ARM) || defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) return (TADDR)context->Sp; #else _ASSERTE(!"nyi for platform"); @@ -4383,17 +4383,17 @@ void CordbUnmanagedThread::SaveRaiseExceptionEntryContext() // calculate the exception that we would expect to come from this invocation of RaiseException REMOTE_PTR pExceptionInformation = NULL; -#if defined(DBG_TARGET_AMD64) +#if defined(TARGET_AMD64) m_raiseExceptionExceptionCode = (DWORD)m_raiseExceptionEntryContext.Rcx; m_raiseExceptionExceptionFlags = (DWORD)m_raiseExceptionEntryContext.Rdx; m_raiseExceptionNumberParameters = (DWORD)m_raiseExceptionEntryContext.R8; pExceptionInformation = (REMOTE_PTR)m_raiseExceptionEntryContext.R9; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) m_raiseExceptionExceptionCode = (DWORD)m_raiseExceptionEntryContext.X0; m_raiseExceptionExceptionFlags = (DWORD)m_raiseExceptionEntryContext.X1; m_raiseExceptionNumberParameters = (DWORD)m_raiseExceptionEntryContext.X2; pExceptionInformation = (REMOTE_PTR)m_raiseExceptionEntryContext.X3; -#elif defined(DBG_TARGET_X86) +#elif defined(TARGET_X86) hr = m_pProcess->SafeReadStruct(PTR_TO_CORDB_ADDRESS((BYTE*)m_raiseExceptionEntryContext.Esp+4), &m_raiseExceptionExceptionCode); if(FAILED(hr)) { @@ -4513,9 +4513,9 @@ BOOL CordbUnmanagedThread::IsExceptionFromLastRaiseException(const EXCEPTION_REC // This flavor is assuming our caller already knows the opcode. HRESULT ApplyRemotePatch(CordbProcess * pProcess, const void * pRemoteAddress) { -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) const BYTE patch = CORDbg_BREAK_INSTRUCTION; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) const PRD_TYPE patch = CORDbg_BREAK_INSTRUCTION; #else const BYTE patch = 0; @@ -4530,10 +4530,10 @@ HRESULT ApplyRemotePatch(CordbProcess * pProcess, const void * pRemoteAddress) // Get the opcode that we're replacing. HRESULT ApplyRemotePatch(CordbProcess * pProcess, const void * pRemoteAddress, PRD_TYPE * pOpcode) { -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // Read out opcode. 
1 byte on x86 BYTE opcode; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) // Read out opcode. 4 bytes on arm64 PRD_TYPE opcode; #else @@ -4557,10 +4557,10 @@ HRESULT ApplyRemotePatch(CordbProcess * pProcess, const void * pRemoteAddress, P //----------------------------------------------------------------------------- HRESULT RemoveRemotePatch(CordbProcess * pProcess, const void * pRemoteAddress, PRD_TYPE opcode) { -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_AMD64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // Replace the BP w/ the opcode. BYTE opcode2 = (BYTE) opcode; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) // 4 bytes on arm64 PRD_TYPE opcode2 = opcode; #else @@ -5897,13 +5897,13 @@ ULONG32 CordbNativeFrame::GetIPOffset() TADDR CordbNativeFrame::GetReturnRegisterValue() { -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) return (TADDR)m_context.Eax; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) return (TADDR)m_context.Rax; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) return (TADDR)m_context.R0; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) return (TADDR)m_context.X0; #else _ASSERTE(!"nyi for platform"); @@ -6208,13 +6208,13 @@ HRESULT CordbNativeFrame::GetStackParameterSize(ULONG32 * pSize) ThrowHR(E_INVALIDARG); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) IDacDbiInterface * pDAC = GetProcess()->GetDAC(); *pSize = pDAC->GetStackParameterSize(PTR_TO_CORDB_ADDRESS(CORDbgGetIP(&m_context))); -#else // !_TARGET_X86_ +#else // !TARGET_X86 hr = S_FALSE; *pSize = 0; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } EX_CATCH_HRESULT(hr); @@ -6236,13 +6236,13 @@ UINT_PTR * CordbNativeFrame::GetAddressOfRegister(CorDebugRegister regNum) const ret = (UINT_PTR*)GetSPAddress(&m_rd); break; -#if !defined(DBG_TARGET_AMD64) && !defined(DBG_TARGET_ARM) // @ARMTODO +#if !defined(TARGET_AMD64) && !defined(TARGET_ARM) // @ARMTODO case REGISTER_FRAME_POINTER: ret = (UINT_PTR*)GetFPAddress(&m_rd); break; #endif -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) case REGISTER_X86_EAX: ret = (UINT_PTR*)&m_rd.Eax; break; @@ -6267,7 +6267,7 @@ UINT_PTR * CordbNativeFrame::GetAddressOfRegister(CorDebugRegister regNum) const ret = (UINT_PTR*)&m_rd.Edi; break; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) case REGISTER_AMD64_RBP: ret = (UINT_PTR*)&m_rd.Rbp; break; @@ -6327,7 +6327,7 @@ UINT_PTR * CordbNativeFrame::GetAddressOfRegister(CorDebugRegister regNum) const case REGISTER_AMD64_R15: ret = (UINT_PTR*)&m_rd.R15; break; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) case REGISTER_ARM_R0: ret = (UINT_PTR*)&m_rd.R0; break; @@ -6387,7 +6387,7 @@ UINT_PTR * CordbNativeFrame::GetAddressOfRegister(CorDebugRegister regNum) const case REGISTER_ARM_PC: ret = (UINT_PTR*)&m_rd.PC; break; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) case REGISTER_ARM64_X0: case REGISTER_ARM64_X1: case REGISTER_ARM64_X2: @@ -6460,13 +6460,13 @@ CORDB_ADDRESS CordbNativeFrame::GetLeftSideAddressOfRegister(CorDebugRegister re switch (regNum) { -#if !defined(DBG_TARGET_AMD64) +#if !defined(TARGET_AMD64) case REGISTER_FRAME_POINTER: ret = m_rd.pFP; break; #endif -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) case REGISTER_X86_EAX: ret = m_rd.pEax; break; @@ -6491,7 +6491,7 @@ CORDB_ADDRESS CordbNativeFrame::GetLeftSideAddressOfRegister(CorDebugRegister re ret = m_rd.pEdi; break; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) case REGISTER_AMD64_RBP: ret = m_rd.pRbp; break; 
@@ -6844,12 +6844,12 @@ HRESULT CordbNativeFrame::GetLocalRegisterValue(CorDebugRegister reg, VALIDATE_POINTER_TO_OBJECT(ppValue, ICorDebugValue **); ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess()); -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_64BIT) -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) || defined(TARGET_64BIT) +#if defined(TARGET_X86) if ((reg >= REGISTER_X86_FPSTACK_0) && (reg <= REGISTER_X86_FPSTACK_7)) -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) if ((reg >= REGISTER_AMD64_XMM0) && (reg <= REGISTER_AMD64_XMM15)) -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) if ((reg >= REGISTER_ARM64_V0) && (reg <= REGISTER_ARM64_V31)) #endif { @@ -7093,17 +7093,17 @@ HRESULT CordbNativeFrame::GetLocalFloatingPointValue(DWORD index, (et != ELEMENT_TYPE_R8)) return E_INVALIDARG; -#if defined(DBG_TARGET_AMD64) +#if defined(TARGET_AMD64) if (!((index >= REGISTER_AMD64_XMM0) && (index <= REGISTER_AMD64_XMM15))) return E_INVALIDARG; index -= REGISTER_AMD64_XMM0; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) if (!((index >= REGISTER_ARM64_V0) && (index <= REGISTER_ARM64_V31))) return E_INVALIDARG; index -= REGISTER_ARM64_V0; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) if (!((index >= REGISTER_ARM_D0) && (index <= REGISTER_ARM_D31))) return E_INVALIDARG; @@ -7133,7 +7133,7 @@ HRESULT CordbNativeFrame::GetLocalFloatingPointValue(DWORD index, EX_CATCH_HRESULT(hr); if (SUCCEEDED(hr)) { -#if !defined(DBG_TARGET_64BIT) +#if !defined(TARGET_64BIT) // This is needed on x86 because we are dealing with a stack. index = pThread->m_floatStackTop - index; #endif @@ -7142,7 +7142,7 @@ HRESULT CordbNativeFrame::GetLocalFloatingPointValue(DWORD index, sizeof(pThread->m_floatValues[0]))) return E_INVALIDARG; -#ifdef DBG_TARGET_X86 +#ifdef TARGET_X86 // A workaround (sort of) to get around the difference in format between // a float value and a double value. We can't simply cast a double pointer to // a float pointer. Instead, we have to cast the double itself to a float. @@ -7500,12 +7500,12 @@ HRESULT CordbJITILFrame::Init() IfFailThrow(GetArgumentType(0, &pArgType)); ULONG32 argSize = 0; IfFailThrow(pArgType->GetUnboxedObjectSize(&argSize)); -#if defined(_TARGET_X86_) // (STACK_GROWS_DOWN_ON_ARGS_WALK) +#if defined(TARGET_X86) // (STACK_GROWS_DOWN_ON_ARGS_WALK) m_FirstArgAddr = argBase - argSize; -#else // !_TARGET_X86_ (STACK_GROWS_UP_ON_ARGS_WALK) +#else // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK) AlignAddressForType(pArgType, argBase); m_FirstArgAddr = argBase; -#endif // !_TARGET_X86_ (STACK_GROWS_UP_ON_ARGS_WALK) +#endif // !TARGET_X86 (STACK_GROWS_UP_ON_ARGS_WALK) } // The stackwalking code can't always successfully retrieve the generics type token. 
@@ -8133,14 +8133,14 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, // m_FirstArgAddr will already be aligned on platforms that require alignment CORDB_ADDRESS rpCur = m_FirstArgAddr; -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_ARM) +#if defined(TARGET_X86) || defined(TARGET_ARM) cbArchitectureMin = 4; -#elif defined(DBG_TARGET_64BIT) +#elif defined(TARGET_64BIT) cbArchitectureMin = 8; #else cbArchitectureMin = 8; //REVISIT_TODO not sure if this is correct PORTABILITY_ASSERT("What is the architecture-dependent minimum word size?"); -#endif // DBG_TARGET_X86 +#endif // TARGET_X86 // make a copy of the cached SigParser SigParser sigParser = m_sigParserCached; @@ -8159,7 +8159,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, IfFailThrow(pArgType->GetUnboxedObjectSize(&cbType)); -#if defined(DBG_TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK +#if defined(TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK // The the rpCur pointer starts off in the right spot for the // first argument, but thereafter we have to decrement it // before getting the variable's location from it. So increment @@ -8196,7 +8196,7 @@ HRESULT CordbJITILFrame::FabricateNativeInfo(DWORD dwIndex, IfFailThrow(pArgType->GetUnboxedObjectSize(&cbType)); -#if defined(DBG_TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK +#if defined(TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK rpCur -= max(cbType, cbArchitectureMin); m_rgNVI[i].loc.vlFixedVarArg.vlfvOffset = (unsigned)(m_FirstArgAddr - rpCur); @@ -8382,21 +8382,21 @@ HRESULT CordbJITILFrame::GetNativeVariable(CordbType *type, } break; -#if defined(DBG_TARGET_64BIT) || defined(DBG_TARGET_ARM) +#if defined(TARGET_64BIT) || defined(TARGET_ARM) case ICorDebugInfo::VLT_REG_FP: -#if defined(DBG_TARGET_ARM) // @ARMTODO +#if defined(TARGET_ARM) // @ARMTODO hr = E_NOTIMPL; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) hr = m_nativeFrame->GetLocalFloatingPointValue(pNativeVarInfo->loc.vlReg.vlrReg + REGISTER_AMD64_XMM0, type, ppValue); -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) hr = m_nativeFrame->GetLocalFloatingPointValue(pNativeVarInfo->loc.vlReg.vlrReg + REGISTER_ARM64_V0, type, ppValue); #else #error Platform not implemented -#endif // DBG_TARGET_ARM @ARMTODO +#endif // TARGET_ARM @ARMTODO break; -#endif // DBG_TARGET_64BIT || DBG_TARGET_ARM +#endif // TARGET_64BIT || TARGET_ARM case ICorDebugInfo::VLT_STK_BYREF: { @@ -8462,7 +8462,7 @@ HRESULT CordbJITILFrame::GetNativeVariable(CordbType *type, break; case ICorDebugInfo::VLT_FPSTK: -#if defined(DBG_TARGET_ARM) // @ARMTODO +#if defined(TARGET_ARM) // @ARMTODO hr = E_NOTIMPL; #else /* @@ -8482,7 +8482,7 @@ HRESULT CordbJITILFrame::GetNativeVariable(CordbType *type, CORDB_ADDRESS pRemoteValue; -#if defined(DBG_TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK +#if defined(TARGET_X86) // STACK_GROWS_DOWN_ON_ARGS_WALK pRemoteValue = m_FirstArgAddr - pNativeVarInfo->loc.vlFixedVarArg.vlfvOffset; // Remember to subtract out this amount pRemoteValue += sizeof(((CORINFO_VarArgInfo*)0)->argBytes); @@ -8820,24 +8820,24 @@ HRESULT CordbJITILFrame::GetReturnValueForType(CordbType *pType, ICorDebugValue { -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) const CorDebugRegister floatRegister = REGISTER_X86_FPSTACK_0; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) const CorDebugRegister floatRegister = REGISTER_AMD64_XMM0; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) const CorDebugRegister floatRegister = REGISTER_ARM64_V0; -#elif defined(DBG_TARGET_ARM) 
+#elif defined(TARGET_ARM) const CorDebugRegister floatRegister = REGISTER_ARM_D0; #endif -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) const CorDebugRegister ptrRegister = REGISTER_X86_EAX; const CorDebugRegister ptrHighWordRegister = REGISTER_X86_EDX; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) const CorDebugRegister ptrRegister = REGISTER_AMD64_RAX; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) const CorDebugRegister ptrRegister = REGISTER_ARM64_X0; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) const CorDebugRegister ptrRegister = REGISTER_ARM_R0; const CorDebugRegister ptrHighWordRegister = REGISTER_ARM_R1; @@ -8853,7 +8853,7 @@ HRESULT CordbJITILFrame::GetReturnValueForType(CordbType *pType, ICorDebugValue case ELEMENT_TYPE_R8: return m_nativeFrame->GetLocalFloatingPointValue(floatRegister, pType, ppReturnValue); -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_ARM) +#if defined(TARGET_X86) || defined(TARGET_ARM) case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: return m_nativeFrame->GetLocalDoubleRegisterValue(ptrHighWordRegister, ptrRegister, pType, ppReturnValue); diff --git a/src/coreclr/src/debug/di/shimlocaldatatarget.cpp b/src/coreclr/src/debug/di/shimlocaldatatarget.cpp index c8b0904a07082..194e10633fac7 100644 --- a/src/coreclr/src/debug/di/shimlocaldatatarget.cpp +++ b/src/coreclr/src/debug/di/shimlocaldatatarget.cpp @@ -82,7 +82,7 @@ class ShimLocalDataTarget : public ShimDataTarget // Note: throws BOOL CompatibleHostAndTargetPlatforms(HANDLE hTargetProcess) { -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) return TRUE; #else // get the platform for the host process @@ -279,17 +279,17 @@ HRESULT STDMETHODCALLTYPE ShimLocalDataTarget::GetPlatform( CorDebugPlatform *pPlatform) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #error ShimLocalDataTarget is not implemented on PAL systems yet #endif // Assume that we're running on Windows for now. -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) *pPlatform = CORDB_PLATFORM_WINDOWS_X86; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) *pPlatform = CORDB_PLATFORM_WINDOWS_AMD64; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) *pPlatform = CORDB_PLATFORM_WINDOWS_ARM; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) *pPlatform = CORDB_PLATFORM_WINDOWS_ARM64; #else #error Unknown Processor. @@ -463,7 +463,7 @@ ShimLocalDataTarget::ContinueStatusChanged( HRESULT STDMETHODCALLTYPE ShimLocalDataTarget::VirtualUnwind(DWORD threadId, ULONG32 contextSize, PBYTE context) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX _ASSERTE(!"ShimLocalDataTarget::VirtualUnwind NOT IMPLEMENTED"); #endif return E_NOTIMPL; diff --git a/src/coreclr/src/debug/di/shimprocess.cpp b/src/coreclr/src/debug/di/shimprocess.cpp index bdfb9ac17ce14..c6f5ae3bf791e 100644 --- a/src/coreclr/src/debug/di/shimprocess.cpp +++ b/src/coreclr/src/debug/di/shimprocess.cpp @@ -676,7 +676,7 @@ bool ShimProcess::RemoveDuplicateCreationEventIfPresent(void * pKey) // It can be passed into ICorDebugProcess4::Filter. 
CorDebugRecordFormat GetHostExceptionRecordFormat() { -#if defined(BIT64) +#if defined(HOST_64BIT) return FORMAT_WINDOWS_EXCEPTIONRECORD64; #else return FORMAT_WINDOWS_EXCEPTIONRECORD32; @@ -1825,7 +1825,7 @@ HMODULE ShimProcess::GetDacModule() HModuleHolder hDacDll; PathString wszAccessDllPath; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (!PAL_GetPALDirectoryWrapper(wszAccessDllPath)) { ThrowLastError(); @@ -1852,7 +1852,7 @@ HMODULE ShimProcess::GetDacModule() PCWSTR eeFlavor = W("mscordaccore.dll"); -#endif // FEATURE_PAL +#endif // TARGET_UNIX wszAccessDllPath.Append(eeFlavor); hDacDll.Assign(WszLoadLibrary(wszAccessDllPath)); diff --git a/src/coreclr/src/debug/di/shimremotedatatarget.cpp b/src/coreclr/src/debug/di/shimremotedatatarget.cpp index 8e986aed17fdf..261dd08f0c42b 100644 --- a/src/coreclr/src/debug/di/shimremotedatatarget.cpp +++ b/src/coreclr/src/debug/di/shimremotedatatarget.cpp @@ -227,26 +227,26 @@ HRESULT STDMETHODCALLTYPE ShimRemoteDataTarget::GetPlatform( CorDebugPlatform *pPlatform) { -#ifdef FEATURE_PAL - #if defined(DBG_TARGET_X86) +#ifdef TARGET_UNIX + #if defined(TARGET_X86) *pPlatform = CORDB_PLATFORM_POSIX_X86; - #elif defined(DBG_TARGET_AMD64) + #elif defined(TARGET_AMD64) *pPlatform = CORDB_PLATFORM_POSIX_AMD64; - #elif defined(DBG_TARGET_ARM) + #elif defined(TARGET_ARM) *pPlatform = CORDB_PLATFORM_POSIX_ARM; - #elif defined(DBG_TARGET_ARM64) + #elif defined(TARGET_ARM64) *pPlatform = CORDB_PLATFORM_POSIX_ARM64; #else #error Unknown Processor. #endif #else - #if defined(DBG_TARGET_X86) + #if defined(TARGET_X86) *pPlatform = CORDB_PLATFORM_WINDOWS_X86; - #elif defined(DBG_TARGET_AMD64) + #elif defined(TARGET_AMD64) *pPlatform = CORDB_PLATFORM_WINDOWS_AMD64; - #elif defined(DBG_TARGET_ARM) + #elif defined(TARGET_ARM) *pPlatform = CORDB_PLATFORM_WINDOWS_ARM; - #elif defined(DBG_TARGET_ARM64) + #elif defined(TARGET_ARM64) *pPlatform = CORDB_PLATFORM_WINDOWS_ARM64; #else #error Unknown Processor. diff --git a/src/coreclr/src/debug/di/shimstackwalk.cpp b/src/coreclr/src/debug/di/shimstackwalk.cpp index d54a3ecfa7a33..f123749218f9e 100644 --- a/src/coreclr/src/debug/di/shimstackwalk.cpp +++ b/src/coreclr/src/debug/di/shimstackwalk.cpp @@ -15,10 +15,10 @@ #include "stdafx.h" #include "primitives.h" -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) static const ULONG32 REGISTER_X86_MAX = REGISTER_X86_FPSTACK_7 + 1; static const ULONG32 MAX_MASK_COUNT = (REGISTER_X86_MAX + 7) >> 3; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) static const ULONG32 REGISTER_AMD64_MAX = REGISTER_AMD64_XMM15 + 1; static const ULONG32 MAX_MASK_COUNT = (REGISTER_AMD64_MAX + 7) >> 3; #endif @@ -1097,7 +1097,7 @@ void ShimStackWalk::AppendChain(ChainInfo * pChainInfo, StackWalkInfo * pStackWa // We need to send an extra enter-managed chain. _ASSERTE(pChainInfo->m_fLeafNativeContextIsValid); BYTE * sp = reinterpret_cast(CORDbgGetSP(&(pChainInfo->m_leafNativeContext))); -#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM) && !defined(TARGET_ARM64) // Dev11 324806: on ARM we use the caller's SP for a frame's ending delimiter so we cannot // subtract 4 bytes from the chain's ending delimiter else the frame might never be in range. // TODO: revisit overlapping ranges on ARM, it would be nice to make it consistent with the other architectures. 
diff --git a/src/coreclr/src/debug/di/stdafx.h b/src/coreclr/src/debug/di/stdafx.h index fcee5322636e3..ade52e3a27b83 100644 --- a/src/coreclr/src/debug/di/stdafx.h +++ b/src/coreclr/src/debug/di/stdafx.h @@ -54,7 +54,7 @@ #include "utilcode.h" #endif -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM #define DbiGetThreadContext(hThread, lpContext) ::GetThreadContext(hThread, (CONTEXT*)(lpContext)) #define DbiSetThreadContext(hThread, lpContext) ::SetThreadContext(hThread, (CONTEXT*)(lpContext)) #else diff --git a/src/coreclr/src/debug/di/valuehome.cpp b/src/coreclr/src/debug/di/valuehome.cpp index d1a70b760c7b1..0b2f36bea579f 100644 --- a/src/coreclr/src/debug/di/valuehome.cpp +++ b/src/coreclr/src/debug/di/valuehome.cpp @@ -63,7 +63,7 @@ void RegValueHome::SetContextRegister(DT_CONTEXT * pContext, case REGISTER_INSTRUCTION_POINTER: CORDbgSetIP(pContext, (LPVOID)newVal); break; case REGISTER_STACK_POINTER: CORDbgSetSP(pContext, (LPVOID)newVal); break; -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) case REGISTER_FRAME_POINTER: CORDbgSetFP(pContext, (LPVOID)newVal); _UpdateFrame(); break; @@ -80,7 +80,7 @@ void RegValueHome::SetContextRegister(DT_CONTEXT * pContext, case REGISTER_X86_EDI: pContext->Edi = newVal; _UpdateFrame(); break; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) case REGISTER_AMD64_RBP: pContext->Rbp = newVal; _UpdateFrame(); break; case REGISTER_AMD64_RAX: pContext->Rax = newVal; @@ -111,7 +111,7 @@ void RegValueHome::SetContextRegister(DT_CONTEXT * pContext, _UpdateFrame(); break; case REGISTER_AMD64_R15: pContext->R15 = newVal; _UpdateFrame(); break; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) case REGISTER_ARM_R0: pContext->R0 = newVal; _UpdateFrame(); break; case REGISTER_ARM_R1: pContext->R1 = newVal; @@ -140,7 +140,7 @@ void RegValueHome::SetContextRegister(DT_CONTEXT * pContext, _UpdateFrame(); break; case REGISTER_ARM_LR: pContext->Lr = newVal; _UpdateFrame(); break; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) case REGISTER_ARM64_X0: case REGISTER_ARM64_X1: case REGISTER_ARM64_X2: @@ -204,10 +204,10 @@ void RegValueHome::SetEnregisteredValue(MemoryRange newValue, DT_CONTEXT * pCont extendedVal = (SSIZE_T) *(short*)newValue.StartAddress(); break; case 4: _ASSERTE(sizeof(DWORD) == 4); extendedVal = (SSIZE_T) *(int*)newValue.StartAddress(); break; -#if defined(DBG_TARGET_64BIT) +#if defined(TARGET_64BIT) case 8: _ASSERTE(sizeof(ULONGLONG) == 8); extendedVal = (SSIZE_T) *(ULONGLONG*)newValue.StartAddress(); break; -#endif // DBG_TARGET_64BIT +#endif // TARGET_64BIT default: _ASSERTE(!"bad size"); } } @@ -222,10 +222,10 @@ void RegValueHome::SetEnregisteredValue(MemoryRange newValue, DT_CONTEXT * pCont extendedVal = *( WORD*)newValue.StartAddress(); break; case 4: _ASSERTE(sizeof(DWORD) == 4); extendedVal = *(DWORD*)newValue.StartAddress(); break; -#if defined(DBG_TARGET_64BIT) +#if defined(TARGET_64BIT) case 8: _ASSERTE(sizeof(ULONGLONG) == 8); extendedVal = *(ULONGLONG*)newValue.StartAddress(); break; -#endif // DBG_TARGET_64BIT +#endif // TARGET_64BIT default: _ASSERTE(!"bad size"); } } @@ -453,9 +453,9 @@ void FloatRegValueHome::SetEnregisteredValue(MemoryRange newValue, bool fIsSigned) { // TODO: : implement CordbValue::SetEnregisteredValue for RAK_FLOAT - #if defined(DBG_TARGET_AMD64) + #if defined(TARGET_AMD64) PORTABILITY_ASSERT("NYI: SetEnregisteredValue (divalue.cpp): RAK_FLOAT for AMD64"); - #endif // DBG_TARGET_AMD64 + #endif // TARGET_AMD64 _ASSERTE((newValue.Size() == 4) || (newValue.Size() == 8)); @@ -464,14 
+464,14 @@ void FloatRegValueHome::SetEnregisteredValue(MemoryRange newValue, memcpy(&newVal, newValue.StartAddress(), newValue.Size()); - #if defined(DBG_TARGET_X86) + #if defined(TARGET_X86) // This is unfortunately non-portable. Luckily we can live with this for now since we only support // Win/X86 debugging a Mac/X86 platform. - #if !defined(_TARGET_X86_) + #if !defined(TARGET_X86) #error Unsupported target platform - #endif // !_TARGET_X86_ + #endif // !TARGET_X86 // What a pain, on X86 take the floating // point state in the context and make it our current FP @@ -565,7 +565,7 @@ void FloatRegValueHome::SetEnregisteredValue(MemoryRange newValue, : "m"(currentFPUState) ); #endif - #endif // DBG_TARGET_X86 + #endif // TARGET_X86 // update the thread's floating point stack void * valueAddress = (void *) &(m_pFrame->m_pThread->m_floatValues[m_floatIndex]); @@ -844,14 +844,14 @@ void RegisterValueHome::SetEnregisteredValue(MemoryRange src, bool fIsSigned) // or for 64-bit platforms void RegisterValueHome::GetEnregisteredValue(MemoryRange dest) { -#if !defined(DBG_TARGET_X86) +#if !defined(TARGET_X86) _ASSERTE(!"@TODO IA64/AMD64 -- Not Yet Implemented"); ThrowHR(E_NOTIMPL); -#else // DBG_TARGET_X86 +#else // TARGET_X86 _ASSERTE(m_pRemoteRegAddr != NULL); m_pRemoteRegAddr->GetEnregisteredValue(dest); // throws -#endif // !DBG_TARGET_X86 +#endif // !TARGET_X86 } // RegisterValueHome::GetEnregisteredValue // Is this a signed type or unsigned type? diff --git a/src/coreclr/src/debug/ee/amd64/amd64walker.cpp b/src/coreclr/src/debug/ee/amd64/amd64walker.cpp index 5e88560b16ef2..fb2ab17056423 100644 --- a/src/coreclr/src/debug/ee/amd64/amd64walker.cpp +++ b/src/coreclr/src/debug/ee/amd64/amd64walker.cpp @@ -18,7 +18,7 @@ #include "openum.h" #include "amd64InstrDecode.h" -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // // The AMD64 walker is currently pretty minimal. It only recognizes call and return opcodes, plus a few jumps. The rest diff --git a/src/coreclr/src/debug/ee/arm/armwalker.cpp b/src/coreclr/src/debug/ee/arm/armwalker.cpp index 5739ce4f11d39..062e1de8b2122 100644 --- a/src/coreclr/src/debug/ee/arm/armwalker.cpp +++ b/src/coreclr/src/debug/ee/arm/armwalker.cpp @@ -18,7 +18,7 @@ #include "openum.h" -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void NativeWalker::Decode() { diff --git a/src/coreclr/src/debug/ee/arm64/arm64walker.cpp b/src/coreclr/src/debug/ee/arm64/arm64walker.cpp index a7429bb30d17a..9552446760875 100644 --- a/src/coreclr/src/debug/ee/arm64/arm64walker.cpp +++ b/src/coreclr/src/debug/ee/arm64/arm64walker.cpp @@ -15,7 +15,7 @@ #include "frames.h" #include "openum.h" -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 PCODE Expand19bitoffset(PCODE opcode) { diff --git a/src/coreclr/src/debug/ee/controller.cpp b/src/coreclr/src/debug/ee/controller.cpp index e8a6a12d8cb41..03b27612e5a5e 100644 --- a/src/coreclr/src/debug/ee/controller.cpp +++ b/src/coreclr/src/debug/ee/controller.cpp @@ -1388,7 +1388,7 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch) } } // TODO: : determine if this is needed for AMD64 -#if defined(_TARGET_X86_) //REVISIT_TODO what is this?! +#if defined(TARGET_X86) //REVISIT_TODO what is this?! else { DWORD oldProt; @@ -1415,7 +1415,7 @@ bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch) return false; } } -#endif //_TARGET_X86_ +#endif //TARGET_X86 return true; } @@ -1504,7 +1504,7 @@ bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch) // !!! 
IL patch logic assumes reference encoding // // TODO: : determine if this is needed for AMD64 -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) _ASSERTE(*(unsigned short*)(patch->address+1) == CEE_BREAK); *(unsigned short *) (patch->address+1) @@ -1552,7 +1552,7 @@ void DebuggerController::UnapplyPatchAt(DebuggerControllerPatch *patch, // !!! IL patch logic assumes reference encoding // // TODO: : determine if this is needed for AMD64 -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(*(unsigned short*)(address+1) == CEE_BREAK); *(unsigned short *) (address+1) @@ -1626,7 +1626,7 @@ PRD_TYPE DebuggerController::GetPatchedOpcode(CORDB_ADDRESS_TYPE *address) // // TODO: : determine if this is needed for AMD64 // -#ifdef _TARGET_X86_ //what is this?! +#ifdef TARGET_X86 //what is this?! else { // @@ -1635,7 +1635,7 @@ PRD_TYPE DebuggerController::GetPatchedOpcode(CORDB_ADDRESS_TYPE *address) opcode = *(unsigned short*)(address+1); } -#endif //_TARGET_X86_ +#endif //TARGET_X86 } @@ -2253,7 +2253,7 @@ bool DebuggerController::ModuleHasPatches( Module* pModule ) // static bool _AddrIsJITHelper(PCODE addr) { -#if !defined(BIT64) && !defined(FEATURE_PAL) +#if !defined(HOST_64BIT) && !defined(TARGET_UNIX) // Is the address in the runtime dll (clr.dll or coreclr.dll) at all? (All helpers are in // that dll) if (g_runtimeLoadedBaseAddress <= addr && @@ -2285,9 +2285,9 @@ static bool _AddrIsJITHelper(PCODE addr) "_ANIM: address within runtime dll, but not a helper function " "0x%08x\n", addr)); } -#else // !defined(BIT64) && !defined(FEATURE_PAL) +#else // !defined(HOST_64BIT) && !defined(TARGET_UNIX) // TODO: Figure out what we want to do here -#endif // !defined(BIT64) && !defined(FEATURE_PAL) +#endif // !defined(HOST_64BIT) && !defined(TARGET_UNIX) return false; } @@ -4372,7 +4372,7 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread, NativeWalker::DecodeInstructionForPatchSkip(patchBypass, &(m_instrAttrib)); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // The code below handles RIP-relative addressing on AMD64. the original implementation made the assumption that @@ -4415,7 +4415,7 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread, } } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // !FEATURE_EMULATE_SINGLESTEP @@ -4464,14 +4464,14 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread, ControllerLockHolder lockController; g_pEEInterface->MarkThreadForDebugStepping(thread, true); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM WORD opcode2 = 0; if (Is32BitInstruction(patch->opcode)) { opcode2 = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)(((DWORD)patch->address) + 2)); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM thread->BypassWithSingleStep(patch->address, patch->opcode ARM_ARG(opcode2)); m_singleStep = true; @@ -4479,9 +4479,9 @@ DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread, #else // FEATURE_EMULATE_SINGLESTEP -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 patchBypass = NativeWalker::SetupOrSimulateInstructionForPatchSkip(context, m_pSharedPatchBypassBuffer, (const BYTE *)patch->address, patch->opcode); -#endif //_TARGET_ARM64_ +#endif //TARGET_ARM64 //set eip to point to buffer... 
SetIP(context, (PCODE)patchBypass); @@ -4686,7 +4686,7 @@ TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * cont LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: doing the patch-skip thing\n")); -#if defined(_TARGET_ARM64_) && !defined(FEATURE_EMULATE_SINGLESTEP) +#if defined(TARGET_ARM64) && !defined(FEATURE_EMULATE_SINGLESTEP) if (!IsSingleStep(exception->ExceptionCode)) { @@ -4719,7 +4719,7 @@ TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * cont if (m_instrAttrib.m_fIsCall && IsSingleStep(exception->ExceptionCode)) { // Fixup return address on stack -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) SIZE_T *sp = (SIZE_T *) GetSP(context); LOG((LF_CORDB, LL_INFO10000, @@ -4741,7 +4741,7 @@ TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * cont if (IsSingleStep(exception->ExceptionCode)) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Check if the current IP is anywhere near the exception dispatcher logic. // If it is, ignore the exception, as the real exception is coming next. static FARPROC pExcepDispProc = NULL; @@ -4776,7 +4776,7 @@ TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * cont return (TPR_IGNORE_AND_STOP); } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // If the IP is close to the skip patch start, or if we were skipping over a call, then assume the IP needs // adjusting. @@ -4863,7 +4863,7 @@ bool DebuggerPatchSkip::TriggerSingleStep(Thread *thread, const BYTE *ip) return false; } } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Dev11 91932: for RIP-relative writes we need to copy the value that was written in our buffer to the actual address _ASSERTE(m_pSharedPatchBypassBuffer); if (m_pSharedPatchBypassBuffer->RipTargetFixup) @@ -5736,7 +5736,7 @@ bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in) return false; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 LOG((LF_CORDB,LL_INFO1000, "GetJitInfo for pc = 0x%x (addr of " "that value:0x%x)\n", (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)), info->m_activeFrame.registers.PCTAddr)); @@ -6174,7 +6174,7 @@ void DebuggerStepper::TrapStepOut(ControllerStackInfo *info, bool fForceTraditio // There should always be a frame for the parent method. _ASSERTE(info->HasReturnFrame()); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM while (info->HasReturnFrame() && info->m_activeFrame.md != info->GetReturnFrame().md) { StackTraceTicket ticket(info); @@ -8499,7 +8499,7 @@ DebuggerFuncEvalComplete::DebuggerFuncEvalComplete(Thread *thread, void *dest) : DebuggerController(thread, NULL) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM m_pDE = reinterpret_cast(((DWORD)dest) & ~THUMB_CODE)->m_associatedDebuggerEval; #else m_pDE = reinterpret_cast(dest)->m_associatedDebuggerEval; @@ -8523,10 +8523,10 @@ TP_RESULT DebuggerFuncEvalComplete::TriggerPatch(DebuggerControllerPatch *patch, // Restore the thread's context to what it was before we hijacked it for this func eval. 
CONTEXT *pCtx = GetManagedLiveCtx(thread); #ifdef FEATURE_DATABREAKPOINT -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #error Not supported -#endif // FEATURE_PAL -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // TARGET_UNIX +#if defined(TARGET_X86) || defined(TARGET_AMD64) // If a data breakpoint is set while we hit a breakpoint inside a FuncEval, this will make sure the data breakpoint stays m_pDE->m_context.Dr0 = pCtx->Dr0; m_pDE->m_context.Dr1 = pCtx->Dr1; @@ -8936,19 +8936,19 @@ bool DebuggerContinuableExceptionBreakpoint::SendEvent(Thread *thread, bool fIpC bool hitDataBp = false; bool result = false; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #error Not supported -#endif // FEATURE_PAL -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // TARGET_UNIX +#if defined(TARGET_X86) || defined(TARGET_AMD64) PDR6 pdr6 = (PDR6)&(pContext->Dr6); if (pdr6->B0 || pdr6->B1 || pdr6->B2 || pdr6->B3) { hitDataBp = true; } -#else // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#else // defined(TARGET_X86) || defined(TARGET_AMD64) #error Not supported -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) if (hitDataBp) { if (g_pDebugger->IsThreadAtSafePlace(thread)) diff --git a/src/coreclr/src/debug/ee/controller.h b/src/coreclr/src/debug/ee/controller.h index fdaad1ab7b348..2775fe6f21aa5 100644 --- a/src/coreclr/src/debug/ee/controller.h +++ b/src/coreclr/src/debug/ee/controller.h @@ -249,11 +249,11 @@ class SharedPatchBypassBuffer // sentinel value indicating uninitialized data *(reinterpret_cast(PatchBypass)) = SentinelValue; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 *(reinterpret_cast(BypassBuffer)) = SentinelValue; RipTargetFixup = 0; RipTargetFixupSize = 0; -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 RipTargetFixup = 0; #endif @@ -288,13 +288,13 @@ class SharedPatchBypassBuffer // "PatchBypass" must be the first field of this class for alignment to be correct. BYTE PatchBypass[MAX_INSTRUCTION_LENGTH]; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) const static int cbBufferBypass = 0x10; BYTE BypassBuffer[cbBufferBypass]; UINT_PTR RipTargetFixup; BYTE RipTargetFixupSize; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) UINT_PTR RipTargetFixup; #endif @@ -431,7 +431,7 @@ struct DebuggerControllerPatch // this is shared among all the skippers for this controller. see the comments // right before the definition of SharedPatchBypassBuffer for lifetime info. SharedPatchBypassBuffer* m_pSharedPatchBypassBuffer; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM public: SIZE_T pid; @@ -547,7 +547,7 @@ struct DebuggerControllerPatch if (m_pSharedPatchBypassBuffer != NULL) m_pSharedPatchBypassBuffer->Release(); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM private: DebuggerPatchKind kind; @@ -962,9 +962,9 @@ inline bool IsInUsedAction(DPOSS_ACTION action) inline void VerifyExecutableAddress(const BYTE* address) { // TODO: : when can we apply this to x86? 
-#if defined(BIT64) +#if defined(HOST_64BIT) #if defined(_DEBUG) -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX MEMORY_BASIC_INFORMATION mbi; if (sizeof(mbi) == ClrVirtualQuery(address, &mbi, sizeof(mbi))) @@ -982,9 +982,9 @@ inline void VerifyExecutableAddress(const BYTE* address) ("VEA: address (0x%p) is not on an executable page.", address)); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // _DEBUG -#endif // BIT64 +#endif // HOST_64BIT } #endif // !DACCESS_COMPILE @@ -1469,7 +1469,7 @@ class DebuggerPatchSkip : public DebuggerController BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass; return (CORDB_ADDRESS_TYPE *)patchBypass; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM }; /* ------------------------------------------------------------------------- * diff --git a/src/coreclr/src/debug/ee/debugger.cpp b/src/coreclr/src/debug/ee/debugger.cpp index d5e1f1be41fdf..d0f90e1dc8b14 100644 --- a/src/coreclr/src/debug/ee/debugger.cpp +++ b/src/coreclr/src/debug/ee/debugger.cpp @@ -993,7 +993,7 @@ Debugger::~Debugger() _ASSERTE(!"Debugger dtor should not be called."); } -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) typedef void (*PFN_HIJACK_FUNCTION) (void); // Given the start address and the end address of a function, return a MemoryRange for the function. @@ -1013,22 +1013,22 @@ MemoryRange Debugger::s_hijackFunction[kMaxHijackFunctions] = RedirectedHandledJITCaseForDbgThreadControl_StubEnd), GetMemoryRangeForFunction(RedirectedHandledJITCaseForUserSuspend_Stub, RedirectedHandledJITCaseForUserSuspend_StubEnd) -#if defined(HAVE_GCCOVER) && defined(_TARGET_AMD64_) +#if defined(HAVE_GCCOVER) && defined(TARGET_AMD64) , GetMemoryRangeForFunction(RedirectedHandledJITCaseForGCStress_Stub, RedirectedHandledJITCaseForGCStress_StubEnd) -#endif // HAVE_GCCOVER && _TARGET_AMD64_ +#endif // HAVE_GCCOVER && TARGET_AMD64 }; -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX // Save the necessary information for the debugger to recognize an IP in one of the thread redirection // functions. void Debugger::InitializeHijackFunctionAddress() { -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // Advertise hijack address for the DD Hijack primitive m_rgHijackFunction = Debugger::s_hijackFunction; -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX } // For debug-only builds, we'll have a debugging feature to count @@ -1388,10 +1388,10 @@ DebuggerEval::DebuggerEval(CONTEXT * pContext, DebuggerIPCE_FuncEvalInfo * pEval // This must be non-zero so that the saved opcode is non-zero, and on IA64 we want it to be 0x16 // so that we can have a breakpoint instruction in any slot in the bundle. 
m_bpInfoSegment->m_breakpointInstruction[0] = 0x16; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) USHORT *bp = (USHORT*)&m_bpInfoSegment->m_breakpointInstruction; *bp = CORDbg_BREAK_INSTRUCTION; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM m_thread = pEvalInfo->vmThreadToken.GetRawPtr(); m_evalType = pEvalInfo->funcEvalType; m_methodToken = pEvalInfo->funcMetadataToken; @@ -1837,7 +1837,7 @@ void Debugger::SendCreateProcess(DebuggerLockHolder * pDbgLockHolder) pDbgLockHolder->Acquire(); } -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) HANDLE g_hContinueStartupEvent = INVALID_HANDLE_VALUE; @@ -1884,16 +1884,16 @@ void NotifyDebuggerOfStartup() g_hContinueStartupEvent = NULL; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void Debugger::CleanupTransportSocket(void) { -#if defined(FEATURE_PAL) && defined(FEATURE_DBGIPC_TRANSPORT_VM) +#if defined(TARGET_UNIX) && defined(FEATURE_DBGIPC_TRANSPORT_VM) if (g_pDbgTransport != NULL) { g_pDbgTransport->AbortConnection(); } -#endif // FEATURE_PAL && FEATURE_DBGIPC_TRANSPORT_VM +#endif // TARGET_UNIX && FEATURE_DBGIPC_TRANSPORT_VM } //--------------------------------------------------------------------------------------- @@ -1926,10 +1926,10 @@ HRESULT Debugger::Startup(void) _ASSERTE(g_pEEInterface != NULL); -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) // This may block while an attach occurs. NotifyDebuggerOfStartup(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX { DebuggerLockHolder dbgLockHolder(this); @@ -2078,7 +2078,7 @@ HRESULT Debugger::Startup(void) #endif } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Signal the debugger (via dbgshim) and wait until it is ready for us to // continue. This needs to be outside the lock and after the transport is // initialized. @@ -2090,7 +2090,7 @@ HRESULT Debugger::Startup(void) // in startup code or Main. MarkDebuggerAttachedInternal(); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // We don't bother changing this process's permission. 
// A managed debugger will have the SE_DEBUG permission which will allow it to open our process handle, @@ -2603,7 +2603,7 @@ void Debugger::JITComplete(NativeCodeVersion nativeCodeVersion, TADDR newAddress fd, fd->m_pszDebugClassName, fd->m_pszDebugMethodName, newAddress)); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM newAddress = newAddress|THUMB_CODE; #endif @@ -4390,19 +4390,19 @@ SIZE_T GetSetFrameHelper::GetSizeOfElement(CorElementType cet) { case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: -#if defined(BIT64) +#if defined(HOST_64BIT) case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: -#endif // BIT64 +#endif // HOST_64BIT case ELEMENT_TYPE_R8: return 8; case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: -#if !defined(BIT64) +#if !defined(HOST_64BIT) case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: -#endif // !BIT64 +#endif // !HOST_64BIT case ELEMENT_TYPE_R4: return 4; @@ -6760,13 +6760,13 @@ void Debugger::InitDebuggerLaunchJitInfo(Thread * pThread, EXCEPTION_POINTERS * reinterpret_cast(s_DebuggerLaunchJitInfoExceptionRecord.ExceptionAddress) : reinterpret_cast(reinterpret_cast(GetIP(pExceptionInfo->ContextRecord))); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_INTEL; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_ARM; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_ARM64; #else #error Unknown processor. @@ -6809,7 +6809,7 @@ DebuggerLaunchSetting Debugger::GetDbgJITDebugLaunchSetting() } CONTRACTL_END; -#if FEATURE_PAL +#if TARGET_UNIX DebuggerLaunchSetting setting = DLS_ATTACH_DEBUGGER; #else BOOL bAuto = FALSE; @@ -6862,7 +6862,7 @@ DebuggerLaunchSetting Debugger::GetDbgJITDebugLaunchSetting() { setting = DLS_ATTACH_DEBUGGER; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return setting; } @@ -6896,7 +6896,7 @@ bool Debugger::GetCompleteDebuggerLaunchString(SString * pStrArgsBuf) } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD pid = GetCurrentProcessId(); SString ssDebuggerString; @@ -6915,9 +6915,9 @@ bool Debugger::GetCompleteDebuggerLaunchString(SString * pStrArgsBuf) pStrArgsBuf->Printf(ssDebuggerString, pid, GetUnmanagedAttachEvent(), GetDebuggerLaunchJitInfo(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); return true; -#else // !FEATURE_PAL +#else // !TARGET_UNIX return false; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // Proxy code for EDA @@ -6988,7 +6988,7 @@ HRESULT Debugger::EDAHelper(PROCESS_INFORMATION *pProcessInfo) } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX LOG((LF_CORDB, LL_INFO10000, "D::EDA: thread 0x%x is launching the debugger.\n", GetCurrentThreadId())); _ASSERTE(HasLazyData()); @@ -7054,9 +7054,9 @@ HRESULT Debugger::EDAHelper(PROCESS_INFORMATION *pProcessInfo) LOG((LF_CORDB, LL_INFO10000, "D::EDA: debugger launched successfully.\n")); return S_OK; -#else // !FEATURE_PAL +#else // !TARGET_UNIX return E_ABORT; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // --------------------------------------------------------------------------------------------------------------------- @@ -8465,7 +8465,7 @@ FramePointer GetHandlerFramePointer(BYTE *pStack) { FramePointer handlerFP; -#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM) && !defined(TARGET_ARM64) // 
Refer to the comment in DispatchUnwind() to see why we have to add // sizeof(LPVOID) to the handler ebp. handlerFP = FramePointer::MakeFramePointer(LPVOID(pStack + sizeof(void*))); @@ -8473,7 +8473,7 @@ FramePointer GetHandlerFramePointer(BYTE *pStack) // ARM is similar to IA64 in that it uses the establisher frame as the // handler. in this case we don't need to add sizeof(void*) to the FP. handlerFP = FramePointer::MakeFramePointer((LPVOID)pStack); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM return handlerFP; } @@ -11962,7 +11962,7 @@ HRESULT Debugger::GetAndSendInterceptCommand(DebuggerIPCEvent *event) csi.m_activeFrame.MethodToken, csi.m_activeFrame.md, foundOffset, -#if defined (_TARGET_ARM_ )|| defined (_TARGET_ARM64_ ) +#if defined (TARGET_ARM )|| defined (TARGET_ARM64 ) // ARM requires the caller stack pointer, not the current stack pointer CallerStackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)), #else @@ -12698,7 +12698,7 @@ bool Debugger::IsThreadAtSafePlaceWorker(Thread *thread) CONTEXT ctx; ZeroMemory(&rd, sizeof(rd)); ZeroMemory(&ctx, sizeof(ctx)); -#if defined(_TARGET_X86_) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) rd.ControlPC = ctx.Eip; rd.PCTAddr = (TADDR)&(ctx.Eip); #else @@ -13476,9 +13476,9 @@ void STDCALL ExceptionHijackWorker( // call SetThreadContext on ourself to fix us. } -#if defined(FEATURE_EH_FUNCLETS) && !defined(FEATURE_PAL) +#if defined(FEATURE_EH_FUNCLETS) && !defined(TARGET_UNIX) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // ---------------------------------------------------------------------------- // EmptyPersonalityRoutine // @@ -13507,7 +13507,7 @@ EXCEPTION_DISPOSITION EmptyPersonalityRoutine(IN PEXCEPTION_RECORD pExcept LIMITED_METHOD_CONTRACT; return ExceptionContinueSearch; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 //--------------------------------------------------------------------------------------- // Personality routine for unwinder the assembly hijack stub on 64-bit. @@ -13547,7 +13547,7 @@ ExceptionHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord IN OUT PDISPATCHER_CONTEXT pDispatcherContext ) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) CONTEXT * pHijackContext = NULL; // Get the 1st parameter (the Context) from hijack worker. @@ -13575,7 +13575,7 @@ ExceptionHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord // exactly the behavior we want. 
return ExceptionCollidedUnwind; } -#endif // FEATURE_EH_FUNCLETS && !FEATURE_PAL +#endif // FEATURE_EH_FUNCLETS && !TARGET_UNIX // UEF Prototype from excep.cpp @@ -13835,15 +13835,15 @@ LONG Debugger::FirstChanceSuspendHijackWorker(CONTEXT *pContext, SPEW(fprintf(stderr, "0x%x D::FCHF: in first chance hijack filter.\n", tid)); SPEW(fprintf(stderr, "0x%x D::FCHF: pExceptionRecord=0x%p (%d), pContext=0x%p (%d)\n", tid, pExceptionRecord, sizeof(EXCEPTION_RECORD), pContext, sizeof(CONTEXT))); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) SPEW(fprintf(stderr, "0x%x D::FCHF: code=0x%08x, addr=0x%p, Rip=0x%p, Rsp=0x%p, EFlags=0x%08x\n", tid, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, pContext->Rip, pContext->Rsp, pContext->EFlags)); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) SPEW(fprintf(stderr, "0x%x D::FCHF: code=0x%08x, addr=0x%08x, Eip=0x%08x, Esp=0x%08x, EFlags=0x%08x\n", tid, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, pContext->Eip, pContext->Esp, pContext->EFlags)); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) SPEW(fprintf(stderr, "0x%x D::FCHF: code=0x%08x, addr=0x%08x, Pc=0x%p, Sp=0x%p, EFlags=0x%08x\n", tid, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, pContext->Pc, pContext->Sp, pContext->EFlags)); @@ -13956,7 +13956,7 @@ LONG Debugger::FirstChanceSuspendHijackWorker(CONTEXT *pContext, } } -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) void GenericHijackFuncHelper() { #if DOSPEW @@ -14049,14 +14049,14 @@ void GenericHijackFuncHelper() // This is the function that a thread is hijacked to by the Right Side during a variety of debug events. This function // must be naked. // -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) __declspec(naked) #endif // defined (_x86_) void Debugger::GenericHijackFunc(void) { -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) _asm { push ebp @@ -14067,7 +14067,7 @@ void Debugger::GenericHijackFunc(void) // We can't have C++ classes w/ dtors in a declspec naked, so just have call into a helper. GenericHijackFuncHelper(); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) _asm { mov esp,ebp @@ -14088,7 +14088,7 @@ void Debugger::GenericHijackFunc(void) -//#ifdef _TARGET_X86_ +//#ifdef TARGET_X86 // // This is the function that is called when we determine that a first chance exception hijack has // begun and memory is prepared for the RS to tell the LS what to do @@ -15373,17 +15373,17 @@ HRESULT Debugger::FuncEvalSetup(DebuggerIPCE_FuncEvalInfo *pEvalInfo, // the thread's registers. // Set the first argument to point to the DebuggerEval. 
-#if defined(_TARGET_X86_) +#if defined(TARGET_X86) filterContext->Eax = (DWORD)pDE; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI filterContext->Rdi = (SIZE_T)pDE; #else // UNIX_AMD64_ABI filterContext->Rcx = (SIZE_T)pDE; #endif // !UNIX_AMD64_ABI -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) filterContext->R0 = (DWORD)pDE; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) filterContext->X0 = (SIZE_T)pDE; #else PORTABILITY_ASSERT("Debugger::FuncEvalSetup is not implemented on this platform."); @@ -15477,15 +15477,15 @@ HRESULT Debugger::FuncEvalSetupReAbort(Thread *pThread, Thread::ThreadAbortReque ::SetIP(filterContext, (UINT_PTR)GetEEFuncEntryPoint(::FuncEvalHijack)); -#ifdef _TARGET_X86_ // reliance on filterContext->Eip & Eax +#ifdef TARGET_X86 // reliance on filterContext->Eip & Eax // Set EAX to point to the DebuggerEval. filterContext->Eax = (DWORD)pDE; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // Set RCX to point to the DebuggerEval. filterContext->Rcx = (SIZE_T)pDE; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) filterContext->R0 = (DWORD)pDE; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) filterContext->X0 = (SIZE_T)pDE; #else PORTABILITY_ASSERT("FuncEvalSetupReAbort (Debugger.cpp) is not implemented on this platform."); @@ -16185,7 +16185,7 @@ BOOL Debugger::IsThreadContextInvalid(Thread *pThread) if (success) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Grab Eip - 1 LPVOID address = (((BYTE*)GetIP(&ctx)) - 1); @@ -16211,11 +16211,11 @@ BOOL Debugger::IsThreadContextInvalid(Thread *pThread) // Do nothing. The default return value is FALSE. } EX_END_CATCH(SwallowAllExceptions); -#else // _TARGET_X86_ +#else // TARGET_X86 // Non-x86 can detect whether the thread is suspended after an exception is hit but before // the kernel has dispatched the exception to user mode by trap frame reporting. // See Thread::IsContextSafeToRedirect(). -#endif // _TARGET_X86_ +#endif // TARGET_X86 } else { @@ -16686,7 +16686,7 @@ void DebuggerHeap::Destroy() m_hHeap = NULL; } #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (m_execMemAllocator != NULL) { delete m_execMemAllocator; @@ -16739,7 +16739,7 @@ HRESULT DebuggerHeap::Init(BOOL fExecutable) } #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_execMemAllocator = new (nothrow) DebuggerHeapExecutableMemoryAllocator(); ASSERT(m_execMemAllocator != NULL); if (m_execMemAllocator == NULL) @@ -16752,7 +16752,7 @@ HRESULT DebuggerHeap::Init(BOOL fExecutable) } // Only use canaries on x86 b/c they throw of alignment on Ia64. -#if defined(_DEBUG) && defined(_TARGET_X86_) +#if defined(_DEBUG) && defined(TARGET_X86) #define USE_INTEROPSAFE_CANARY #endif @@ -16832,7 +16832,7 @@ void *DebuggerHeap::Alloc(DWORD size) bool allocateOnHeap = true; HANDLE hExecutableHeap = NULL; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (m_fExecutable) { allocateOnHeap = false; @@ -16842,7 +16842,7 @@ void *DebuggerHeap::Alloc(DWORD size) { hExecutableHeap = ClrGetProcessHeap(); } -#else // FEATURE_PAL +#else // TARGET_UNIX hExecutableHeap = ClrGetProcessExecutableHeap(); #endif @@ -16885,7 +16885,7 @@ void *DebuggerHeap::Realloc(void *pMem, DWORD newSize, DWORD oldSize) _ASSERTE(newSize != 0); _ASSERTE(oldSize != 0); -#if defined(USE_INTEROPSAFE_HEAP) && !defined(USE_INTEROPSAFE_CANARY) && !defined(FEATURE_PAL) +#if defined(USE_INTEROPSAFE_HEAP) && !defined(USE_INTEROPSAFE_CANARY) && !defined(TARGET_UNIX) // No canaries in this case. // Call into realloc. 
void *ret; @@ -16938,11 +16938,11 @@ void DebuggerHeap::Free(void *pMem) #else if (pMem != NULL) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HANDLE hProcessExecutableHeap = ClrGetProcessExecutableHeap(); _ASSERTE(hProcessExecutableHeap != NULL); ClrHeapFree(hProcessExecutableHeap, NULL, pMem); -#else // !FEATURE_PAL +#else // !TARGET_UNIX if(!m_fExecutable) { HANDLE hProcessHeap = ClrGetProcessHeap(); @@ -16954,7 +16954,7 @@ void DebuggerHeap::Free(void *pMem) INDEBUG(int ret =) m_execMemAllocator->Free(pMem); _ASSERTE(ret == 0); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #endif } diff --git a/src/coreclr/src/debug/ee/debugger.h b/src/coreclr/src/debug/ee/debugger.h index d019f7878b6a4..478c5bb07e1d1 100644 --- a/src/coreclr/src/debug/ee/debugger.h +++ b/src/coreclr/src/debug/ee/debugger.h @@ -112,7 +112,7 @@ typedef DPTR(struct DebuggerIPCControlBlock) PTR_DebuggerIPCControlBlock; GPTR_DECL(Debugger, g_pDebugger); GPTR_DECL(EEDebugInterface, g_pEEInterface); GVAL_DECL(ULONG, CLRJitAttachState); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX GVAL_DECL(HANDLE, g_hContinueStartupEvent); #endif extern DebuggerRCThread *g_pRCThread; @@ -2881,9 +2881,9 @@ class Debugger : public DebugInterface kRedirectedForDbgThreadControl, kRedirectedForUserSuspend, kRedirectedForYieldTask, -#if defined(HAVE_GCCOVER) && defined(_TARGET_AMD64_) +#if defined(HAVE_GCCOVER) && defined(TARGET_AMD64) kRedirectedForGCStress, -#endif // HAVE_GCCOVER && _TARGET_AMD64_ +#endif // HAVE_GCCOVER && TARGET_AMD64 kMaxHijackFunctions, }; @@ -2980,10 +2980,10 @@ void RedirectedHandledJITCaseForDbgThreadControl_StubEnd(); void RedirectedHandledJITCaseForUserSuspend_Stub(); void RedirectedHandledJITCaseForUserSuspend_StubEnd(); -#if defined(HAVE_GCCOVER) && defined(_TARGET_AMD64_) +#if defined(HAVE_GCCOVER) && defined(TARGET_AMD64) void RedirectedHandledJITCaseForGCStress_Stub(); void RedirectedHandledJITCaseForGCStress_StubEnd(); -#endif // HAVE_GCCOVER && _TARGET_AMD64_ +#endif // HAVE_GCCOVER && TARGET_AMD64 }; @@ -3962,7 +3962,7 @@ HANDLE OpenWin32EventOrThrow( // Returns true if the specified IL offset has a special meaning (eg. prolog, etc.) bool DbgIsSpecialILOffset(DWORD offset); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) void FixupDispatcherContext(T_DISPATCHER_CONTEXT* pDispatcherContext, T_CONTEXT* pContext, T_CONTEXT* pOriginalContext, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine = NULL); #endif diff --git a/src/coreclr/src/debug/ee/debugger.inl b/src/coreclr/src/debug/ee/debugger.inl index b0d4832babe00..ddd9b1b87ed66 100644 --- a/src/coreclr/src/debug/ee/debugger.inl +++ b/src/coreclr/src/debug/ee/debugger.inl @@ -231,7 +231,7 @@ inline void FuncEvalFrame::UpdateRegDisplay(const PREGDISPLAY pRD) #endif // !FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Update all registers in the reg display from the CONTEXT we stored when the thread was hijacked for this func // eval. We have to update all registers, not just the callee saved registers, because we can hijack a thread at any // point for a func eval, not just at a call site. @@ -246,7 +246,7 @@ inline void FuncEvalFrame::UpdateRegDisplay(const PREGDISPLAY pRD) pRD->PCTAddr = GetReturnAddressPtr(); pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this flag. This is only temporary. 
@@ -272,7 +272,7 @@ inline void FuncEvalFrame::UpdateRegDisplay(const PREGDISPLAY pRD) // SyncRegDisplayToCurrentContext() sets the pRD->SP and pRD->ControlPC on AMD64. SyncRegDisplayToCurrentContext(pRD); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this flag. This is only temporary. @@ -296,7 +296,7 @@ inline void FuncEvalFrame::UpdateRegDisplay(const PREGDISPLAY pRD) SyncRegDisplayToCurrentContext(pRD); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) pRD->IsCallerContextValid = FALSE; pRD->IsCallerSPValid = FALSE; // Don't add usage of this flag. This is only temporary. diff --git a/src/coreclr/src/debug/ee/frameinfo.cpp b/src/coreclr/src/debug/ee/frameinfo.cpp index 0b78e00b4be8b..b23905d22ad37 100644 --- a/src/coreclr/src/debug/ee/frameinfo.cpp +++ b/src/coreclr/src/debug/ee/frameinfo.cpp @@ -383,7 +383,7 @@ inline ULONG AdjustRelOffset(CrawlFrame *pCF, } CONTRACTL_END; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) return pCF->GetRelOffset() & ~THUMB_CODE; #else return pCF->GetRelOffset(); @@ -422,7 +422,7 @@ bool HasExitRuntime(Frame *pFrame, DebuggerFrameData *pData, FramePointer *pPote } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 TADDR returnIP, returnSP; EX_TRY @@ -454,7 +454,7 @@ bool HasExitRuntime(Frame *pFrame, DebuggerFrameData *pData, FramePointer *pPote (returnSP == NULL) || ((TADDR)GetRegdisplaySP(&pData->regDisplay) <= returnSP)); -#else // _TARGET_X86_ +#else // TARGET_X86 // DebuggerExitFrame always return a NULL returnSP on x86. if (pFrame->GetVTablePtr() == DebuggerExitFrame::GetMethodFrameVPtr()) { @@ -502,7 +502,7 @@ bool HasExitRuntime(Frame *pFrame, DebuggerFrameData *pData, FramePointer *pPote return true; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #ifdef _DEBUG @@ -1424,13 +1424,13 @@ StackWalkAction DebuggerWalkStackProc(CrawlFrame *pCF, void *data) d->info.fp = GetFramePointerForDebugger(d, pCF); -#if defined(_DEBUG) && !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_) +#if defined(_DEBUG) && !defined(TARGET_ARM) && !defined(TARGET_ARM64) // Make sure the stackwalk is making progress. // On ARM this is invalid as the stack pointer does necessarily have to move when unwinding a frame. _ASSERTE(IsCloserToLeaf(d->previousFP, d->info.fp)); d->previousFP = d->info.fp; -#endif // _DEBUG && !_TARGET_ARM_ +#endif // _DEBUG && !TARGET_ARM d->needParentInfo = false; @@ -1602,7 +1602,7 @@ StackWalkAction DebuggerWalkStackProc(CrawlFrame *pCF, void *data) d->info.pIJM = pCF->GetJitManager(); d->info.MethodToken = pCF->GetMethodToken(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // This is collecting the ambientSP a lot more than we actually need it. Only time we need it is // inspecting local vars that are based off the ambient esp. 
d->info.ambientSP = pCF->GetAmbientSPFromCrawlFrame(); @@ -1842,11 +1842,11 @@ StackWalkAction DebuggerWalkStackProc(CrawlFrame *pCF, void *data) d->info.md = md; CopyREGDISPLAY(&(d->info.registers), &(d->regDisplay)); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) LOG((LF_CORDB, LL_INFO100000, "DWSP: Saving REGDISPLAY with sp = 0x%p, pc = 0x%p.\n", GetRegdisplaySP(&(d->info.registers)), GetControlPC(&(d->info.registers)))); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 d->needParentInfo = true; LOG((LF_CORDB, LL_INFO100000, "DWSP: Setting needParentInfo\n")); @@ -1873,7 +1873,7 @@ StackWalkAction DebuggerWalkStackProc(CrawlFrame *pCF, void *data) return SWA_CONTINUE; } -#if defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING) +#if defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING) // Helper to get the Wait-Sleep-Join bit from the thread bool IsInWaitSleepJoin(Thread * pThread) { @@ -2056,7 +2056,7 @@ bool PrepareLeafUMChain(DebuggerFrameData * pData, CONTEXT * pCtxTemp) return true; } -#endif // defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING) +#endif // defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING) //----------------------------------------------------------------------------- // Entry function for the debugger's stackwalking layer. @@ -2097,7 +2097,7 @@ StackWalkAction DebuggerWalkStack(Thread *thread, #endif memset((void *)&data, 0, sizeof(data)); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // @todo - this seems pointless. context->Eip will be 0; and when we copy it over to the DebuggerRD, // the context will be completely null. data.regDisplay.ControlPC = context->Eip; @@ -2120,7 +2120,7 @@ StackWalkAction DebuggerWalkStack(Thread *thread, data.Init(thread, targetFP, fIgnoreNonmethodFrames, pCallback, pData); -#if defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING) +#if defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING) CONTEXT ctxTemp; // Temp context for Leaf UM chain. Need it here so that it stays alive for whole stackwalk. 
// Important case for Interop Debugging - @@ -2133,7 +2133,7 @@ StackWalkAction DebuggerWalkStack(Thread *thread, PrepareLeafUMChain(&data, &ctxTemp); } -#endif // defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING) +#endif // defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING) if ((result != SWA_FAILED) && !thread->IsUnstarted() && !thread->IsDead()) { diff --git a/src/coreclr/src/debug/ee/funceval.cpp b/src/coreclr/src/debug/ee/funceval.cpp index 94868ba894125..2c1788d1860bf 100644 --- a/src/coreclr/src/debug/ee/funceval.cpp +++ b/src/coreclr/src/debug/ee/funceval.cpp @@ -189,20 +189,20 @@ inline static void GetAndSetLiteralValue(LPVOID pDst, CorElementType dstType, LP case ELEMENT_TYPE_CHAR: *(UINT16*)pDst = (UINT16)srcValue; break; -#if !defined(BIT64) +#if !defined(HOST_64BIT) case ELEMENT_TYPE_I: #endif case ELEMENT_TYPE_I4: *(int*)pDst = (int)srcValue; break; -#if !defined(BIT64) +#if !defined(HOST_64BIT) case ELEMENT_TYPE_U: #endif case ELEMENT_TYPE_U4: case ELEMENT_TYPE_R4: *(unsigned*)pDst = (unsigned)srcValue; break; -#if defined(BIT64) +#if defined(HOST_64BIT) case ELEMENT_TYPE_I: #endif case ELEMENT_TYPE_I8: @@ -210,7 +210,7 @@ inline static void GetAndSetLiteralValue(LPVOID pDst, CorElementType dstType, LP *(INT64*)pDst = (INT64)srcValue; break; -#if defined(BIT64) +#if defined(HOST_64BIT) case ELEMENT_TYPE_U: #endif case ELEMENT_TYPE_U8: @@ -286,7 +286,7 @@ static SIZE_T GetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *re ret = (SIZE_T)GetFP(&pDE->m_context); break; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) case REGISTER_X86_EAX: ret = pDE->m_context.Eax; break; @@ -311,7 +311,7 @@ static SIZE_T GetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *re ret = pDE->m_context.Edi; break; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) case REGISTER_AMD64_RAX: ret = pDE->m_context.Rax; break; @@ -388,7 +388,7 @@ static SIZE_T GetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *re ret = FPSpillToR8(&(pDE->m_context.Xmm0) + (reg - REGISTER_AMD64_XMM0)); break; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // fall through case REGISTER_ARM64_X0: case REGISTER_ARM64_X1: @@ -462,7 +462,7 @@ static SIZE_T GetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *re ret = FPSpillToR8(&pDE->m_context.V[reg - REGISTER_ARM64_V0]); break; -#endif // !_TARGET_X86_ && !_TARGET_AMD64_ && !_TARGET_ARM64_ +#endif // !TARGET_X86 && !TARGET_AMD64 && !TARGET_ARM64 default: _ASSERT(!"Invalid register number!"); @@ -502,7 +502,7 @@ static void SetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *regA SetFP(&pDE->m_context, newValue); break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case REGISTER_X86_EAX: pDE->m_context.Eax = newValue; break; @@ -527,7 +527,7 @@ static void SetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *regA pDE->m_context.Edi = newValue; break; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) case REGISTER_AMD64_RAX: pDE->m_context.Rax = newValue; break; @@ -604,7 +604,7 @@ static void SetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *regA R8ToFPSpill(&(pDE->m_context.Xmm0) + (reg - REGISTER_AMD64_XMM0), newValue); break; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // fall through case REGISTER_ARM64_X0: case REGISTER_ARM64_X1: @@ -678,7 +678,7 @@ static void SetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *regA R8ToFPSpill(&pDE->m_context.V[reg - REGISTER_ARM64_V0], newValue); break; -#endif // !_TARGET_X86_ && 
!_TARGET_AMD64_ && !_TARGET_ARM64_ +#endif // !TARGET_X86 && !TARGET_AMD64 && !TARGET_ARM64 default: _ASSERT(!"Invalid register number!"); @@ -714,15 +714,15 @@ static PVOID GetRegisterValueAndReturnAddress(DebuggerEval *pDE, PVOID pAddr; -#if !defined(BIT64) +#if !defined(HOST_64BIT) pAddr = pInt64Buf; DWORD *pLow = (DWORD*)(pInt64Buf); DWORD *pHigh = pLow + 1; -#endif // BIT64 +#endif // HOST_64BIT switch (pFEAD->argHome.kind) { -#if !defined(BIT64) +#if !defined(HOST_64BIT) case RAK_REGREG: *pLow = GetRegisterValue(pDE, pFEAD->argHome.u.reg2, pFEAD->argHome.u.reg2Addr, pFEAD->argHome.u.reg2Value); *pHigh = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value); @@ -737,7 +737,7 @@ static PVOID GetRegisterValueAndReturnAddress(DebuggerEval *pDE, *pLow = *((DWORD*)CORDB_ADDRESS_TO_PTR(pFEAD->argHome.addr)); *pHigh = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value); break; -#endif // BIT64 +#endif // HOST_64BIT case RAK_REG: // Simply grab the value out of the proper register. @@ -850,15 +850,15 @@ static void GetFuncEvalArgValue(DebuggerEval *pDE, { INT64 *pSource; -#if defined(BIT64) +#if defined(HOST_64BIT) _ASSERTE(dataLocation & DL_MaybeInteriorPtrArray); pSource = (INT64 *)pMaybeInteriorPtrArg; -#else // !BIT64 +#else // !HOST_64BIT _ASSERTE(dataLocation & DL_BufferForArgsArray); pSource = pBufferArg; -#endif // !BIT64 +#endif // !HOST_64BIT if (!isByRef) { @@ -900,10 +900,10 @@ static void GetFuncEvalArgValue(DebuggerEval *pDE, unsigned size = argTH.GetMethodTable()->GetNumInstanceFieldBytes(); if (size <= sizeof(ARG_SLOT) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // On AMD64 we pass value types of size which are not powers of 2 by ref. && ((size & (size-1)) == 0) -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 ) { memcpyNoGCRefs(ArgSlotEndianessFixup(pArgument, sizeof(LPVOID)), pAddr, size); @@ -975,7 +975,7 @@ static void GetFuncEvalArgValue(DebuggerEval *pDE, INDEBUG(DataLocation expectedLocation); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((pFEAD->argElementType == ELEMENT_TYPE_I4) || (pFEAD->argElementType == ELEMENT_TYPE_U4) || (pFEAD->argElementType == ELEMENT_TYPE_R4)) @@ -1216,13 +1216,13 @@ static void GetFuncEvalArgValue(DebuggerEval *pDE, static CorDebugRegister GetArgAddrFromReg( DebuggerIPCE_FuncEvalArgData *pFEAD) { CorDebugRegister retval = REGISTER_INSTRUCTION_POINTER; // good as default as any -#if defined(BIT64) +#if defined(HOST_64BIT) retval = (pFEAD->argHome.kind == RAK_REG ? pFEAD->argHome.reg1 : (CorDebugRegister)((int)REGISTER_IA64_F0 + pFEAD->argHome.floatIndex)); -#else // !BIT64 +#else // !HOST_64BIT retval = pFEAD->argHome.reg1; -#endif // !BIT64 +#endif // !HOST_64BIT return retval; } @@ -1254,11 +1254,11 @@ static void SetFuncEvalByRefArgValue(DebuggerEval *pDE, { INT64 source; -#if defined(BIT64) +#if defined(HOST_64BIT) source = (INT64)maybeInteriorPtrArg; -#else // !BIT64 +#else // !HOST_64BIT source = bufferByRefArg; -#endif // !BIT64 +#endif // !HOST_64BIT if (pFEAD->argIsLiteral) { @@ -1272,7 +1272,7 @@ static void SetFuncEvalByRefArgValue(DebuggerEval *pDE, } else { -#if !defined(BIT64) +#if !defined(HOST_64BIT) // RAK_REG is the only 4 byte type, all others are 8 byte types. _ASSERTE(pFEAD->argHome.kind != RAK_REG); @@ -1299,12 +1299,12 @@ static void SetFuncEvalByRefArgValue(DebuggerEval *pDE, default: break; } -#else // BIT64 +#else // HOST_64BIT // The only types we use are RAK_REG and RAK_FLOAT, and both of them can be 4 or 8 bytes. 
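The funceval hunks above key the pointer-sized cases off HOST_64BIT instead of BIT64: ELEMENT_TYPE_I and ELEMENT_TYPE_U share the 4-byte cases on a 32-bit host and the 8-byte cases on a 64-bit host. A standalone sketch of that fall-through pattern, with simplified names; HOST_64BIT is derived from the pointer width here purely for illustration, since the build normally defines it.

```cpp
// Not the CoreCLR code: illustrates how the native-int case falls through to
// the 4-byte or 8-byte path depending on a HOST_64BIT-style macro.
#include <cstdint>
#include <cstring>
#include <cstdio>

#if INTPTR_MAX == INT64_MAX
#define HOST_64BIT 1
#endif

enum class ElemType { I4, I8, I /* native int */ };

static void StoreLiteral(void* pDst, ElemType type, uint64_t srcValue)
{
    switch (type)
    {
#if !defined(HOST_64BIT)
    case ElemType::I:   // native int is 4 bytes on a 32-bit host
#endif
    case ElemType::I4:
        { int32_t v = (int32_t)srcValue; std::memcpy(pDst, &v, sizeof(v)); }
        break;
#if defined(HOST_64BIT)
    case ElemType::I:   // native int is 8 bytes on a 64-bit host
#endif
    case ElemType::I8:
        { int64_t v = (int64_t)srcValue; std::memcpy(pDst, &v, sizeof(v)); }
        break;
    }
}

int main()
{
    intptr_t slot = 0;
    StoreLiteral(&slot, ElemType::I, 42);
    std::printf("native int slot = %lld (width %zu)\n", (long long)slot, sizeof(slot));
    return 0;
}
```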
_ASSERTE((pFEAD->argHome.kind == RAK_REG) || (pFEAD->argHome.kind == RAK_FLOAT)); SetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, source); -#endif // BIT64 +#endif // HOST_64BIT } } break; @@ -1314,7 +1314,7 @@ static void SetFuncEvalByRefArgValue(DebuggerEval *pDE, { SIZE_T source; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((pFEAD->argElementType == ELEMENT_TYPE_I4) || (pFEAD->argElementType == ELEMENT_TYPE_U4) || (pFEAD->argElementType == ELEMENT_TYPE_R4)) @@ -1325,7 +1325,7 @@ static void SetFuncEvalByRefArgValue(DebuggerEval *pDE, { #endif source = (SIZE_T)bufferByRefArg; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 } #endif @@ -1457,7 +1457,7 @@ static void GCProtectAllPassedArgs(DebuggerEval *pDE, case ELEMENT_TYPE_R8: // 64bit values -#if defined(BIT64) +#if defined(HOST_64BIT) // // Only need to worry about protecting if a pointer is a 64 bit quantity. // @@ -1506,7 +1506,7 @@ static void GCProtectAllPassedArgs(DebuggerEval *pDE, } #endif } -#endif // BIT64 +#endif // HOST_64BIT break; case ELEMENT_TYPE_VALUETYPE: @@ -1580,7 +1580,7 @@ static void GCProtectAllPassedArgs(DebuggerEval *pDE, case ELEMENT_TYPE_R4: // 32bit values -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(sizeof(void *) == sizeof(INT32)); if (pFEAD->argAddr != NULL) @@ -1636,7 +1636,7 @@ static void GCProtectAllPassedArgs(DebuggerEval *pDE, } #endif } -#endif // _TARGET_X86_ +#endif // TARGET_X86 default: // @@ -2362,7 +2362,7 @@ void CopyArgsToBuffer(DebuggerEval *pDE, else { -#if !defined(BIT64) +#if !defined(HOST_64BIT) // RAK_REG is the only 4 byte type, all others are 8 byte types. _ASSERTE(pFEAD->argHome.kind != RAK_REG); @@ -2379,13 +2379,13 @@ void CopyArgsToBuffer(DebuggerEval *pDE, *pDest = *pAddr; -#else // BIT64 +#else // HOST_64BIT // Both RAK_REG and RAK_FLOAT can be either 4 bytes or 8 bytes. _ASSERTE((pFEAD->argHome.kind == RAK_REG) || (pFEAD->argHome.kind == RAK_FLOAT)); CorDebugRegister regNum = GetArgAddrFromReg(pFEAD); *pDest = GetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value); -#endif // BIT64 +#endif // HOST_64BIT @@ -3975,7 +3975,7 @@ void * STDCALL FuncEvalHijackWorker(DebuggerEval *pDE) // Signal to the helper thread that we're done with our func eval. Start by creating a DebuggerFuncEvalComplete // object. Give it an address at which to create the patch, which is a chunk of memory specified by our // DebuggerEval big enough to hold a breakpoint instruction. -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM dest = (BYTE*)((DWORD)&(pDE->m_bpInfoSegment->m_breakpointInstruction) | THUMB_CODE); #else dest = &(pDE->m_bpInfoSegment->m_breakpointInstruction); @@ -4046,7 +4046,7 @@ void * STDCALL FuncEvalHijackWorker(DebuggerEval *pDE) } -#if defined(FEATURE_EH_FUNCLETS) && !defined(FEATURE_PAL) +#if defined(FEATURE_EH_FUNCLETS) && !defined(TARGET_UNIX) EXTERN_C EXCEPTION_DISPOSITION FuncEvalHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord @@ -4057,15 +4057,15 @@ FuncEvalHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord ) { DebuggerEval* pDE = NULL; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) pDE = *(DebuggerEval**)(pDispatcherContext->EstablisherFrame); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // on ARM the establisher frame is the SP of the caller of FuncEvalHijack, on other platforms it's FuncEvalHijack's SP. // in FuncEvalHijack we allocate 8 bytes of stack space and then store R0 at the current SP, so if we subtract 8 from // the establisher frame we can get the stack location where R0 was stored. 
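The comments above describe how FuncEvalHijackPersonalityRoutine recovers the DebuggerEval pointer: each hijack stub spills its argument register at a known distance from the establisher frame reported by the OS, and that distance differs per target. A minimal sketch of the same lookup; DebuggerEvalStub is an opaque stand-in, the TARGET_* macros are assumed to come from the build, and the offsets follow the quoted comments rather than any independent layout.

```cpp
// Illustration of the per-target establisher-frame lookup, not the real
// personality routine.
#include <cstdint>

struct DebuggerEvalStub;   // stand-in for DebuggerEval

static DebuggerEvalStub* RecoverEvalFromEstablisherFrame(uintptr_t establisherFrame)
{
#if defined(TARGET_AMD64)
    // AMD64: the establisher frame is FuncEvalHijack's own SP, and the
    // DebuggerEval pointer was spilled right at that address.
    return *reinterpret_cast<DebuggerEvalStub**>(establisherFrame);
#elif defined(TARGET_ARM)
    // ARM: the establisher frame is the caller's SP; the stub allocated 8
    // bytes and stored R0 at its SP, i.e. 8 bytes below the establisher frame.
    return *reinterpret_cast<DebuggerEvalStub**>(establisherFrame - 8);
#elif defined(TARGET_ARM64)
    // ARM64: 32 bytes were allocated and X0 stored at SP + 16, which is
    // 16 bytes below the establisher frame.
    return *reinterpret_cast<DebuggerEvalStub**>(establisherFrame - 16);
#else
    (void)establisherFrame;
    return nullptr;        // other targets not covered by this sketch
#endif
}
```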
pDE = *(DebuggerEval**)(pDispatcherContext->EstablisherFrame - 8); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // on ARM64 the establisher frame is the SP of the caller of FuncEvalHijack. // in FuncEvalHijack we allocate 32 bytes of stack space and then store R0 at the current SP + 16, so if we subtract 16 from // the establisher frame we can get the stack location where R0 was stored. @@ -4083,6 +4083,6 @@ FuncEvalHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord } -#endif // FEATURE_EH_FUNCLETS && !FEATURE_PAL +#endif // FEATURE_EH_FUNCLETS && !TARGET_UNIX #endif // ifndef DACCESS_COMPILE diff --git a/src/coreclr/src/debug/ee/i386/x86walker.cpp b/src/coreclr/src/debug/ee/i386/x86walker.cpp index a793e93f0ef6c..fd6799c15ccd9 100644 --- a/src/coreclr/src/debug/ee/i386/x86walker.cpp +++ b/src/coreclr/src/debug/ee/i386/x86walker.cpp @@ -18,7 +18,7 @@ #include "openum.h" -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // // The x86 walker is currently pretty minimal. It only recognizes call and return opcodes, plus a few jumps. The rest diff --git a/src/coreclr/src/debug/ee/rcthread.cpp b/src/coreclr/src/debug/ee/rcthread.cpp index a6122cffa9701..0096728652b2f 100644 --- a/src/coreclr/src/debug/ee/rcthread.cpp +++ b/src/coreclr/src/debug/ee/rcthread.cpp @@ -12,7 +12,7 @@ #include "stdafx.h" #include "threadsuspend.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "securitywrapper.h" #endif @@ -1459,7 +1459,7 @@ HRESULT DebuggerRCThread::AsyncStop(void) NOTHROW; GC_NOTRIGGER; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PRECONDITION(!ThisIsHelperThreadWorker()); #else PRECONDITION(!ThisIsHelperThreadWorker()); @@ -1653,7 +1653,7 @@ HRESULT DebuggerRCThread::ReDaclEvents(PSECURITY_DESCRIPTOR pSecurityDescriptor) { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (m_pDCB != NULL) { if (m_pDCB->m_rightSideEventAvailable) @@ -1677,7 +1677,7 @@ HRESULT DebuggerRCThread::ReDaclEvents(PSECURITY_DESCRIPTOR pSecurityDescriptor) } } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return S_OK; } diff --git a/src/coreclr/src/debug/ee/walker.h b/src/coreclr/src/debug/ee/walker.h index d8b6b369fe3d8..123d39dbdf11f 100644 --- a/src/coreclr/src/debug/ee/walker.h +++ b/src/coreclr/src/debug/ee/walker.h @@ -121,7 +121,7 @@ class Walker bool m_isAbsoluteBranch; // Is it an obsolute branch or not }; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 class NativeWalker : public Walker { @@ -151,7 +151,7 @@ class NativeWalker : public Walker DWORD m_opcode; // Current instruction or opcode }; -#elif defined (_TARGET_ARM_) +#elif defined (TARGET_ARM) class NativeWalker : public Walker { @@ -168,7 +168,7 @@ class NativeWalker : public Walker DWORD GetReg(DWORD reg); }; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) class NativeWalker : public Walker { @@ -197,7 +197,7 @@ class NativeWalker : public Walker DWORD m_opcode; // Current instruction or opcode }; -#elif defined (_TARGET_ARM64_) +#elif defined (TARGET_ARM64) #include "controller.h" class NativeWalker : public Walker { diff --git a/src/coreclr/src/debug/inc/arm_primitives.h b/src/coreclr/src/debug/inc/arm_primitives.h index 4dddcc777136f..2b23ca79422d0 100644 --- a/src/coreclr/src/debug/inc/arm_primitives.h +++ b/src/coreclr/src/debug/inc/arm_primitives.h @@ -71,7 +71,7 @@ inline void CORDbgAdjustPCForBreakInstruction(DT_CONTEXT* pContext) { LIMITED_METHOD_CONTRACT; -#if defined(DBG_TARGET_ARM64) +#if defined(TARGET_ARM64) pContext->Pc -= CORDbg_BREAK_INSTRUCTION_SIZE; #else // @ARMTODO: ARM appears to leave the PC 
at the start of the breakpoint (at least according to Windbg, diff --git a/src/coreclr/src/debug/inc/common.h b/src/coreclr/src/debug/inc/common.h index 877204c993a45..034d9bb241022 100644 --- a/src/coreclr/src/debug/inc/common.h +++ b/src/coreclr/src/debug/inc/common.h @@ -14,7 +14,7 @@ // conversions between PTR types (eg. DPTR) and CORDB_ADDRESS, and not need conversions // from host pointer types to CORDB_ADDRESS. // -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(const void* ptr) { SUPPORTS_DAC; @@ -31,7 +31,7 @@ inline CORDB_ADDRESS PTR_TO_CORDB_ADDRESS(UINT_PTR ptr) } #else #define PTR_TO_CORDB_ADDRESS(_ptr) (CORDB_ADDRESS)(ULONG_PTR)(_ptr) -#endif //_TARGET_X86_ || _TARGET_ARM_ +#endif //TARGET_X86 || TARGET_ARM #define CORDB_ADDRESS_TO_PTR(_cordb_addr) ((LPVOID)(SIZE_T)(_cordb_addr)) @@ -93,7 +93,7 @@ extern void CORDbgSetDebuggerREGDISPLAYFromContext(DebuggerREGDISPLAY *pDRD, inline ULONG32 ContextSizeForFlags(ULONG32 flags) { -#if defined(CONTEXT_EXTENDED_REGISTERS) && defined(_TARGET_X86_) +#if defined(CONTEXT_EXTENDED_REGISTERS) && defined(TARGET_X86) // Older platforms didn't have extended registers in // the context definition so only enforce that size // if the extended register flag is set. @@ -102,7 +102,7 @@ ULONG32 ContextSizeForFlags(ULONG32 flags) return offsetof(T_CONTEXT, ExtendedRegisters); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { return sizeof(T_CONTEXT); } diff --git a/src/coreclr/src/debug/inc/dbgappdomain.h b/src/coreclr/src/debug/inc/dbgappdomain.h index 7b2db679443bc..0939dd0e2a5da 100644 --- a/src/coreclr/src/debug/inc/dbgappdomain.h +++ b/src/coreclr/src/debug/inc/dbgappdomain.h @@ -51,7 +51,7 @@ struct AppDomainInfo // Enforce the AppDomain IPC block binary layout doesn't change between versions. // Only an issue for x86 since that's the only platform w/ multiple versions. -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) static_assert_no_msg(offsetof(AppDomainInfo, m_id) == 0x0); static_assert_no_msg(offsetof(AppDomainInfo, m_iNameLengthInBytes) == 0x4); static_assert_no_msg(offsetof(AppDomainInfo, m_szAppDomainName) == 0x8); @@ -362,7 +362,7 @@ struct AppDomainEnumerationIPCBlock // Enforce the AppDomain IPC block binary layout doesn't change between versions. // Only an issue for x86 since that's the only platform w/ multiple versions. -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) static_assert_no_msg(offsetof(AppDomainEnumerationIPCBlock, m_hMutex) == 0x0); static_assert_no_msg(offsetof(AppDomainEnumerationIPCBlock, m_iTotalSlots) == 0x4); static_assert_no_msg(offsetof(AppDomainEnumerationIPCBlock, m_iNumOfUsedSlots) == 0x8); diff --git a/src/coreclr/src/debug/inc/dbgipcevents.h b/src/coreclr/src/debug/inc/dbgipcevents.h index 66fc095cd92cf..5a78835d0756b 100644 --- a/src/coreclr/src/debug/inc/dbgipcevents.h +++ b/src/coreclr/src/debug/inc/dbgipcevents.h @@ -178,17 +178,17 @@ struct MSLAYOUT DebuggerIPCRuntimeOffsets // declared DebuggerIPCEvent at the end of this header (and we can do so because in the transport case there // aren't any embedded buffers in the DebuggerIPCControlBlock). -#if defined(DBG_TARGET_X86) || defined(DBG_TARGET_ARM) -#ifdef BIT64 +#if defined(TARGET_X86) || defined(TARGET_ARM) +#ifdef HOST_64BIT #define CorDBIPC_BUFFER_SIZE 2104 #else #define CorDBIPC_BUFFER_SIZE 2092 #endif -#else // !_TARGET_X86_ && !_TARGET_ARM_ +#else // !TARGET_X86 && !TARGET_ARM // This is the size of a DebuggerIPCEvent. 
You will hit an assert in Cordb::Initialize() (di\rsmain.cpp) // if this is not defined correctly. AMD64 actually has a page size of 0x1000, not 0x2000. #define CorDBIPC_BUFFER_SIZE 4016 // (4016 + 6) * 2 + 148 = 8192 (two (DebuggerIPCEvent + alignment padding) + other fields = page size) -#endif // DBG_TARGET_X86 || DBG_TARGET_ARM +#endif // TARGET_X86 || TARGET_ARM // // DebuggerIPCControlBlock describes the layout of the shared memory shared between the Left Side and the Right @@ -220,11 +220,11 @@ struct MSLAYOUT DebuggerIPCControlBlock HRESULT m_errorHR; unsigned int m_errorCode; -#if defined(DBG_TARGET_64BIT) +#if defined(TARGET_64BIT) // 64-bit needs this padding to make the handles after this aligned. // But x86 can't have this padding b/c it breaks binary compatibility between v1.1 and v2.0. ULONG padding4; -#endif // DBG_TARGET_64BIT +#endif // TARGET_64BIT RemoteHANDLE m_rightSideEventAvailable; @@ -320,11 +320,11 @@ struct MSLAYOUT DebuggerIPCControlBlockTransport HRESULT m_errorHR; unsigned int m_errorCode; -#if defined(DBG_TARGET_64BIT) +#if defined(TARGET_64BIT) // 64-bit needs this padding to make the handles after this aligned. // But x86 can't have this padding b/c it breaks binary compatibility between v1.1 and v2.0. ULONG padding4; -#endif // DBG_TARGET_64BIT +#endif // TARGET_64BIT // This is set immediately when the helper thread is created. // This will be set even if there's a temporary helper thread or if the real helper @@ -366,7 +366,7 @@ struct MSLAYOUT DebuggerIPCControlBlockTransport #include "dbgtransportsession.h" #endif // defined(FEATURE_DBGIPC_TRANSPORT_VM) || defined(FEATURE_DBGIPC_TRANSPORT_DI) -#if defined(DBG_TARGET_X86) && !defined(FEATURE_CORESYSTEM) +#if defined(TARGET_X86) && !defined(FEATURE_CORESYSTEM) // We have an versioning requirement. // Certain portions of the v1.0 and v1.1 IPC block are shared. This is b/c a v1.1 debugger needs to be able // to look at a v2.0 app enough to recognize the version mismatch. @@ -1076,7 +1076,7 @@ struct MSLAYOUT IPCENames // We use a class/struct so that the function can rema struct MSLAYOUT DebuggerREGDISPLAY { -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) #define DebuggerIPCE_FloatCount 8 SIZE_T Edi; @@ -1096,7 +1096,7 @@ struct MSLAYOUT DebuggerREGDISPLAY SIZE_T SP; SIZE_T PC; -#elif defined(DBG_TARGET_AMD64) +#elif defined(TARGET_AMD64) #define DebuggerIPCE_FloatCount 16 SIZE_T Rax; @@ -1133,7 +1133,7 @@ struct MSLAYOUT DebuggerREGDISPLAY SIZE_T SP; SIZE_T PC; -#elif defined(DBG_TARGET_ARM) +#elif defined(TARGET_ARM) #define DebuggerIPCE_FloatCount 32 SIZE_T R0; @@ -1168,7 +1168,7 @@ struct MSLAYOUT DebuggerREGDISPLAY void *pLR; SIZE_T PC; void *pPC; -#elif defined(DBG_TARGET_ARM64) +#elif defined(TARGET_ARM64) #define DebuggerIPCE_FloatCount 32 SIZE_T X[29]; @@ -1191,12 +1191,12 @@ inline LPVOID GetSPAddress(const DebuggerREGDISPLAY * display) return (LPVOID)&display->SP; } -#if !defined(DBG_TARGET_AMD64) && !defined(DBG_TARGET_ARM) +#if !defined(TARGET_AMD64) && !defined(TARGET_ARM) inline LPVOID GetFPAddress(const DebuggerREGDISPLAY * display) { return (LPVOID)&display->FP; } -#endif // !DBG_TARGET_AMD64 +#endif // !TARGET_AMD64 class MSLAYOUT FramePointer @@ -1866,34 +1866,34 @@ struct MSLAYOUT DebuggerMDANotification // that the debugger only uses REGNUM_SP and REGNUM_AMBIENT_SP though, so we can just virtualize these two for // the target platform. // Keep this is sync with the definitions in inc/corinfo.h. 
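The block above hard-codes the target's stack-pointer register numbers and, when the compilation also targets that architecture, cross-checks them against the JIT interface at compile time. A compressed sketch of the pattern; ICorDebugInfoStub and its enumerator names are hypothetical stand-ins for ICorDebugInfo, and the numbers mirror the diff.

```cpp
// Sketch only: hard-coded target SP register number plus a compile-time
// cross-check when host and target architecture coincide.
namespace ICorDebugInfoStub
{
    enum RegNum { X86_REGNUM_SP = 4, ARM64_REGNUM_SP = 31 };
}

#if defined(TARGET_X86)
#define DBG_TARGET_REGNUM_SP 4
static_assert(DBG_TARGET_REGNUM_SP == ICorDebugInfoStub::X86_REGNUM_SP,
              "keep in sync with the definitions in inc/corinfo.h");
#elif defined(TARGET_ARM64)
#define DBG_TARGET_REGNUM_SP 31
static_assert(DBG_TARGET_REGNUM_SP == ICorDebugInfoStub::ARM64_REGNUM_SP,
              "keep in sync with the definitions in inc/corinfo.h");
#endif
```

One visible consequence of the unification: the inner guard, which used to test the component's own _TARGET_X86_, now tests the same TARGET_X86 symbol as the outer guard, so the compile-time check applies to every build that targets that architecture.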
-#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) #define DBG_TARGET_REGNUM_SP 4 #define DBG_TARGET_REGNUM_AMBIENT_SP 9 -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static_assert_no_msg(DBG_TARGET_REGNUM_SP == ICorDebugInfo::REGNUM_SP); static_assert_no_msg(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); -#endif // _TARGET_X86_ -#elif defined(DBG_TARGET_AMD64) +#endif // TARGET_X86 +#elif defined(TARGET_AMD64) #define DBG_TARGET_REGNUM_SP 4 #define DBG_TARGET_REGNUM_AMBIENT_SP 17 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 static_assert_no_msg(DBG_TARGET_REGNUM_SP == ICorDebugInfo::REGNUM_SP); static_assert_no_msg(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); -#endif // _TARGET_AMD64_ -#elif defined(DBG_TARGET_ARM) +#endif // TARGET_AMD64 +#elif defined(TARGET_ARM) #define DBG_TARGET_REGNUM_SP 13 #define DBG_TARGET_REGNUM_AMBIENT_SP 17 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM C_ASSERT(DBG_TARGET_REGNUM_SP == ICorDebugInfo::REGNUM_SP); C_ASSERT(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); -#endif // _TARGET_ARM_ -#elif defined(DBG_TARGET_ARM64) +#endif // TARGET_ARM +#elif defined(TARGET_ARM64) #define DBG_TARGET_REGNUM_SP 31 #define DBG_TARGET_REGNUM_AMBIENT_SP 34 -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 C_ASSERT(DBG_TARGET_REGNUM_SP == ICorDebugInfo::REGNUM_SP); C_ASSERT(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #else #error Target registers are not defined for this platform #endif diff --git a/src/coreclr/src/debug/inc/dbgtargetcontext.h b/src/coreclr/src/debug/inc/dbgtargetcontext.h index d439bbe7e476f..9c370020ebff4 100644 --- a/src/coreclr/src/debug/inc/dbgtargetcontext.h +++ b/src/coreclr/src/debug/inc/dbgtargetcontext.h @@ -33,21 +33,21 @@ // This odd define pattern is needed because in DBI we set _TARGET_ to match the host and // DBG_TARGET to control our targeting. 
In x-plat DBI DBG_TARGET won't match _TARGET_ and // DBG_TARGET needs to take precedence -#if defined(DBG_TARGET_X86) +#if defined(TARGET_X86) #define DTCONTEXT_IS_X86 -#elif defined (DBG_TARGET_AMD64) +#elif defined (TARGET_AMD64) #define DTCONTEXT_IS_AMD64 -#elif defined (DBG_TARGET_ARM) +#elif defined (TARGET_ARM) #define DTCONTEXT_IS_ARM -#elif defined (DBG_TARGET_ARM64) +#elif defined (TARGET_ARM64) #define DTCONTEXT_IS_ARM64 -#elif defined (_TARGET_X86_) +#elif defined (TARGET_X86) #define DTCONTEXT_IS_X86 -#elif defined (_TARGET_AMD64_) +#elif defined (TARGET_AMD64) #define DTCONTEXT_IS_AMD64 -#elif defined (_TARGET_ARM_) +#elif defined (TARGET_ARM) #define DTCONTEXT_IS_ARM -#elif defined (_TARGET_ARM64_) +#elif defined (TARGET_ARM64) #define DTCONTEXT_IS_ARM64 #endif diff --git a/src/coreclr/src/debug/inc/diagnosticsipc.h b/src/coreclr/src/debug/inc/diagnosticsipc.h index fb0a64fdd097f..eabea6c3ceaea 100644 --- a/src/coreclr/src/debug/inc/diagnosticsipc.h +++ b/src/coreclr/src/debug/inc/diagnosticsipc.h @@ -7,11 +7,11 @@ #include -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX struct sockaddr_un; #else #include -#endif /* FEATURE_PAL */ +#endif /* TARGET_UNIX */ typedef void (*ErrorCallback)(const char *szMessage, uint32_t code); @@ -39,7 +39,7 @@ class IpcStream final private: -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX const int _serverSocket; sockaddr_un *const _pServerAddress; bool _isClosed; @@ -54,7 +54,7 @@ class IpcStream final char _pNamedPipeName[MaxNamedPipeNameLength]; // https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-createnamedpipea DiagnosticsIpc(const char(&namedPipeName)[MaxNamedPipeNameLength]); -#endif /* FEATURE_PAL */ +#endif /* TARGET_UNIX */ DiagnosticsIpc() = delete; DiagnosticsIpc(const DiagnosticsIpc &src) = delete; @@ -64,13 +64,13 @@ class IpcStream final }; private: -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX int _clientSocket = -1; IpcStream(int clientSocket) : _clientSocket(clientSocket) {} #else HANDLE _hPipe = INVALID_HANDLE_VALUE; IpcStream(HANDLE hPipe) : _hPipe(hPipe) {} -#endif /* FEATURE_PAL */ +#endif /* TARGET_UNIX */ IpcStream() = delete; IpcStream(const IpcStream &src) = delete; diff --git a/src/coreclr/src/debug/inc/dump/dumpcommon.h b/src/coreclr/src/debug/inc/dump/dumpcommon.h index e57b4b3a129d2..df1c6317d1926 100644 --- a/src/coreclr/src/debug/inc/dump/dumpcommon.h +++ b/src/coreclr/src/debug/inc/dump/dumpcommon.h @@ -5,7 +5,7 @@ #ifndef DEBUGGER_DUMPCOMMON_H #define DEBUGGER_DUMPCOMMON_H -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX typedef enum _MINIDUMP_TYPE { MiniDumpNormal = 0x00000000, MiniDumpWithDataSegs = 0x00000001, @@ -32,7 +32,7 @@ typedef enum _MINIDUMP_TYPE { MiniDumpWithAvxXStateContext = 0x00200000, MiniDumpValidTypeFlags = 0x003fffff, } MINIDUMP_TYPE; -#endif // FEATURE_PAL +#endif // TARGET_UNIX #if defined(DACCESS_COMPILE) || defined(RIGHT_SIDE_COMPILE) diff --git a/src/coreclr/src/debug/inc/twowaypipe.h b/src/coreclr/src/debug/inc/twowaypipe.h index 1ad9a7f6209bc..18942b4a4ba8d 100644 --- a/src/coreclr/src/debug/inc/twowaypipe.h +++ b/src/coreclr/src/debug/inc/twowaypipe.h @@ -8,7 +8,7 @@ #include "processdescriptor.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define INVALID_PIPE -1 #else #define INVALID_PIPE INVALID_HANDLE_VALUE @@ -83,7 +83,7 @@ class TwoWayPipe State m_state; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX int m_inboundPipe, m_outboundPipe; // two one sided pipes used for communication char m_inPipeName[MAX_DEBUGGER_TRANSPORT_PIPE_NAME_LENGTH]; // filename of the inbound pipe @@ -99,7 +99,7 @@ class 
TwoWayPipe HANDLE CreateOneWayPipe(DWORD id, bool inbound); HANDLE m_inboundPipe, m_outboundPipe; //two one sided pipes used for communication -#endif //FEATURE_PAL +#endif //TARGET_UNIX }; #endif //TwoWayPipe_H diff --git a/src/coreclr/src/debug/shared/dbgtransportsession.cpp b/src/coreclr/src/debug/shared/dbgtransportsession.cpp index ca4b3f2f12c19..e719c12868761 100644 --- a/src/coreclr/src/debug/shared/dbgtransportsession.cpp +++ b/src/coreclr/src/debug/shared/dbgtransportsession.cpp @@ -418,9 +418,9 @@ void MarshalDCBTransportToDCB(DebuggerIPCControlBlockTransport* pIn, DebuggerIPC pOut->m_errorHR = pIn->m_errorHR; pOut->m_errorCode = pIn->m_errorCode; -#if defined(DBG_TARGET_64BIT) +#if defined(TARGET_64BIT) pOut->padding4 = pIn->padding4; -#endif // DBG_TARGET_64BIT +#endif // TARGET_64BIT // @@ -472,9 +472,9 @@ void MarshalDCBToDCBTransport(DebuggerIPCControlBlock* pIn, DebuggerIPCControlBl pOut->m_errorHR = pIn->m_errorHR; pOut->m_errorCode = pIn->m_errorCode; -#if defined(DBG_TARGET_64BIT) +#if defined(TARGET_64BIT) pOut->padding4 = pIn->padding4; -#endif // DBG_TARGET_64BIT +#endif // TARGET_64BIT pOut->m_realHelperThreadId = pIn->m_realHelperThreadId; pOut->m_helperThreadId = pIn->m_helperThreadId; @@ -1152,7 +1152,7 @@ HRESULT DbgTransportSession::CheckBufferAccess(__in_ecount(cbBuffer) PBYTE pbBuf // VirtualQuery doesn't know much about memory allocated outside of PAL's VirtualAlloc // that's why on Unix we can't rely on in to detect invalid memory reads -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX do { // Find the attributes of the largest set of pages with common attributes starting from our base address. diff --git a/src/coreclr/src/debug/shim/CMakeLists.txt b/src/coreclr/src/debug/shim/CMakeLists.txt index 750995c9c0174..f54e2087e53e3 100644 --- a/src/coreclr/src/debug/shim/CMakeLists.txt +++ b/src/coreclr/src/debug/shim/CMakeLists.txt @@ -1,7 +1,7 @@ if(WIN32) #use static crt add_definitions(-MT) - add_definitions(-DHOST_IS_WINDOWS_OS) + add_definitions(-DHOST_WINDOWS) endif(WIN32) set(DEBUGSHIM_SOURCES diff --git a/src/coreclr/src/debug/shim/debugshim.cpp b/src/coreclr/src/debug/shim/debugshim.cpp index f280b8ae169e9..3b1d9363864e9 100644 --- a/src/coreclr/src/debug/shim/debugshim.cpp +++ b/src/coreclr/src/debug/shim/debugshim.cpp @@ -22,18 +22,6 @@ #define IMAGE_FILE_MACHINE_ARM64 0xAA64 // ARM64 Little-Endian #endif -// making the defines very clear, these represent the host architecture - aka -// the arch on which this code is running -#if defined(_X86_) -#define _HOST_X86_ -#elif defined(_AMD64_) -#define _HOST_AMD64_ -#elif defined(_ARM_) -#define _HOST_ARM_ -#elif defined(_ARM64_) -#define _HOST_ARM64_ -#endif - //***************************************************************************** // CLRDebuggingImpl implementation (ICLRDebugging) //***************************************************************************** @@ -225,7 +213,7 @@ STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess( _ASSERTE(pFlags == NULL || *pFlags == 0); } } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX else { // On Linux/MacOS the DAC module handle needs to be re-created using the DAC PAL instance @@ -245,7 +233,7 @@ STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess( hr = E_HANDLE; } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // If no errors so far and "OpenVirtualProcessImpl2" doesn't exist @@ -290,7 +278,7 @@ STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess( if (pDacModulePath != NULL) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX free(pDacModulePath); #else CoTaskMemFree(pDacModulePath); 
@@ -299,7 +287,7 @@ STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess( if (pDbiModulePath != NULL) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX free(pDbiModulePath); #else CoTaskMemFree(pDbiModulePath); @@ -422,7 +410,7 @@ HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget, __out_z __inout_ecount(dwDacNameCharCount) WCHAR* pDacName, DWORD dwDacNameCharCount) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX WORD imageFileMachine = 0; DWORD resourceSectionRVA = 0; HRESULT hr = GetMachineAndResourceSectionRVA(pDataTarget, moduleBaseAddress, &imageFileMachine, &resourceSectionRVA); @@ -475,35 +463,35 @@ HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget, HRESULT hrGetResource = E_FAIL; // First check for the resource which has type = RC_DATA = 10, name = "CLRDEBUGINFO", language = 0 -#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_X86_) +#if defined (HOST_WINDOWS) && defined(HOST_X86) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSX86"); #endif -#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_X86_) +#if !defined (HOST_WINDOWS) && defined(HOST_X86) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSX86"); #endif -#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_AMD64_) +#if defined (HOST_WINDOWS) && defined(HOST_AMD64) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSAMD64"); #endif -#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_AMD64_) +#if !defined (HOST_WINDOWS) && defined(HOST_AMD64) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSAMD64"); #endif -#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM64_) +#if defined (HOST_WINDOWS) && defined(HOST_ARM64) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM64"); #endif -#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM64_) +#if !defined (HOST_WINDOWS) && defined(HOST_ARM64) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM64"); #endif -#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM_) +#if defined (HOST_WINDOWS) && defined(HOST_ARM) const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM"); #endif -#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM_) +#if !defined (HOST_WINDOWS) && defined(HOST_ARM) const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM"); #endif @@ -512,12 +500,12 @@ HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget, useCrossPlatformNaming = SUCCEEDED(hrGetResource); -#if defined(HOST_IS_WINDOWS_OS) && (defined(_HOST_X86_) || defined(_HOST_AMD64_) || defined(_HOST_ARM_)) - #if defined(_HOST_X86_) +#if defined(HOST_WINDOWS) && (defined(HOST_X86) || defined(HOST_AMD64) || defined(HOST_ARM)) + #if defined(HOST_X86) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_I386 - #elif defined(_HOST_AMD64_) + #elif defined(HOST_AMD64) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_AMD64 - #elif defined(_HOST_ARM_) + #elif defined(HOST_ARM) #define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_ARMNT #endif @@ -608,7 +596,7 @@ HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget, *pdwDacSizeOfImage = 0; return S_OK; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Formats the long name for DAC @@ -618,18 +606,18 @@ HRESULT CLRDebuggingImpl::FormatLongDacModuleName(__out_z __inout_ecount(cchBuff VS_FIXEDFILEINFO * pVersion) { -#ifndef HOST_IS_WINDOWS_OS +#ifndef HOST_WINDOWS _ASSERTE(!"NYI"); return E_NOTIMPL; #endif -#if defined(_HOST_X86_) +#if defined(HOST_X86) const WCHAR* pHostArch = W("x86"); -#elif defined(_HOST_AMD64_) +#elif defined(HOST_AMD64) const WCHAR* pHostArch = W("amd64"); -#elif defined(_HOST_ARM_) +#elif defined(HOST_ARM) 
const WCHAR* pHostArch = W("arm"); -#elif defined(_HOST_ARM64_) +#elif defined(HOST_ARM64) const WCHAR* pHostArch = W("arm64"); #else _ASSERTE(!"Unknown host arch"); diff --git a/src/coreclr/src/dlls/dbgshim/dbgshim.cpp b/src/coreclr/src/dlls/dbgshim/dbgshim.cpp index aad052496e797..d894e7b98ee9b 100644 --- a/src/coreclr/src/dlls/dbgshim/dbgshim.cpp +++ b/src/coreclr/src/dlls/dbgshim/dbgshim.cpp @@ -15,7 +15,7 @@ #include #include #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include #endif @@ -26,7 +26,7 @@ #include #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define PSAPI_VERSION 2 #include #endif @@ -55,7 +55,7 @@ if it exists, it will: */ -#ifdef FEATURE_PAL +#ifdef HOST_UNIX #define INITIALIZE_SHIM { if (PAL_InitializeDLL() != 0) return E_FAIL; } #else #define INITIALIZE_SHIM @@ -170,7 +170,7 @@ CloseResumeHandle( return S_OK; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static void @@ -179,7 +179,7 @@ RuntimeStartupHandler( HMODULE hModule, PVOID parameter); -#else // FEATURE_PAL +#else // TARGET_UNIX static DWORD @@ -193,7 +193,7 @@ GetContinueStartupEvent( LPCWSTR szTelestoFullPath, __out HANDLE *phContinueStartupEvent); -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Functions that we'll look for in the loaded Mscordbi module. typedef HRESULT (STDAPICALLTYPE *FPCoreCLRCreateCordbObject)( @@ -261,7 +261,7 @@ class RuntimeStartupHelper DWORD m_processId; PSTARTUP_CALLBACK m_callback; PVOID m_parameter; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PVOID m_unregisterToken; LPWSTR m_applicationGroupId; #else @@ -269,7 +269,7 @@ class RuntimeStartupHelper HANDLE m_startupEvent; DWORD m_threadId; HANDLE m_threadHandle; -#endif // FEATURE_PAL +#endif // TARGET_UNIX public: RuntimeStartupHelper(DWORD dwProcessId, PSTARTUP_CALLBACK pfnCallback, PVOID parameter) : @@ -277,7 +277,7 @@ class RuntimeStartupHelper m_processId(dwProcessId), m_callback(pfnCallback), m_parameter(parameter), -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_unregisterToken(NULL), m_applicationGroupId(NULL) #else @@ -285,18 +285,18 @@ class RuntimeStartupHelper m_startupEvent(NULL), m_threadId(0), m_threadHandle(NULL) -#endif // FEATURE_PAL +#endif // TARGET_UNIX { } ~RuntimeStartupHelper() { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (m_applicationGroupId != NULL) { delete m_applicationGroupId; } -#else // FEATURE_PAL +#else // TARGET_UNIX if (m_startupEvent != NULL) { CloseHandle(m_startupEvent); @@ -305,7 +305,7 @@ class RuntimeStartupHelper { CloseHandle(m_threadHandle); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } LONG AddRef() @@ -324,7 +324,7 @@ class RuntimeStartupHelper return ref; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX HRESULT Register(LPCWSTR lpApplicationGroupId) { @@ -419,7 +419,7 @@ class RuntimeStartupHelper } } -#else // FEATURE_PAL +#else // TARGET_UNIX HRESULT Register(LPCWSTR lpApplicationGroupId) { @@ -656,10 +656,10 @@ class RuntimeStartupHelper } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX }; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static void @@ -669,7 +669,7 @@ RuntimeStartupHandler(char *pszModulePath, HMODULE hModule, PVOID parameter) helper->InvokeStartupCallback(pszModulePath, hModule); } -#else // FEATURE_PAL +#else // TARGET_UNIX static DWORD @@ -681,7 +681,7 @@ StartupHelperThread(LPVOID p) return 0; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX //----------------------------------------------------------------------------- // Public API. 
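In the debugshim hunks above, the CLRDEBUGINFO resource name is chosen from two host facts: whether the host is Windows (HOST_WINDOWS) and the host architecture (HOST_X86, HOST_AMD64, HOST_ARM, HOST_ARM64). The real code spells out one #if block per combination; the helper below is a hypothetical condensation that only illustrates the naming scheme.

```cpp
// Sketch: compose "CLRDEBUGINFO" + OS half + arch half from HOST_* macros.
#include <cwchar>

static const wchar_t* ClrDebugInfoResourceName()
{
#if defined(HOST_WINDOWS)
    const wchar_t* os = L"WINDOWS";
#else
    const wchar_t* os = L"CORESYS";
#endif
#if defined(HOST_X86)
    const wchar_t* arch = L"X86";
#elif defined(HOST_AMD64)
    const wchar_t* arch = L"AMD64";
#elif defined(HOST_ARM)
    const wchar_t* arch = L"ARM";
#elif defined(HOST_ARM64)
    const wchar_t* arch = L"ARM64";
#else
    const wchar_t* arch = L"UNKNOWN";
#endif
    static wchar_t name[64];                        // sketch only, not thread-safe
    std::swprintf(name, 64, L"CLRDEBUGINFO%ls%ls", os, arch);
    return name;
}
```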
@@ -833,7 +833,7 @@ GetStartupNotificationEvent( if (phStartupEvent == NULL) return E_INVALIDARG; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT hr; DWORD currentSessionId = 0, debuggeeSessionId = 0; if (!ProcessIdToSessionId(GetCurrentProcessId(), &currentSessionId)) @@ -901,7 +901,7 @@ GetStartupNotificationEvent( #else *phStartupEvent = NULL; return E_NOTIMPL; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Refer to clr\src\mscoree\mscorwks_ntdef.src. const WORD kOrdinalForMetrics = 2; @@ -942,7 +942,7 @@ GetTargetCLRMetrics( CONSISTENCY_CHECK(szTelestoFullPath != NULL); CONSISTENCY_CHECK(pEngineMetricsOut != NULL); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT hr = S_OK; HandleHolder hCoreClrFile = WszCreateFile(szTelestoFullPath, @@ -1084,7 +1084,7 @@ GetTargetCLRMetrics( { *pdwRVAContinueStartupEvent = NULL; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Returns true iff the module represents CoreClr. @@ -1301,7 +1301,7 @@ EnumerateCLRs( pStringArray[idx] = &pStringData[idx * MAX_LONGPATH]; GetModuleFileNameEx(hProcess, modules[i], pStringArray[idx], MAX_LONGPATH); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // fill in event handle -- if GetContinueStartupEvent fails, it will still return // INVALID_HANDLE_VALUE in hContinueStartupEvent, which is what we want. we don't // want to bail out of the enumeration altogether if we can't get an event from @@ -1312,7 +1312,7 @@ EnumerateCLRs( pEventArray[idx] = hContinueStartupEvent; #else pEventArray[idx] = NULL; -#endif // FEATURE_PAL +#endif // TARGET_UNIX idx++; } @@ -1373,7 +1373,7 @@ CloseCLREnumeration( if ((pHandleArray + dwArrayLength) != (HANDLE*)pStringArray) return E_INVALIDARG; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX for (DWORD i = 0; i < dwArrayLength; i++) { HANDLE hTemp = pHandleArray[i]; @@ -1383,7 +1383,7 @@ CloseCLREnumeration( CloseHandle(hTemp); } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX delete[] pHandleArray; return S_OK; @@ -1677,7 +1677,7 @@ CheckDbiAndRuntimeVersion( SString & szFullDbiPath, SString & szFullCoreClrPath) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD dwDbiVersionMS = 0; DWORD dwDbiVersionLS = 0; DWORD dwCoreClrVersionMS = 0; @@ -1698,7 +1698,7 @@ CheckDbiAndRuntimeVersion( } #else return true; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } //----------------------------------------------------------------------------- @@ -1806,7 +1806,7 @@ CreateDebuggingInterfaceFromVersion2( // Issue:951525: coreclr mscordbi load fails on downlevel OS since LoadLibraryEx can't find // dependent forwarder DLLs. Force LoadLibrary to look for dependencies in szFullDbiPath plus the default // search paths.
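The comment above explains why mscordbi is loaded with explicit search-path flags on Windows while the Unix (PAL) path passes 0. A hedged sketch of that load policy, assuming a Windows build where windows.h provides LoadLibraryExW and the LOAD_LIBRARY_SEARCH_* flags; the wrapper name is hypothetical.

```cpp
#ifdef _WIN32
#include <windows.h>

static HMODULE LoadDbiModule(const wchar_t* szFullDbiPath)
{
#ifndef TARGET_UNIX
    // Resolve dependent forwarder DLLs next to mscordbi and in the default
    // search directories rather than the legacy search order.
    return ::LoadLibraryExW(szFullDbiPath, nullptr,
                            LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR |
                            LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
#else
    // PAL LoadLibrary emulation: no special search flags are supported.
    return ::LoadLibraryExW(szFullDbiPath, nullptr, 0);
#endif
}
#endif // _WIN32
```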
-#ifndef FEATURE_PAL +#ifndef TARGET_UNIX hMod = WszLoadLibraryEx(szFullDbiPath, NULL, LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS); #else hMod = LoadLibraryExW(szFullDbiPath, NULL, 0); @@ -1890,7 +1890,7 @@ CreateDebuggingInterfaceFromVersion( return CreateDebuggingInterfaceFromVersionEx(CorDebugVersion_2_0, szDebuggeeVersion, ppCordb); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //------------------------------------------------------------------------------ // Manually retrieves the "continue startup" event from the correct CLR instance @@ -1955,7 +1955,7 @@ GetContinueStartupEvent( return hr; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #if defined(FEATURE_CORESYSTEM) #include "debugshim.h" diff --git a/src/coreclr/src/dlls/mscordac/libredefines.S b/src/coreclr/src/dlls/mscordac/libredefines.S index 2449b9e3b1514..0a2bb97e274f5 100644 --- a/src/coreclr/src/dlls/mscordac/libredefines.S +++ b/src/coreclr/src/dlls/mscordac/libredefines.S @@ -1,10 +1,10 @@ -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) .intel_syntax noprefix #endif #include "unixasmmacros.inc" -#if defined(_ARM_) +#if defined(HOST_ARM) .syntax unified .thumb #endif diff --git a/src/coreclr/src/dlls/mscordac/mscordac.src b/src/coreclr/src/dlls/mscordac/mscordac.src index e28bd5df1fb48..8ff2f4678ec71 100644 --- a/src/coreclr/src/dlls/mscordac/mscordac.src +++ b/src/coreclr/src/dlls/mscordac/mscordac.src @@ -7,10 +7,10 @@ EXPORTS OutOfProcessFunctionTableCallbackEx DacDbiInterfaceInstance -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX OutOfProcessExceptionEventCallback OutOfProcessExceptionEventSignatureCallback -#endif // FEATURE_PAL +#endif // TARGET_UNIX OutOfProcessExceptionEventDebuggerLaunchCallback CLRDataCreateInstance diff --git a/src/coreclr/src/dlls/mscordac/palredefines.S b/src/coreclr/src/dlls/mscordac/palredefines.S index b0cd6159d357b..b0f5990895be7 100644 --- a/src/coreclr/src/dlls/mscordac/palredefines.S +++ b/src/coreclr/src/dlls/mscordac/palredefines.S @@ -1,10 +1,10 @@ -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) .intel_syntax noprefix #endif #include "unixasmmacros.inc" -#if defined(_ARM_) +#if defined(HOST_ARM) .syntax unified .thumb #endif diff --git a/src/coreclr/src/dlls/mscordbi/mscordbi.cpp b/src/coreclr/src/dlls/mscordbi/mscordbi.cpp index 82a158c106089..e03cf76c9f91e 100644 --- a/src/coreclr/src/dlls/mscordbi/mscordbi.cpp +++ b/src/coreclr/src/dlls/mscordbi/mscordbi.cpp @@ -19,7 +19,7 @@ extern BOOL WINAPI DbgDllMain(HINSTANCE hInstance, DWORD dwReason, // OS when the dll gets loaded. Control is simply deferred to the main code. 
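The DllMain hunks above and below add DLLEXPORT only under TARGET_UNIX because the PAL's LoadLibrary emulation resolves DllMain as an exported symbol, while the Windows loader reaches it through the PE entry point. A simplified, GCC/Clang-flavored sketch of that shape; DllMainSketch and the plain pointer types are illustrative, not the real entry point or the build's DLLEXPORT definition.

```cpp
#if defined(TARGET_UNIX)
#define DLLEXPORT __attribute__((visibility("default")))
#else
#define DLLEXPORT
#endif

extern "C" DLLEXPORT
int DllMainSketch(void* hInstance, unsigned long dwReason, void* lpReserved)
{
    (void)hInstance; (void)dwReason; (void)lpReserved;
    return 1; // TRUE: nothing to do here, initialization is deferred elsewhere
}
```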
//***************************************************************************** extern "C" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL WINAPI DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved) diff --git a/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt b/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt index 82348133f60ac..efd7c60caa51c 100644 --- a/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt +++ b/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt @@ -159,16 +159,16 @@ if(WIN32) get_include_directories(INC_DIR) get_compile_definitions(PREPROCESS_DEFINITIONS) list(APPEND INC_DIR -I${CLR_DIR}/src/vm -I${CLR_DIR}/src/vm/${ARCH_SOURCES_DIR} -I${CLR_DIR}/src/debug/ee -I${CLR_DIR}/src/gc) - list(APPEND PREPROCESS_DEFINITIONS -DDACCESS_COMPILE -DDBG_TARGET_64BIT) + list(APPEND PREPROCESS_DEFINITIONS -DDACCESS_COMPILE -DTARGET_64BIT) if (CLR_CMAKE_HOST_ARCH_AMD64) - list(APPEND PREPROCESS_DEFINITIONS -DDBG_TARGET_AMD64) + list(APPEND PREPROCESS_DEFINITIONS -DTARGET_AMD64) elseif (CLR_CMAKE_HOST_ARCH_ARM64) - list(APPEND PREPROCESS_DEFINITIONS -DDBG_TARGET_ARM64) + list(APPEND PREPROCESS_DEFINITIONS -DTARGET_ARM64) elseif (CLR_CMAKE_HOST_ARCH_ARM) - list(APPEND PREPROCESS_DEFINITIONS -DDBG_TARGET_ARM) + list(APPEND PREPROCESS_DEFINITIONS -DTARGET_ARM) elseif (CLR_CMAKE_HOST_ARCH_I386) - list(APPEND PREPROCESS_DEFINITIONS -DDBG_TARGET_X86) + list(APPEND PREPROCESS_DEFINITIONS -DTARGET_X86) else() clr_unknown_arch() endif() diff --git a/src/coreclr/src/dlls/mscoree/mscoree.cpp b/src/coreclr/src/dlls/mscoree/mscoree.cpp index 95c15cd5ee9cc..2a8c7875b87c3 100644 --- a/src/coreclr/src/dlls/mscoree/mscoree.cpp +++ b/src/coreclr/src/dlls/mscoree/mscoree.cpp @@ -38,7 +38,7 @@ HINSTANCE g_hThisInst; // This library. extern "C" IExecutionEngine* IEE(); -#ifdef PLATFORM_WINDOWS +#ifdef TARGET_WINDOWS #include // for __security_init_cookie() @@ -99,11 +99,11 @@ extern "C" BOOL WINAPI CoreDllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpRe return result; } -#endif // PLATFORM_WINDOWS +#endif // TARGET_WINDOWS extern "C" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX DLLEXPORT // For Win32 PAL LoadLibrary emulation #endif BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved) @@ -114,7 +114,7 @@ BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved) { case DLL_PROCESS_ATTACH: { -#ifndef PLATFORM_WINDOWS +#ifndef TARGET_WINDOWS // It's critical that we invoke InitUtilCode() before the CRT initializes. // We have a lot of global ctors that will break if we let the CRT initialize without // this step having been done. 
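Taken together, the renames in these hunks separate two axes: HOST_* macros describe the machine the code runs on, and TARGET_* macros describe the runtime being produced or inspected. They usually coincide, but cross-targeting components such as DAC and DBI can have them differ. A tiny diagnostic sketch that assumes the build defines those macros:

```cpp
#include <cstdio>

static const char* HostOS()
{
#if defined(HOST_WINDOWS)
    return "windows";
#elif defined(HOST_UNIX)
    return "unix";
#else
    return "unknown";
#endif
}

static const char* TargetOS()
{
#if defined(TARGET_WINDOWS)
    return "windows";
#elif defined(TARGET_UNIX)
    return "unix";
#else
    return "unknown";
#endif
}

int main()
{
    std::printf("host=%s target=%s\n", HostOS(), TargetOS());
    return 0;
}
```

For single-platform builds the two strings match; the split only matters for tools that run on one platform while inspecting another.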
diff --git a/src/coreclr/src/dlls/mscoree/unixinterface.cpp b/src/coreclr/src/dlls/mscoree/unixinterface.cpp index d8a5443d0375d..35d7c56c14c8e 100644 --- a/src/coreclr/src/dlls/mscoree/unixinterface.cpp +++ b/src/coreclr/src/dlls/mscoree/unixinterface.cpp @@ -167,7 +167,7 @@ int coreclr_initialize( unsigned int* domainId) { HRESULT hr; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX DWORD error = PAL_InitializeCoreCLR(exePath); hr = HRESULT_FROM_WIN32(error); @@ -288,7 +288,7 @@ int coreclr_shutdown( hr = host->Stop(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_Shutdown(); #endif @@ -320,7 +320,7 @@ int coreclr_shutdown_2( hr = host->Stop(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_Shutdown(); #endif diff --git a/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp b/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp index f1532f1671a6d..8c18573c1b518 100644 --- a/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp +++ b/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp @@ -334,7 +334,7 @@ HRESULT CeeFileGenWriter::link() hr = emitExeMain(); if (FAILED(hr)) return hr; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX hr = emitResourceSection(); if (FAILED(hr)) return hr; @@ -388,7 +388,7 @@ HRESULT CeeFileGenWriter::generateImage(void **ppImage) HRESULT hr = S_OK; LPCWSTR outputFileName = NULL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HANDLE hThreadToken = NULL; // Impersonation is only supported on Win2k and above. if (!OpenThreadToken(GetCurrentThread(), TOKEN_READ | TOKEN_IMPERSONATE, TRUE, &hThreadToken)) @@ -409,7 +409,7 @@ HRESULT CeeFileGenWriter::generateImage(void **ppImage) return HRESULT_FROM_GetLastError(); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef ENC_DELTA_HACK // fixups break because we've set the base RVA to 0 for the delta stream @@ -440,7 +440,7 @@ HRESULT CeeFileGenWriter::generateImage(void **ppImage) IfFailGo(getPEWriter().write(ppImage)); ErrExit: -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (hThreadToken != NULL) { BOOL success = SetThreadToken(NULL, hThreadToken); @@ -452,7 +452,7 @@ HRESULT CeeFileGenWriter::generateImage(void **ppImage) hr = HRESULT_FROM_GetLastError(); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return hr; } // HRESULT CeeFileGenWriter::generateImage() @@ -962,7 +962,7 @@ HRESULT GetClrSystemDirectory(SString& pbuffer) return CopySystemDirectory(pPath, pbuffer); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL RunProcess(LPCWSTR tempResObj, LPCWSTR pszFilename, DWORD* pdwExitCode, PEWriter &pewriter) { BOOL fSuccess = FALSE; @@ -1442,7 +1442,7 @@ lDone: ; return hr; } // HRESULT CeeFileGenWriter::emitResourceSection() -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX HRESULT CeeFileGenWriter::setManifestEntry(ULONG size, ULONG offset) { @@ -1608,7 +1608,7 @@ HRESULT CeeFileGenWriter::addFixup(CeeSection& sectionSource, unsigned offset, C TESTANDRETURN(pfixup != NULL, E_OUTOFMEMORY); // Initialize the IMAGE_DEBUG_TYPE_FIXUP entry relocations -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE(!"Base relocs are not yet implemented for 64-bit"); m_pDebugDir->AddressOfRawData = 0; // @ToDo: srRelocAbsolutePtr can't take a 64-bit address #else @@ -1716,7 +1716,7 @@ HRESULT CeeFileGenWriter::UpdateFixups() switch (relocType) { -#ifdef _X86_ +#ifdef HOST_X86 case srRelocAbsolute: // Emitted bytes: RVA, offset relative to image base // reloc src contains target offset relative to target section @@ -1778,7 +1778,7 @@ HRESULT CeeFileGenWriter::UpdateFixups() pfixup->wType = IMAGE_REL_I386_TOKEN; break; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) /* // 
// X86-64 relocations diff --git a/src/coreclr/src/dlls/mscorpe/pewriter.cpp b/src/coreclr/src/dlls/mscorpe/pewriter.cpp index ab03a85826de3..e274bcf571378 100644 --- a/src/coreclr/src/dlls/mscorpe/pewriter.cpp +++ b/src/coreclr/src/dlls/mscorpe/pewriter.cpp @@ -246,7 +246,7 @@ static inline HRESULT SubOvf_U_U32(UINT64 & a, unsigned int b) return S_OK; } -#ifndef _AMD64_ +#ifndef HOST_AMD64 /* subtract two unsigned pointers yeilding a signed pointer sized int */ static inline HRESULT SubOvf_U_U(INT64 & r, UINT64 a, UINT64 b) { @@ -482,7 +482,7 @@ HRESULT PEWriterSection::applyRelocs(IMAGE_NT_HEADERS * pNtHeaders, else if (curType == srRelocRelative) { if (externalAddress) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) newStarPos = GET_UNALIGNED_INT32(pos); #else // x86 UINT64 targetAddr = GET_UNALIGNED_VAL32(pos); @@ -954,7 +954,7 @@ HRESULT PEWriter::Init(PESectionMan *pFrom, DWORD createFlags, LPCWSTR seedFileN m_hSeedFileMap = hMapFile; m_pSeedFileDecoder = pPEDecoder; -#ifdef BIT64 +#ifdef HOST_64BIT m_pSeedFileNTHeaders = pPEDecoder->GetNTHeaders64(); #else m_pSeedFileNTHeaders = pPEDecoder->GetNTHeaders32(); @@ -1914,9 +1914,9 @@ HRESULT PEWriter::fixup(CeeGenTokenMapper *pMapper) switch((int)rcur->type) { case 0x7FFA: // Ptr to symbol name -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE(!"this is probably broken!!"); -#endif // BIT64 +#endif // HOST_64BIT szSymbolName = (char*)(UINT_PTR)(rcur->offset); break; @@ -1934,16 +1934,16 @@ HRESULT PEWriter::fixup(CeeGenTokenMapper *pMapper) else return E_OUTOFMEMORY; TokInSymbolTable[NumberOfSymbols++] = 0; memset(&is,0,sizeof(IMAGE_SYMBOL)); -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE(!"this is probably broken!!"); -#endif // BIT64 +#endif // HOST_64BIT strcpy_s((char*)&is,sizeof(is),(char*)(UINT_PTR)(rcur->offset)); if((pch = reloc->getBlock(sizeof(IMAGE_SYMBOL)))) memcpy(pch,&is,sizeof(IMAGE_SYMBOL)); else return E_OUTOFMEMORY; -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE(!"this is probably broken!!"); -#endif // BIT64 +#endif // HOST_64BIT delete (char*)(UINT_PTR)(rcur->offset); ToRelocTable = FALSE; tk = 0; diff --git a/src/coreclr/src/gc/env/common.h b/src/coreclr/src/gc/env/common.h index 1c2f75c9d6140..35a9fd1737db5 100644 --- a/src/coreclr/src/gc/env/common.h +++ b/src/coreclr/src/gc/env/common.h @@ -25,7 +25,7 @@ #include -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #include #endif diff --git a/src/coreclr/src/gc/env/gcenv.base.h b/src/coreclr/src/gc/env/gcenv.base.h index 48077fb32e77d..45774e8e38d41 100644 --- a/src/coreclr/src/gc/env/gcenv.base.h +++ b/src/coreclr/src/gc/env/gcenv.base.h @@ -55,7 +55,7 @@ typedef uint32_t ULONG; // ----------------------------------------------------------------------------------------------------------- // HRESULT subset. 
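The gcenv.base.h hunk below keeps a small HRESULT subset for TARGET_UNIX builds, where windows.h is unavailable, while requiring the Windows typedef to stay layout-identical to windows.h. A standalone sketch of that subset with a couple of representative codes; the helper name is illustrative.

```cpp
#include <cstdint>

#ifdef TARGET_UNIX
typedef int32_t HRESULT;
#else
typedef long HRESULT;                 // must match the windows.h typedef exactly
#endif

#define S_OK          ((HRESULT)0L)
#define E_FAIL        ((HRESULT)0x80004005L)
#define E_OUTOFMEMORY ((HRESULT)0x8007000EL)

inline bool Succeeded(HRESULT hr) { return hr >= 0; }
```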
-#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX typedef int32_t HRESULT; #else // this must exactly match the typedef used by windows.h @@ -106,7 +106,7 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x) #define UNREFERENCED_PARAMETER(P) (void)(P) -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #define _vsnprintf_s(string, sizeInBytes, count, format, args) vsnprintf(string, sizeInBytes, format, args) #define sprintf_s snprintf #define swprintf_s swprintf @@ -135,14 +135,14 @@ typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter); #define WAIT_FAILED 0xFFFFFFFF #if defined(_MSC_VER) - #if defined(_ARM_) + #if defined(HOST_ARM) __forceinline void YieldProcessor() { } extern "C" void __emit(const unsigned __int32 opcode); #pragma intrinsic(__emit) #define MemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); } - #elif defined(_ARM64_) + #elif defined(HOST_ARM64) extern "C" void __yield(void); #pragma intrinsic(__yield) @@ -152,7 +152,7 @@ typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter); #pragma intrinsic(__dmb) #define MemoryBarrier() { __dmb(_ARM64_BARRIER_SY); } - #elif defined(_AMD64_) + #elif defined(HOST_AMD64) extern "C" void _mm_pause ( @@ -170,7 +170,7 @@ typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter); #define YieldProcessor _mm_pause #define MemoryBarrier _mm_mfence - #elif defined(_X86_) + #elif defined(HOST_X86) #define YieldProcessor() __asm { rep nop } #define MemoryBarrier() MemoryBarrierImpl() @@ -182,7 +182,7 @@ typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter); } } - #else // !_ARM_ && !_AMD64_ && !_X86_ + #else // !HOST_ARM && !HOST_AMD64 && !HOST_X86 #error Unsupported architecture #endif #else // _MSC_VER diff --git a/src/coreclr/src/gc/env/gcenv.interlocked.inl b/src/coreclr/src/gc/env/gcenv.interlocked.inl index e3a430c59abc4..02361463ff51c 100644 --- a/src/coreclr/src/gc/env/gcenv.interlocked.inl +++ b/src/coreclr/src/gc/env/gcenv.interlocked.inl @@ -14,10 +14,10 @@ #ifndef _MSC_VER __forceinline void Interlocked::ArmInterlockedOperationBarrier() { -#ifdef _ARM64_ +#ifdef HOST_ARM64 // See PAL_ArmInterlockedOperationBarrier() in the PAL __sync_synchronize(); -#endif // _ARM64_ +#endif // HOST_ARM64 } #endif // !_MSC_VER @@ -133,7 +133,7 @@ template __forceinline T Interlocked::ExchangeAddPtr(T volatile* addend, T value) { #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT static_assert(sizeof(int64_t) == sizeof(T), "Size of LONGLONG must be the same as size of T"); return _InterlockedExchangeAdd64((int64_t*)addend, value); #else @@ -189,7 +189,7 @@ template __forceinline T Interlocked::ExchangePointer(T volatile * destination, T value) { #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value); #else return (T)(TADDR)_InterlockedExchange((long volatile *)(void* volatile *)destination, (long)(void*)value); @@ -205,7 +205,7 @@ template __forceinline T Interlocked::ExchangePointer(T volatile * destination, std::nullptr_t value) { #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value); #else return (T)(TADDR)_InterlockedExchange((long volatile *)(void* volatile *)destination, (long)(void*)value); @@ -229,7 +229,7 @@ template __forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, T comparand) { #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, 
exchange, comparand); #else return (T)(TADDR)_InterlockedCompareExchange((long volatile *)(void* volatile *)destination, (long)(void*)exchange, (long)(void*)comparand); @@ -245,7 +245,7 @@ template __forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand) { #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, (void*)exchange, (void*)comparand); #else return (T)(TADDR)_InterlockedCompareExchange((long volatile *)(void* volatile *)destination, (long)(void*)exchange, (long)(void*)comparand); diff --git a/src/coreclr/src/gc/env/gcenv.object.h b/src/coreclr/src/gc/env/gcenv.object.h index 6500c6a9fa008..7a9908a42f211 100644 --- a/src/coreclr/src/gc/env/gcenv.object.h +++ b/src/coreclr/src/gc/env/gcenv.object.h @@ -21,9 +21,9 @@ class ObjHeader { private: -#if defined(BIT64) +#if defined(HOST_64BIT) uint32_t m_uAlignpad; -#endif // BIT64 +#endif // HOST_64BIT uint32_t m_uSyncBlockValue; public: diff --git a/src/coreclr/src/gc/env/gcenv.os.h b/src/coreclr/src/gc/env/gcenv.os.h index 26d1ea8e2c251..ae398e5503e3b 100644 --- a/src/coreclr/src/gc/env/gcenv.os.h +++ b/src/coreclr/src/gc/env/gcenv.os.h @@ -140,7 +140,7 @@ class GCEvent { // GC thread function prototype typedef void (*GCThreadFunction)(void* param); -#ifdef BIT64 +#ifdef HOST_64BIT // Right now we support maximum 1024 procs - meaning that we will create at most // that many GC threads and GC heaps. #define MAX_SUPPORTED_CPUS 1024 @@ -148,7 +148,7 @@ typedef void (*GCThreadFunction)(void* param); #else #define MAX_SUPPORTED_CPUS 64 #define MAX_SUPPORTED_NODES 16 -#endif // BIT64 +#endif // HOST_64BIT // Add of processor indices used to store affinity. class AffinitySet diff --git a/src/coreclr/src/gc/env/gcenv.structs.h b/src/coreclr/src/gc/env/gcenv.structs.h index 4f51ad0d9e06d..f7f8f4038d466 100644 --- a/src/coreclr/src/gc/env/gcenv.structs.h +++ b/src/coreclr/src/gc/env/gcenv.structs.h @@ -16,7 +16,7 @@ struct GCSystemInfo typedef void * HANDLE; -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX typedef char TCHAR; #define _T(s) s @@ -30,7 +30,7 @@ typedef wchar_t TCHAR; #endif -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX class EEThreadId { @@ -57,7 +57,7 @@ class EEThreadId } }; -#else // PLATFORM_UNIX +#else // TARGET_UNIX #ifndef _INC_WINDOWS extern "C" uint32_t __stdcall GetCurrentThreadId(); @@ -84,11 +84,11 @@ class EEThreadId } }; -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX #ifndef _INC_WINDOWS -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX typedef struct _RTL_CRITICAL_SECTION { pthread_mutex_t mutex; diff --git a/src/coreclr/src/gc/env/volatile.h b/src/coreclr/src/gc/env/volatile.h index 948e7b7632467..c7964e9107d8f 100644 --- a/src/coreclr/src/gc/env/volatile.h +++ b/src/coreclr/src/gc/env/volatile.h @@ -67,12 +67,12 @@ #error The Volatile type is currently only defined for Visual C++ and GNU C++ #endif -#if defined(__GNUC__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_) && !defined(_ARM64_) +#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) #error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM or ARM64 CPUs #endif #if defined(__GNUC__) -#if defined(_ARM_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_ARM64) // This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows. 
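The volatile.h hunks below pick the barrier by host architecture: ARM and ARM64 need a real dmb, while x86/AMD64 only need to stop the compiler from reordering. A sketch of that selection in GCC/Clang syntax, assuming HOST_ARM/HOST_ARM64 come from the build; VolatileLoadSketch is illustrative, not the real VolatileLoad template.

```cpp
#if defined(__GNUC__)

#if defined(HOST_ARM) || defined(HOST_ARM64)
// Weak memory model: a real data memory barrier is required.
#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb ish" : : : "memory")
#else
// x86/AMD64: the hardware orders accesses strongly enough; only prevent the
// compiler from moving accesses across this point.
#define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
#endif

template <typename T>
T VolatileLoadSketch(const T* pt)
{
    T value = *(const volatile T*)pt;  // force an actual load
    VOLATILE_MEMORY_BARRIER();         // keep later reads from moving above it
    return value;
}

#endif // __GNUC__
```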
#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb ish" : : : "memory") #else @@ -88,8 +88,8 @@ // notice. // #define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory") -#endif // _ARM_ || _ARM64_ -#elif (defined(_ARM_) || defined(_ARM64_)) && _ISO_VOLATILE +#endif // HOST_ARM || HOST_ARM64 +#elif (defined(HOST_ARM) || defined(HOST_ARM64)) && _ISO_VOLATILE // ARM & ARM64 have a very weak memory model and very few tools to control that model. We're forced to perform a full // memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we // currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it @@ -127,7 +127,7 @@ struct RemoveVolatile // Starting at version 3.8, clang errors out on initializing of type int * to volatile int *. To fix this, we add two templates to cast away volatility // Helper structures for casting away volatileness -#if defined(_ARM64_) && defined(_MSC_VER) +#if defined(HOST_ARM64) && defined(_MSC_VER) #include #endif @@ -136,7 +136,7 @@ inline T VolatileLoad(T const * pt) { #ifndef DACCESS_COMPILE -#if defined(_ARM64_) && defined(__GNUC__) +#if defined(HOST_ARM64) && defined(__GNUC__) T val; static const unsigned lockFreeAtomicSizeMask = (1 << 1) | (1 << 2) | (1 << 4) | (1 << 8); if((1 << sizeof(T)) & lockFreeAtomicSizeMask) @@ -148,7 +148,7 @@ T VolatileLoad(T const * pt) val = *(T volatile const *)pt; asm volatile ("dmb ishld" : : : "memory"); } -#elif defined(_ARM64_) && defined(_MSC_VER) +#elif defined(HOST_ARM64) && defined(_MSC_VER) // silence warnings on casts in branches that are not taken. #pragma warning(push) #pragma warning(disable : 4302) @@ -218,7 +218,7 @@ inline void VolatileStore(T* pt, T val) { #ifndef DACCESS_COMPILE -#if defined(_ARM64_) && defined(__GNUC__) +#if defined(HOST_ARM64) && defined(__GNUC__) static const unsigned lockFreeAtomicSizeMask = (1 << 1) | (1 << 2) | (1 << 4) | (1 << 8); if((1 << sizeof(T)) & lockFreeAtomicSizeMask) { @@ -229,7 +229,7 @@ void VolatileStore(T* pt, T val) VOLATILE_MEMORY_BARRIER(); *(T volatile *)pt = val; } -#elif defined(_ARM64_) && defined(_MSC_VER) +#elif defined(HOST_ARM64) && defined(_MSC_VER) // silence warnings on casts in branches that are not taken. #pragma warning(push) #pragma warning(disable : 4302) diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp index 542da1e032765..873287fc4cb24 100644 --- a/src/coreclr/src/gc/gc.cpp +++ b/src/coreclr/src/gc/gc.cpp @@ -68,11 +68,11 @@ static size_t smoothed_desired_per_heap = 0; #define demotion_plug_len_th (6*1024*1024) -#ifdef BIT64 +#ifdef HOST_64BIT #define MARK_STACK_INITIAL_LENGTH 1024 #else #define MARK_STACK_INITIAL_LENGTH 128 -#endif // BIT64 +#endif // HOST_64BIT #define LOH_PIN_QUEUE_LENGTH 100 #define LOH_PIN_DECAY 10 @@ -409,7 +409,7 @@ size_t round_up_power2 (size_t size) // so return 1 (because 1 rounds up to itself). DWORD highest_set_bit_index; if (0 == -#ifdef BIT64 +#ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( @@ -430,7 +430,7 @@ size_t round_down_power2 (size_t size) // If the call failed, size must be zero so return zero. DWORD highest_set_bit_index; if (0 == -#ifdef BIT64 +#ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( @@ -452,7 +452,7 @@ int index_of_highest_set_bit (size_t value) // If the call failed (because value is zero), return -1. 
DWORD highest_set_bit_index; return (0 == -#ifdef BIT64 +#ifdef HOST_64BIT BitScanReverse64( #else BitScanReverse( @@ -2228,7 +2228,7 @@ size_t align_on_segment_hard_limit (size_t add) #ifdef SERVER_GC -#ifdef BIT64 +#ifdef HOST_64BIT #define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024)) #define LHEAP_ALLOC ((size_t)(1024*1024*256)) @@ -2238,11 +2238,11 @@ size_t align_on_segment_hard_limit (size_t add) #define INITIAL_ALLOC ((size_t)(1024*1024*64)) #define LHEAP_ALLOC ((size_t)(1024*1024*32)) -#endif // BIT64 +#endif // HOST_64BIT #else //SERVER_GC -#ifdef BIT64 +#ifdef HOST_64BIT #define INITIAL_ALLOC ((size_t)(1024*1024*256)) #define LHEAP_ALLOC ((size_t)(1024*1024*128)) @@ -2252,7 +2252,7 @@ size_t align_on_segment_hard_limit (size_t add) #define INITIAL_ALLOC ((size_t)(1024*1024*16)) #define LHEAP_ALLOC ((size_t)(1024*1024*16)) -#endif // BIT64 +#endif // HOST_64BIT #endif //SERVER_GC @@ -2559,7 +2559,7 @@ size_t gc_heap::current_total_committed_bookkeeping = 0; double gc_heap::short_plugs_pad_ratio = 0; #endif //SHORT_PLUGS -#if defined(BIT64) +#if defined(HOST_64BIT) #define MAX_ALLOWED_MEM_LOAD 85 // consider putting this in dynamic data - @@ -2568,7 +2568,7 @@ double gc_heap::short_plugs_pad_ratio = 0; #define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024) size_t gc_heap::youngest_gen_desired_th; -#endif //BIT64 +#endif //HOST_64BIT uint32_t gc_heap::last_gc_memory_load = 0; @@ -3695,11 +3695,11 @@ size_t seg_mapping_word_of (uint8_t* add) #else //GROWABLE_SEG_MAPPING_TABLE BOOL seg_mapping_table_init() { -#ifdef BIT64 +#ifdef HOST_64BIT uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024; #else uint64_t total_address_space = (uint64_t)4*1024*1024*1024; -#endif // BIT64 +#endif // HOST_64BIT size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr); seg_mapping_table = new seg_mapping[num_entries]; @@ -4656,9 +4656,9 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE) } #ifdef MULTIPLE_HEAPS -#ifdef BIT64 +#ifdef HOST_64BIT if (!large_seg) -#endif // BIT64 +#endif // HOST_64BIT { if (g_num_processors > 4) initial_seg_size /= 2; @@ -4679,11 +4679,11 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE) } #ifdef SEG_MAPPING_TABLE -#ifdef BIT64 +#ifdef HOST_64BIT seg_size = round_up_power2 (seg_size); #else seg_size = round_down_power2 (seg_size); -#endif // BIT64 +#endif // HOST_64BIT #endif //SEG_MAPPING_TABLE return (seg_size); @@ -5097,7 +5097,7 @@ BOOL gc_heap::unprotect_segment (heap_segment* seg) #endif #ifdef MULTIPLE_HEAPS -#ifdef _X86_ +#ifdef HOST_X86 #ifdef _MSC_VER #pragma warning(disable:4035) static ptrdiff_t get_cycle_count() @@ -5117,7 +5117,7 @@ BOOL gc_heap::unprotect_segment (heap_segment* seg) #else //_MSC_VER #error Unknown compiler #endif //_MSC_VER -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef _MSC_VER extern "C" uint64_t __rdtsc(); #pragma intrinsic(__rdtsc) @@ -5145,7 +5145,7 @@ extern "C" uint64_t __rdtsc(); // all buffer access times being reported as equal in access_time(). return 0; } -#endif //_TARGET_X86_ +#endif //TARGET_X86 // We may not be on contiguous numa nodes so need to store // the node index as well. 
@@ -5981,9 +5981,9 @@ bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_numb bool gc_heap::virtual_commit (void* address, size_t size, int h_number, bool* hard_limit_exceeded_p) { -#ifndef BIT64 +#ifndef HOST_64BIT assert (heap_hard_limit == 0); -#endif //!BIT64 +#endif //!HOST_64BIT if (heap_hard_limit) { @@ -6042,9 +6042,9 @@ bool gc_heap::virtual_commit (void* address, size_t size, int h_number, bool* ha bool gc_heap::virtual_decommit (void* address, size_t size, int h_number) { -#ifndef BIT64 +#ifndef HOST_64BIT assert (heap_hard_limit == 0); -#endif //!BIT64 +#endif //!HOST_64BIT bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size); @@ -7177,11 +7177,11 @@ BOOL gc_heap::card_bundles_enabled () #endif // CARD_BUNDLE -#if defined (_TARGET_AMD64_) +#if defined (TARGET_AMD64) #define brick_size ((size_t)4096) #else #define brick_size ((size_t)2048) -#endif //_TARGET_AMD64_ +#endif //TARGET_AMD64 inline size_t gc_heap::brick_of (uint8_t* add) @@ -7403,11 +7403,11 @@ uint32_t*& card_table_mark_array (uint32_t* c_table) return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array; } -#ifdef BIT64 +#ifdef HOST_64BIT #define mark_bit_pitch ((size_t)16) #else #define mark_bit_pitch ((size_t)8) -#endif // BIT64 +#endif // HOST_64BIT #define mark_word_width ((size_t)32) #define mark_word_size (mark_word_width * mark_bit_pitch) @@ -7851,11 +7851,11 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, top = saved_g_highest_address; } size_t ps = ha-la; -#ifdef BIT64 +#ifdef HOST_64BIT if (ps > (uint64_t)200*1024*1024*1024) ps += (uint64_t)100*1024*1024*1024; else -#endif // BIT64 +#endif // HOST_64BIT ps *= 2; if (saved_g_lowest_address < g_gc_lowest_address) @@ -9697,9 +9697,9 @@ BOOL gc_heap::is_mark_set (uint8_t* o) return marked (o); } -#if defined (_MSC_VER) && defined (_TARGET_X86_) +#if defined (_MSC_VER) && defined (TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame -#endif //_MSC_VER && _TARGET_X86_ +#endif //_MSC_VER && TARGET_X86 // return the generation number of an object. // It is assumed that the object is valid. @@ -9739,9 +9739,9 @@ int gc_heap::object_gennum_plan (uint8_t* o) return max_generation; } -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations -#endif //_MSC_VER && _TARGET_X86_ +#endif //_MSC_VER && TARGET_X86 heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number) { @@ -16312,7 +16312,7 @@ int gc_heap::generation_to_condemn (int n_initial, if (!provisional_mode_triggered && evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load)) { *elevation_requested_p = TRUE; -#ifdef BIT64 +#ifdef HOST_64BIT // if we are in high memory load and have consumed 10% of the gen2 budget, do a gen2 now. 
if (high_memory_load || v_high_memory_load) { @@ -16328,7 +16328,7 @@ int gc_heap::generation_to_condemn (int n_initial, if (n <= max_generation) { -#endif // BIT64 +#endif // HOST_64BIT if (high_fragmentation) { //elevate to max_generation @@ -16359,9 +16359,9 @@ int gc_heap::generation_to_condemn (int n_initial, n = max (n, max_generation - 1); dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n)); } -#ifdef BIT64 +#ifdef HOST_64BIT } -#endif // BIT64 +#endif // HOST_64BIT } if (!provisional_mode_triggered && (n == (max_generation - 1)) && (n_alloc < (max_generation -1))) @@ -17084,10 +17084,10 @@ void gc_heap::gc1() desired_per_heap = min_gc_size; } } -#ifdef BIT64 +#ifdef HOST_64BIT desired_per_heap = joined_youngest_desired (desired_per_heap); dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap)); -#endif // BIT64 +#endif // HOST_64BIT gc_data_global.final_youngest_desired = desired_per_heap; } #if 1 //subsumed by the linear allocation model @@ -21509,11 +21509,11 @@ void gc_heap::reset_mark_stack () // // The number of bits in a brick. -#if defined (_TARGET_AMD64_) +#if defined (TARGET_AMD64) #define brick_bits (12) #else #define brick_bits (11) -#endif //_TARGET_AMD64_ +#endif //TARGET_AMD64 C_ASSERT(brick_size == (1 << brick_bits)); // The number of bits needed to represent the offset to a child node. @@ -21786,7 +21786,7 @@ size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick, void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate) { -#ifdef BIT64 +#ifdef HOST_64BIT // We should never demote big plugs to gen0. if (gen == youngest_generation) { @@ -21818,7 +21818,7 @@ void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, u mark_stack_large_bos++; } } -#endif // BIT64 +#endif // HOST_64BIT generation_plan_allocation_start (gen) = allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1); @@ -23858,7 +23858,7 @@ void gc_heap::plan_phase (int condemned_gen_number) BOOL should_compact= FALSE; ephemeral_promotion = FALSE; -#ifdef BIT64 +#ifdef HOST_64BIT if ((!settings.concurrent) && !provisional_mode_triggered && ((condemned_gen_number < max_generation) && @@ -23882,11 +23882,11 @@ void gc_heap::plan_phase (int condemned_gen_number) } else { -#endif // BIT64 +#endif // HOST_64BIT should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand); -#ifdef BIT64 +#ifdef HOST_64BIT } -#endif // BIT64 +#endif // HOST_64BIT #ifdef FEATURE_LOH_COMPACTION loh_compacted_p = FALSE; @@ -24815,7 +24815,7 @@ void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL rese } ((CObjectHeader*)x)->SetFree(size); -#ifdef BIT64 +#ifdef HOST_64BIT #if BIGENDIAN #error "This won't work on big endian platforms" @@ -24861,7 +24861,7 @@ void gc_heap::clear_unused_array (uint8_t* x, size_t size) ((CObjectHeader*)x)->UnsetFree(); -#ifdef BIT64 +#ifdef HOST_64BIT #if BIGENDIAN #error "This won't work on big endian platforms" @@ -32602,7 +32602,7 @@ void gc_heap::compute_promoted_allocation (int gen_number) compute_in (gen_number); } -#ifdef BIT64 +#ifdef HOST_64BIT inline size_t gc_heap::trim_youngest_desired (uint32_t memory_load, size_t total_new_allocation, @@ -32666,7 +32666,7 @@ size_t gc_heap::joined_youngest_desired (size_t new_allocation) return final_new_allocation; } -#endif // BIT64 +#endif // HOST_64BIT inline gc_history_global* gc_heap::get_gc_data_global() @@ -32754,9 +32754,9 @@ void gc_heap::compute_new_dynamic_data (int gen_number) { 
dd_desired_allocation (dd) = higher_bound; } -#if defined (BIT64) && !defined (MULTIPLE_HEAPS) +#if defined (HOST_64BIT) && !defined (MULTIPLE_HEAPS) dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd)); -#endif // BIT64 && !MULTIPLE_HEAPS +#endif // HOST_64BIT && !MULTIPLE_HEAPS trim_youngest_desired_low_memory(); dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd))); } @@ -32854,7 +32854,7 @@ void gc_heap::decommit_ephemeral_segment_pages() if (settings.condemned_generation >= (max_generation-1)) { size_t new_slack_space = -#ifdef BIT64 +#ifdef HOST_64BIT max(min(min(soh_segment_size/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd)); #else #ifdef FEATURE_CORECLR @@ -32862,7 +32862,7 @@ void gc_heap::decommit_ephemeral_segment_pages() #else dd_max_size (dd); #endif //FEATURE_CORECLR -#endif // BIT64 +#endif // HOST_64BIT slack_space = min (slack_space, new_slack_space); } @@ -33067,9 +33067,9 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number, } } -#ifdef BIT64 +#ifdef HOST_64BIT BOOL high_memory = FALSE; -#endif // BIT64 +#endif // HOST_64BIT if (!should_compact) { @@ -33095,7 +33095,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number, #endif // BACKGROUND_GC } -#ifdef BIT64 +#ifdef HOST_64BIT // check for high memory situation if(!should_compact) { @@ -33127,7 +33127,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number, high_memory = TRUE; } } -#endif // BIT64 +#endif // HOST_64BIT } // The purpose of calling ensure_gap_allocation here is to make sure @@ -33144,9 +33144,9 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number, { //check the progress if ( -#ifdef BIT64 +#ifdef HOST_64BIT (high_memory && !should_compact) || -#endif // BIT64 +#endif // HOST_64BIT (generation_plan_allocation_start (generation_of (max_generation - 1)) >= generation_allocation_start (generation_of (max_generation - 1)))) { @@ -33379,7 +33379,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, uint32_t flags, int alloc_context acontext; acontext.init(); -#if BIT64 +#if HOST_64BIT size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size)); #else size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size)); @@ -36221,7 +36221,7 @@ HRESULT GCHeap::Initialize() bool is_restricted; gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&is_restricted); -#ifdef BIT64 +#ifdef HOST_64BIT gc_heap::heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit(); if (!(gc_heap::heap_hard_limit)) @@ -36248,7 +36248,7 @@ HRESULT GCHeap::Initialize() //printf ("heap_hard_limit is %Id, total physical mem: %Id, %s restricted\n", // gc_heap::heap_hard_limit, gc_heap::total_physical_mem, (is_restricted ? "is" : "is not")); -#endif //BIT64 +#endif //HOST_64BIT uint32_t nhp = 1; uint32_t nhp_from_config = 0; @@ -36271,7 +36271,7 @@ HRESULT GCHeap::Initialize() } if ((cpu_index_ranges_holder.Get() != nullptr) -#ifdef PLATFORM_WINDOWS +#ifdef TARGET_WINDOWS || (config_affinity_mask != 0) #endif ) @@ -36304,10 +36304,10 @@ HRESULT GCHeap::Initialize() { nhp = min(nhp, num_affinitized_processors); } -#ifndef PLATFORM_WINDOWS +#ifndef TARGET_WINDOWS // Limit the GC heaps to the number of processors available in the system. 
nhp = min (nhp, GCToOSInterface::GetTotalProcessorCount()); -#endif // !PLATFORM_WINDOWS +#endif // !TARGET_WINDOWS } #endif //!FEATURE_REDHAWK #endif //MULTIPLE_HEAPS @@ -36391,9 +36391,9 @@ HRESULT GCHeap::Initialize() gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0); -#if defined(BIT64) +#if defined(HOST_64BIT) gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent; -#endif // BIT64 +#endif // HOST_64BIT WaitForGCEvent = new (nothrow) GCEvent; @@ -37344,7 +37344,7 @@ BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p) HRESULT GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) { -#if defined(BIT64) +#if defined(HOST_64BIT) if (low_memory_p) { size_t total_allocated = 0; @@ -37373,7 +37373,7 @@ GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) return S_OK; } } -#endif // BIT64 +#endif // HOST_64BIT #ifdef MULTIPLE_HEAPS gc_heap* hpt = gc_heap::g_heaps[0]; diff --git a/src/coreclr/src/gc/gcdesc.h b/src/coreclr/src/gc/gcdesc.h index ac4e94a3abcb1..dee6c41480e4d 100644 --- a/src/coreclr/src/gc/gcdesc.h +++ b/src/coreclr/src/gc/gcdesc.h @@ -11,9 +11,9 @@ #ifndef _GCDESC_H_ #define _GCDESC_H_ -#ifdef BIT64 +#ifdef HOST_64BIT typedef uint32_t HALF_SIZE_T; -#else // BIT64 +#else // HOST_64BIT typedef uint16_t HALF_SIZE_T; #endif diff --git a/src/coreclr/src/gc/gcpriv.h b/src/coreclr/src/gc/gcpriv.h index 347c38491f55e..952eb83c695a8 100644 --- a/src/coreclr/src/gc/gcpriv.h +++ b/src/coreclr/src/gc/gcpriv.h @@ -182,19 +182,19 @@ void GCLogConfig (const char *fmt, ... ); #ifdef SERVER_GC -#ifdef BIT64 +#ifdef HOST_64BIT #define MAX_INDEX_POWER2 30 #else #define MAX_INDEX_POWER2 26 -#endif // BIT64 +#endif // HOST_64BIT #else //SERVER_GC -#ifdef BIT64 +#ifdef HOST_64BIT #define MAX_INDEX_POWER2 28 #else #define MAX_INDEX_POWER2 24 -#endif // BIT64 +#endif // HOST_64BIT #endif //SERVER_GC @@ -566,9 +566,9 @@ class gc_mechanisms_store bool stress_induced; #endif // STRESS_HEAP -#ifdef BIT64 +#ifdef HOST_64BIT uint32_t entry_memory_load; -#endif // BIT64 +#endif // HOST_64BIT void store (gc_mechanisms* gm) { @@ -597,9 +597,9 @@ class gc_mechanisms_store stress_induced = (gm->stress_induced != 0); #endif // STRESS_HEAP -#ifdef BIT64 +#ifdef HOST_64BIT entry_memory_load = gm->entry_memory_load; -#endif // BIT64 +#endif // HOST_64BIT } }; @@ -2840,14 +2840,14 @@ class gc_heap PER_HEAP void decommit_ephemeral_segment_pages(); -#ifdef BIT64 +#ifdef HOST_64BIT PER_HEAP_ISOLATED size_t trim_youngest_desired (uint32_t memory_load, size_t total_new_allocation, size_t total_min_allocation); PER_HEAP_ISOLATED size_t joined_youngest_desired (size_t new_allocation); -#endif // BIT64 +#endif // HOST_64BIT PER_HEAP_ISOLATED size_t get_total_heap_size (); PER_HEAP_ISOLATED @@ -3381,10 +3381,10 @@ class gc_heap double short_plugs_pad_ratio; #endif //SHORT_PLUGS -#ifdef BIT64 +#ifdef HOST_64BIT PER_HEAP_ISOLATED size_t youngest_gen_desired_th; -#endif //BIT64 +#endif //HOST_64BIT PER_HEAP_ISOLATED uint32_t last_gc_memory_load; @@ -3974,11 +3974,11 @@ class gc_heap alloc_list loh_alloc_list[NUM_LOH_ALIST-1]; #define NUM_GEN2_ALIST (12) -#ifdef BIT64 +#ifdef HOST_64BIT #define BASE_GEN2_ALIST (1*256) #else #define BASE_GEN2_ALIST (1*128) -#endif // BIT64 +#endif // HOST_64BIT PER_HEAP alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1]; @@ -4929,11 +4929,11 @@ dynamic_data* gc_heap::dynamic_data_of (int gen_number) // The value of card_size is determined empirically according to the average size of an object // In the code we also rely on the assumption 
that one card_table entry (uint32_t) covers an entire os page // -#if defined (BIT64) +#if defined (HOST_64BIT) #define card_size ((size_t)(2*GC_PAGE_SIZE/card_word_width)) #else #define card_size ((size_t)(GC_PAGE_SIZE/card_word_width)) -#endif // BIT64 +#endif // HOST_64BIT inline size_t card_word (size_t card) diff --git a/src/coreclr/src/gc/sample/GCSample.cpp b/src/coreclr/src/gc/sample/GCSample.cpp index fcbe10cafb0ab..91ffc2e11e979 100644 --- a/src/coreclr/src/gc/sample/GCSample.cpp +++ b/src/coreclr/src/gc/sample/GCSample.cpp @@ -78,7 +78,7 @@ Object * AllocateObject(MethodTable * pMT) return pObject; } -#if defined(BIT64) +#if defined(HOST_64BIT) // Card byte shift is different on 64bit. #define card_byte_shift 11 #else diff --git a/src/coreclr/src/gc/sample/GCSample.vcxproj b/src/coreclr/src/gc/sample/GCSample.vcxproj index 98dadcad54e3c..74c48d85e1851 100644 --- a/src/coreclr/src/gc/sample/GCSample.vcxproj +++ b/src/coreclr/src/gc/sample/GCSample.vcxproj @@ -51,7 +51,7 @@ Use Level3 Disabled - WIN32;_X86_;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + WIN32;HOST_X86;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true common.h .;..;..\env @@ -68,7 +68,7 @@ MaxSpeed true true - WIN32;_X86_;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) + WIN32;HOST_X86;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions) true .;..;..\env diff --git a/src/coreclr/src/gc/sample/gcenv.h b/src/coreclr/src/gc/sample/gcenv.h index b14625b3a3392..dfafdffc14c6b 100644 --- a/src/coreclr/src/gc/sample/gcenv.h +++ b/src/coreclr/src/gc/sample/gcenv.h @@ -35,7 +35,7 @@ #include "gcenv.ee.h" #include "volatile.h" -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #include "gcenv.unix.inl" #else #include "gcenv.windows.inl" diff --git a/src/coreclr/src/gc/unix/gcenv.unix.cpp b/src/coreclr/src/gc/unix/gcenv.unix.cpp index 1571bf65de83c..9f6e2c31d485a 100644 --- a/src/coreclr/src/gc/unix/gcenv.unix.cpp +++ b/src/coreclr/src/gc/unix/gcenv.unix.cpp @@ -130,7 +130,7 @@ FOR_ALL_NUMA_FUNCTIONS #endif // HAVE_NUMA_H -#if defined(_ARM_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_ARM64) #define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_CONF #else #define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_ONLN @@ -871,7 +871,7 @@ size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize) size_t maxSize, maxTrueSize; maxSize = maxTrueSize = GetLogicalProcessorCacheSizeFromOS(); // Returns the size of the highest level processor cache -#if defined(_ARM64_) +#if defined(HOST_ARM64) // Bigger gen0 size helps arm64 targets maxSize = maxTrueSize * 3; #endif @@ -953,7 +953,7 @@ uint32_t GCToOSInterface::GetCurrentProcessCpuCount() // non zero if it has succeeded, 0 if it has failed size_t GCToOSInterface::GetVirtualMemoryLimit() { -#ifdef BIT64 +#ifdef HOST_64BIT // There is no API to get the total virtual address space size on // Unix, so we use a constant value representing 128TB, which is // the approximate size of total user virtual address space on diff --git a/src/coreclr/src/gc/windows/gcenv.windows.cpp b/src/coreclr/src/gc/windows/gcenv.windows.cpp index e46328322f118..4a1928bafeae1 100644 --- a/src/coreclr/src/gc/windows/gcenv.windows.cpp +++ b/src/coreclr/src/gc/windows/gcenv.windows.cpp @@ -97,7 +97,7 @@ void InitNumaNodeInfo() return; } -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) // Calculate greatest common divisor DWORD GCD(DWORD u, DWORD v) { @@ -156,7 +156,7 @@ bool InitLargePagesPrivilege() bool InitCPUGroupInfoArray() { -#if (defined(_TARGET_AMD64_) 
|| defined(_TARGET_ARM64_)) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) BYTE *bBuffer = NULL; SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pSLPIEx = NULL; SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pRecord = NULL; @@ -229,7 +229,7 @@ bool InitCPUGroupInfoArray() bool InitCPUGroupInfoRange() { -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) WORD begin = 0; WORD nr_proc = 0; @@ -251,7 +251,7 @@ void InitCPUGroupInfo() { g_fEnableGCCPUGroups = false; -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) if (!GCConfig::GetGCCpuGroup()) return; @@ -263,7 +263,7 @@ void InitCPUGroupInfo() // only enable CPU groups if more than one group exists g_fEnableGCCPUGroups = g_nGroups > 1; -#endif // _TARGET_AMD64_ || _TARGET_ARM64_ +#endif // TARGET_AMD64 || TARGET_ARM64 // Determine if the process is affinitized to a single processor (or if the system has a single processor) DWORD_PTR processAffinityMask, systemAffinityMask; @@ -518,7 +518,7 @@ void GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uin { assert(g_fEnableGCCPUGroups); -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) WORD bTemp = 0; WORD bDiff = processor_number - bTemp; @@ -899,7 +899,7 @@ size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize) size_t maxSize, maxTrueSize; -#ifdef _X86_ +#ifdef HOST_X86 int dwBuffer[4]; __cpuid(dwBuffer, 0); @@ -913,7 +913,7 @@ size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize) if (dwBuffer[2] == 'letn') { maxTrueSize = GetLogicalProcessorCacheSizeFromOS(); //use OS API for cache enumeration on LH and above -#ifdef BIT64 +#ifdef HOST_64BIT if (maxCpuId >= 2) { // If we're running on a Prescott or greater core, EM64T tests @@ -999,7 +999,7 @@ size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize) maxSize = maxTrueSize = GetLogicalProcessorCacheSizeFromOS() ; // Returns the size of the highest level processor cache #endif -#if defined(_ARM64_) +#if defined(HOST_ARM64) // Bigger gen0 size helps arm64 targets maxSize = maxTrueSize * 3; #endif diff --git a/src/coreclr/src/gcdump/gcdump.cpp b/src/coreclr/src/gcdump/gcdump.cpp index 79b46c9de9da2..c6403b7c888a2 100644 --- a/src/coreclr/src/gcdump/gcdump.cpp +++ b/src/coreclr/src/gcdump/gcdump.cpp @@ -11,9 +11,9 @@ * to the standard code-manager spec. 
*/ -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "utilcode.h" // For _ASSERTE() -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX #include "gcdump.h" /*****************************************************************************/ diff --git a/src/coreclr/src/gcdump/gcdumpnonx86.cpp b/src/coreclr/src/gcdump/gcdumpnonx86.cpp index eca4b489cd943..d063e72838e97 100644 --- a/src/coreclr/src/gcdump/gcdumpnonx86.cpp +++ b/src/coreclr/src/gcdump/gcdumpnonx86.cpp @@ -17,7 +17,7 @@ PCSTR GetRegName (UINT32 regnum) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 switch (regnum) { @@ -41,7 +41,7 @@ PCSTR GetRegName (UINT32 regnum) return "???"; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) static CHAR szRegName[16]; if (regnum < 29) @@ -63,7 +63,7 @@ PCSTR GetRegName (UINT32 regnum) } return "???"; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (regnum > 128) return "???"; @@ -286,7 +286,7 @@ size_t GCDump::DumpGCTable(PTR_CBYTE gcInfoBlock, | DECODE_GC_LIFETIMES | DECODE_PROLOG_LENGTH | DECODE_RETURN_KIND -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) | DECODE_HAS_TAILCALLS #endif ), @@ -359,7 +359,7 @@ size_t GCDump::DumpGCTable(PTR_CBYTE gcInfoBlock, ofs = -ofs; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // The PSPSym is relative to InitialSP on X64 and CallerSP on other platforms. gcPrintf("initial.sp%c%x\n", sign, ofs); #else @@ -437,11 +437,11 @@ size_t GCDump::DumpGCTable(PTR_CBYTE gcInfoBlock, ? "" : GetRegName(hdrdecoder.GetStackBaseRegister())); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 gcPrintf("Wants Report Only Leaf: %u\n", hdrdecoder.WantsReportOnlyLeaf()); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) gcPrintf("Has tailcalls: %u\n", hdrdecoder.HasTailCalls()); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA gcPrintf("Size of parameter area: %x\n", hdrdecoder.GetSizeOfStackParameterArea()); #endif diff --git a/src/coreclr/src/gcdump/i386/gcdumpx86.cpp b/src/coreclr/src/gcdump/i386/gcdumpx86.cpp index b0507ad46d968..14945e8746bc2 100644 --- a/src/coreclr/src/gcdump/i386/gcdumpx86.cpp +++ b/src/coreclr/src/gcdump/i386/gcdumpx86.cpp @@ -6,12 +6,12 @@ */ /*****************************************************************************/ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /*****************************************************************************/ -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "utilcode.h" // For _ASSERTE() -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX #include "gcdump.h" @@ -1089,5 +1089,5 @@ EPILOG_MSG: gcPrintf(" Offset %04X is within the method's epilog" } /*****************************************************************************/ -#endif // _TARGET_X86_ +#endif // TARGET_X86 /*****************************************************************************/ diff --git a/src/coreclr/src/gcinfo/gcinfo_arm/CMakeLists.txt b/src/coreclr/src/gcinfo/gcinfo_arm/CMakeLists.txt index bcbe1a03c385b..3dd25ee3057c4 100644 --- a/src/coreclr/src/gcinfo/gcinfo_arm/CMakeLists.txt +++ b/src/coreclr/src/gcinfo/gcinfo_arm/CMakeLists.txt @@ -1,5 +1,5 @@ -remove_definitions(-D_TARGET_X86_) -add_definitions(-D_TARGET_ARM_) +remove_definitions(-DTARGET_X86) +add_definitions(-DTARGET_ARM) add_library_clr(gcinfo_arm STATIC diff --git a/src/coreclr/src/gcinfo/gcinfo_arm64/CMakeLists.txt b/src/coreclr/src/gcinfo/gcinfo_arm64/CMakeLists.txt index cc0885d9e5ab7..11069b52f2f22 100644 --- 
a/src/coreclr/src/gcinfo/gcinfo_arm64/CMakeLists.txt +++ b/src/coreclr/src/gcinfo/gcinfo_arm64/CMakeLists.txt @@ -1,5 +1,5 @@ -remove_definitions(-D_TARGET_AMD64_) -add_definitions(-D_TARGET_ARM64_) +remove_definitions(-DTARGET_AMD64) +add_definitions(-DTARGET_ARM64) add_library_clr(gcinfo_arm64 STATIC diff --git a/src/coreclr/src/gcinfo/gcinfodumper.cpp b/src/coreclr/src/gcinfo/gcinfodumper.cpp index 7f1ec8f474ccc..8fa31dbd28f76 100644 --- a/src/coreclr/src/gcinfo/gcinfodumper.cpp +++ b/src/coreclr/src/gcinfo/gcinfodumper.cpp @@ -11,11 +11,11 @@ #define GC_CALL_PINNED 0x2 -#ifdef BIT64 +#ifdef HOST_64BIT // All stack offsets are INT32's, so this guarantees a disjoint range of // addresses for each register. #define ADDRESS_SPACING UI64(0x100000000) -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define ADDRESS_SPACING 0x100000 #else #error pick suitable ADDRESS_SPACING for platform @@ -111,7 +111,7 @@ BOOL GcInfoDumper::ReportPointerRecord ( static RegisterInfo rgRegisters[] = { #define REG(reg, field) { FIELD_OFFSET(T_CONTEXT, field) } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 REG(rax, Rax), REG(rcx, Rcx), REG(rdx, Rdx), @@ -128,7 +128,7 @@ BOOL GcInfoDumper::ReportPointerRecord ( REG(r13, R13), REG(r14, R14), REG(r15, R15), -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #undef REG #define REG(reg, field) { FIELD_OFFSET(ArmVolatileContextPointer, field) } REG(r0, R0), @@ -150,7 +150,7 @@ BOOL GcInfoDumper::ReportPointerRecord ( { FIELD_OFFSET(T_KNONVOLATILE_CONTEXT_POINTERS, Lr) }, { FIELD_OFFSET(T_CONTEXT, Sp) }, { FIELD_OFFSET(T_KNONVOLATILE_CONTEXT_POINTERS, R7) }, -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #undef REG #define REG(reg, field) { FIELD_OFFSET(Arm64VolatileContextPointer, field) } REG(x0, X0), @@ -201,16 +201,16 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this iFirstRegister = 0; nRegisters = nCONTEXTRegisters; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 iSPRegister = (FIELD_OFFSET(CONTEXT, Rsp) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONGLONG); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, X0)) / sizeof(ULONGLONG); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, R0)) / sizeof(ULONG); UINT iBFRegister = m_StackBaseRegister; #endif -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) BYTE* pContext = (BYTE*)&(pRD->volatileCurrContextPointers); #else BYTE* pContext = (BYTE*)pRD->pCurrentContext; @@ -223,7 +223,7 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this for (UINT iReg = 0; iReg < nRegisters; iReg++) { UINT iEncodedReg = iFirstRegister + iReg; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (ctx == 1) { if ((iReg < 4 || iReg == 12)) // skip volatile registers for second context @@ -259,7 +259,7 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this { break; } -#elif defined (_TARGET_ARM64_) +#elif defined (TARGET_ARM64) iEncodedReg = iEncodedReg + ctx; //We have to compensate for not tracking x18 if (ctx == 1) { @@ -281,7 +281,7 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this #endif { _ASSERTE(iReg < nCONTEXTRegisters); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM pReg = *(SIZE_T**)(pContext + rgRegisters[iReg].cbContextOffset); if (iEncodedReg == 12) { @@ -296,7 +296,7 @@ 
PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this pReg = *(SIZE_T**)((BYTE*)pRD->pCurrentContextPointers + rgRegisters[iEncodedReg].cbContextOffset); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) pReg = *(SIZE_T**)(pContext + rgRegisters[iReg].cbContextOffset); if (iEncodedReg == iSPRegister) { @@ -317,7 +317,7 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this if (ptr == (SIZE_T)pReg) { // Make sure the register is in the current frame. -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) if (0 != ctx) { m_Error = REPORTED_REGISTER_IN_CALLERS_FRAME; @@ -369,14 +369,14 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this GcStackSlotBase base; if (iSPRegister == iEncodedReg) { -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) base = GC_SP_REL; #else if (0 == ctx) base = GC_SP_REL; else base = GC_CALLER_SP_REL; -#endif //defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#endif //defined(TARGET_ARM) || defined(TARGET_ARM64) } else { @@ -398,7 +398,7 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this } } -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) pContext = (BYTE*)pRD->pCurrentContextPointers; #else pContext = (BYTE*)pRD->pCallerContext; @@ -521,7 +521,7 @@ GcInfoDumper::EnumerateStateChangesResults GcInfoDumper::EnumerateStateChanges ( UINT iReg; -#ifdef BIT64 +#ifdef HOST_64BIT ULONG64 UniqueAddress = ADDRESS_SPACING*2; ULONG64 *pReg; #else @@ -537,7 +537,7 @@ GcInfoDumper::EnumerateStateChangesResults GcInfoDumper::EnumerateStateChanges ( } \ } while (0) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 FILL_REGS(pCurrentContext->Rax, 16); FILL_REGS(pCallerContext->Rax, 16); @@ -552,7 +552,7 @@ GcInfoDumper::EnumerateStateChangesResults GcInfoDumper::EnumerateStateChanges ( *(ppCurrentRax + iReg) = ®disp.pCurrentContext->Rax + iReg; *(ppCallerRax + iReg) = ®disp.pCallerContext ->Rax + iReg; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) FILL_REGS(pCurrentContext->R0, 16); FILL_REGS(pCallerContext->R0, 16); @@ -578,7 +578,7 @@ GcInfoDumper::EnumerateStateChangesResults GcInfoDumper::EnumerateStateChanges ( /// Set R12 *(ppVolatileReg+4) = ®disp.pCurrentContext->R0+12; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) FILL_REGS(pCurrentContext->X0, 33); FILL_REGS(pCallerContext->X0, 33); @@ -650,9 +650,9 @@ PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on th (GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT | DECODE_CODE_LENGTH | DECODE_VARARG -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) | DECODE_HAS_TAILCALLS -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 | DECODE_INTERRUPTIBILITY), offset); @@ -671,7 +671,7 @@ PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on th #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED UINT32 safePointOffset = offset; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) safePointOffset++; #endif if(safePointDecoder.IsSafePoint(safePointOffset)) diff --git a/src/coreclr/src/gcinfo/gcinfoencoder.cpp b/src/coreclr/src/gcinfo/gcinfoencoder.cpp index 73455250124c7..4fef96cca90be 100644 --- a/src/coreclr/src/gcinfo/gcinfoencoder.cpp +++ 
b/src/coreclr/src/gcinfo/gcinfoencoder.cpp @@ -484,11 +484,11 @@ GcInfoEncoder::GcInfoEncoder( m_StackBaseRegister = NO_STACK_BASE_REGISTER; m_SizeOfEditAndContinuePreservedArea = NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA; m_ReversePInvokeFrameSlot = NO_REVERSE_PINVOKE_FRAME; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_WantsReportOnlyLeaf = false; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) m_HasTailCalls = false; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 m_IsVarArg = false; m_pLastInterruptibleRange = NULL; @@ -496,13 +496,13 @@ GcInfoEncoder::GcInfoEncoder( m_IsSlotTableFrozen = FALSE; #endif //_DEBUG -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // If the compiler doesn't set the GCInfo, report RT_Unset. // This is used for compatibility with JITs that aren't updated to use the new API. m_ReturnKind = RT_Unset; #else m_ReturnKind = RT_Illegal; -#endif // _TARGET_X86_ +#endif // TARGET_X86 m_CodeLength = 0; #ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA m_SizeOfStackOutgoingAndScratchArea = -1; @@ -691,7 +691,7 @@ void GcInfoEncoder::SetCodeLength( UINT32 length ) void GcInfoEncoder::SetSecurityObjectStackSlot( INT32 spOffset ) { _ASSERTE( spOffset != NO_SECURITY_OBJECT ); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) _ASSERTE( spOffset < 0x10 && "The security object cannot reside in an input variable!" ); #endif _ASSERTE( m_SecurityObjectStackSlot == NO_SECURITY_OBJECT || m_SecurityObjectStackSlot == spOffset ); @@ -753,17 +753,17 @@ void GcInfoEncoder::SetSizeOfEditAndContinuePreservedArea( UINT32 slots ) m_SizeOfEditAndContinuePreservedArea = slots; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 void GcInfoEncoder::SetWantsReportOnlyLeaf() { m_WantsReportOnlyLeaf = true; } -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) void GcInfoEncoder::SetHasTailCalls() { m_HasTailCalls = true; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA void GcInfoEncoder::SetSizeOfStackOutgoingAndScratchArea( UINT32 size ) @@ -923,7 +923,7 @@ void GcInfoEncoder::FinalizeSlotIds() bool GcInfoEncoder::IsAlwaysScratch(GcSlotDesc &slotDesc) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) _ASSERTE( m_SizeOfStackOutgoingAndScratchArea != (UINT32)-1 ); if(slotDesc.IsRegister()) @@ -942,7 +942,7 @@ bool GcInfoEncoder::IsAlwaysScratch(GcSlotDesc &slotDesc) else return FALSE; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) _ASSERTE( m_SizeOfStackOutgoingAndScratchArea != (UINT32)-1 ); if(slotDesc.IsRegister()) @@ -1006,11 +1006,11 @@ void GcInfoEncoder::Build() !hasContextParamType && (m_InterruptibleRanges.Count() == 0) && !hasReversePInvokeFrame && ((m_StackBaseRegister == NO_STACK_BASE_REGISTER) || (NORMALIZE_STACK_BASE_REGISTER(m_StackBaseRegister) == 0))) && (m_SizeOfEditAndContinuePreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA) && -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 !m_WantsReportOnlyLeaf && -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) !m_HasTailCalls && -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 !IsStructReturnKind(m_ReturnKind); // All new code is generated for the latest GCINFO_VERSION. @@ -1032,11 +1032,11 @@ void GcInfoEncoder::Build() GCINFO_WRITE(m_Info1, ((m_PSPSymStackSlot != NO_PSP_SYM) ? 
1 : 0), 1, FlagsSize); GCINFO_WRITE(m_Info1, m_contextParamType, 2, FlagsSize); GCINFO_WRITE(m_Info1, ((m_StackBaseRegister != NO_STACK_BASE_REGISTER) ? 1 : 0), 1, FlagsSize); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 GCINFO_WRITE(m_Info1, (m_WantsReportOnlyLeaf ? 1 : 0), 1, FlagsSize); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) GCINFO_WRITE(m_Info1, (m_HasTailCalls ? 1 : 0), 1, FlagsSize); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 GCINFO_WRITE(m_Info1, ((m_SizeOfEditAndContinuePreservedArea != NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA) ? 1 : 0), 1, FlagsSize); GCINFO_WRITE(m_Info1, (hasReversePInvokeFrame ? 1 : 0), 1, FlagsSize); diff --git a/src/coreclr/src/hosts/unixcoreruncommon/coreruncommon.cpp b/src/coreclr/src/hosts/unixcoreruncommon/coreruncommon.cpp index c107fe68aa9aa..dd50a5dc4ac4d 100644 --- a/src/coreclr/src/hosts/unixcoreruncommon/coreruncommon.cpp +++ b/src/coreclr/src/hosts/unixcoreruncommon/coreruncommon.cpp @@ -321,7 +321,7 @@ int ExecuteManagedAssembly( // Indicates failure int exitCode = -1; -#ifdef _ARM_ +#ifdef HOST_ARM // libunwind library is used to unwind stack frame, but libunwind for ARM // does not support ARM vfpv3/NEON registers in DWARF format correctly. // Therefore let's disable stack unwinding using DWARF information @@ -333,7 +333,7 @@ int ExecuteManagedAssembly( // UNW_ARM_METHOD_FRAME 0x02 // UNW_ARM_METHOD_EXIDX 0x04 putenv(const_cast("UNW_ARM_UNWIND_METHOD=6")); -#endif // _ARM_ +#endif // HOST_ARM std::string coreClrDllPath(clrFilesAbsolutePath); coreClrDllPath.append("/"); diff --git a/src/coreclr/src/ilasm/assembler.h b/src/coreclr/src/ilasm/assembler.h index e08f5263133ad..43cbf42bb6514 100644 --- a/src/coreclr/src/ilasm/assembler.h +++ b/src/coreclr/src/ilasm/assembler.h @@ -51,7 +51,7 @@ #define dwUniBuf 16384 -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX extern char *g_pszExeFile; #endif diff --git a/src/coreclr/src/ilasm/main.cpp b/src/coreclr/src/ilasm/main.cpp index 5d1aa7def166f..3134736560fe2 100644 --- a/src/coreclr/src/ilasm/main.cpp +++ b/src/coreclr/src/ilasm/main.cpp @@ -30,7 +30,7 @@ static DWORD g_dwSubsystem=(DWORD)-1,g_dwComImageFlags=(DWORD)-1,g_dwFileAlig static ULONGLONG g_stBaseAddress=0; static size_t g_stSizeOfStackReserve=0; extern unsigned int g_uConsoleCP; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX char * g_pszExeFile; #endif @@ -216,7 +216,7 @@ extern "C" int _cdecl wmain(int argc, __in WCHAR **argv) //------------------------------------------------- for (i = 1; i < argc; i++) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if(argv[i][0] == L'-') #else if((argv[i][0] == L'/') || (argv[i][0] == L'-')) @@ -691,7 +691,7 @@ extern "C" int _cdecl wmain(int argc, __in WCHAR **argv) } else { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD dwBinType; if(GetBinaryTypeA(szInputFilename,&dwBinType)) { @@ -778,7 +778,7 @@ extern "C" int _cdecl wmain(int argc, __in WCHAR **argv) } else { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD dwBinType; if(GetBinaryTypeA(szInputFilename,&dwBinType)) { @@ -906,7 +906,7 @@ extern "C" int _cdecl wmain(int argc, __in WCHAR **argv) #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX int main(int argc, char* str[]) { g_pszExeFile = str[0]; @@ -939,5 +939,5 @@ int main(int argc, char* str[]) return ret; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX diff --git a/src/coreclr/src/ilasm/writer.cpp b/src/coreclr/src/ilasm/writer.cpp index cc28f6ee2fa5e..94da077ad2b9a 100644 --- a/src/coreclr/src/ilasm/writer.cpp +++ 
b/src/coreclr/src/ilasm/writer.cpp @@ -1265,7 +1265,7 @@ HRESULT Assembler::CreatePEFile(__in __nullterminated WCHAR *pwzOutputFilename) if(m_wzResourceFile) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX report->msg("Warning: The Win32 resource file '%S' is ignored and not emitted on xPlatform.\n", m_wzResourceFile); #else if (FAILED(hr=m_pCeeFileGen->SetResourceFileName(m_pCeeFile, m_wzResourceFile))) diff --git a/src/coreclr/src/ildasm/dasm.cpp b/src/coreclr/src/ildasm/dasm.cpp index f10888dd57160..f7142cdea799c 100644 --- a/src/coreclr/src/ildasm/dasm.cpp +++ b/src/coreclr/src/ildasm/dasm.cpp @@ -34,7 +34,7 @@ #include #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "resourcestring.h" #define NATIVE_STRING_RESOURCE_NAME dasm_rc DECLARE_NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME); @@ -249,7 +249,7 @@ WCHAR* RstrW(unsigned id) default: break; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX LoadNativeStringResource(NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME),id, buff, cchBuff, NULL); #else _ASSERTE(g_hResources != NULL); @@ -7694,7 +7694,7 @@ BOOL DumpFile() fSuccess = TRUE; } fSuccess = TRUE; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if(g_pFile) // dump .RES file (if any), if not to console { WCHAR wzResFileName[2048], *pwc; diff --git a/src/coreclr/src/ildasm/dasm.rc b/src/coreclr/src/ildasm/dasm.rc index 3a572afb56ed5..1859d92832249 100644 --- a/src/coreclr/src/ildasm/dasm.rc +++ b/src/coreclr/src/ildasm/dasm.rc @@ -4,7 +4,7 @@ #include "resource.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define APSTUDIO_READONLY_SYMBOLS ///////////////////////////////////////////////////////////////////////////// // @@ -14,7 +14,7 @@ ///////////////////////////////////////////////////////////////////////////// #undef APSTUDIO_READONLY_SYMBOLS -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #define FX_VER_FILEDESCRIPTION_STR "Microsoft .NET Framework IL disassembler\0" @@ -101,32 +101,32 @@ BEGIN IDS_USAGE_24 L" /STATS Include statistics on the image.\n" IDS_USAGE_25 L" /CLASSLIST Include list of classes defined in the module.\n" IDS_USAGE_26 L" /ALL Combination of /HEADER,/BYTES,/STATS,/CLASSLIST,/TOKENS\n\n" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX IDS_USAGE_27 L"Options for EXE,DLL,OBJ and LIB files:\n" #else IDS_USAGE_27 L"" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX IDS_USAGE_28 L" /METADATA[=] Show MetaData, where is:\n" IDS_USAGE_29 L" MDHEADER Show MetaData header information and sizes.\n" IDS_USAGE_30 L" HEX Show more things in hex as well as words.\n" IDS_USAGE_31 L" CSV Show the record counts and heap sizes.\n" IDS_USAGE_32 L" UNREX Show unresolved externals.\n" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX IDS_USAGE_33 L" DEBUG Show debug information in addition to other MetaData.\n" #else IDS_USAGE_33 L"" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX IDS_USAGE_34 L" SCHEMA Show the MetaData header and schema information.\n" IDS_USAGE_35 L" RAW Show the raw MetaData tables.\n" IDS_USAGE_36 L" HEAPS Show the raw heaps.\n" IDS_USAGE_37 L" VALIDATE Validate the consistency of the metadata.\n" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX IDS_USAGE_38 L"Options for LIB files only:\n" IDS_USAGE_39 L" /OBJECTFILE= Show MetaData of a single object file in library\n" #else IDS_USAGE_38 L"" IDS_USAGE_39 L"" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX END STRINGTABLE DISCARDABLE @@ -254,7 +254,7 @@ END #endif // English (U.S.) 
resources ///////////////////////////////////////////////////////////////////////////// -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #ifndef APSTUDIO_INVOKED ///////////////////////////////////////////////////////////////////////////// @@ -264,4 +264,4 @@ END ///////////////////////////////////////////////////////////////////////////// #endif // not APSTUDIO_INVOKED -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX diff --git a/src/coreclr/src/ildasm/dres.cpp b/src/coreclr/src/ildasm/dres.cpp index 7f8d686b42fc4..438eb0a045b5e 100644 --- a/src/coreclr/src/ildasm/dres.cpp +++ b/src/coreclr/src/ildasm/dres.cpp @@ -7,7 +7,7 @@ // #include "ildasmpch.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "debugmacros.h" #include "corpriv.h" #include "dasmenum.hpp" @@ -312,5 +312,5 @@ DWORD DumpResourceToFile(__in __nullterminated WCHAR* wzFileName) return ret; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX diff --git a/src/coreclr/src/ildasm/windasm.cpp b/src/coreclr/src/ildasm/windasm.cpp index e6f2dd64c06b6..77ce3e676fb60 100644 --- a/src/coreclr/src/ildasm/windasm.cpp +++ b/src/coreclr/src/ildasm/windasm.cpp @@ -160,7 +160,7 @@ int ProcessOneArg(__in __nullterminated char* szArg, __out char** ppszObjFileNam if(strlen(szArg) == 0) return 0; if ((strcmp(szArg, "/?") == 0) || (strcmp(szArg, "-?") == 0)) return 1; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if(szArg[0] == '-') #else if((szArg[0] == '/') || (szArg[0] == '-')) @@ -510,7 +510,7 @@ int ParseCmdLineA(__in __nullterminated char* szCmdLine, __out char** ppszObjFil int __cdecl main(int nCmdShow, char* lpCmdLine[]) { -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) if (0 != PAL_Initialize(nCmdShow, lpCmdLine)) { printError(g_pFile, "Error: Fail to PAL_Initialize\n"); @@ -549,7 +549,7 @@ int __cdecl main(int nCmdShow, char* lpCmdLine[]) hConsoleOut = GetStdHandle(STD_OUTPUT_HANDLE); hConsoleErr = GetStdHandle(STD_ERROR_HANDLE); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Dev11 #5320 - pull the localized resource loader up so if ParseCmdLineW need resources, they're already loaded g_hResources = WszGetModuleHandle(NULL); #endif diff --git a/src/coreclr/src/inc/arraylist.h b/src/coreclr/src/inc/arraylist.h index f73477df12dfc..a4c6b2cb5c1ab 100644 --- a/src/coreclr/src/inc/arraylist.h +++ b/src/coreclr/src/inc/arraylist.h @@ -41,7 +41,7 @@ class ArrayListBase { SPTR(ArrayListBlock) m_next; DWORD m_blockSize; -#ifdef BIT64 +#ifdef HOST_64BIT DWORD m_padding; #endif PTR_VOID m_array[0]; @@ -61,7 +61,7 @@ class ArrayListBase { PTR_ArrayListBlock m_next; DWORD m_blockSize; -#ifdef BIT64 +#ifdef HOST_64BIT DWORD m_padding; #endif void * m_array[ARRAY_BLOCK_SIZE_START]; @@ -231,7 +231,7 @@ class ArrayListBase { if (m_remaining < m_block->m_blockSize) ZeroMemory(&(m_block->m_array[m_remaining]), (m_block->m_blockSize - m_remaining) * sizeof(void*)); -#ifdef BIT64 +#ifdef HOST_64BIT m_block->m_padding = 0; #endif } diff --git a/src/coreclr/src/inc/bbsweep.h b/src/coreclr/src/inc/bbsweep.h index 71c786a8aba80..b24cf231db58b 100644 --- a/src/coreclr/src/inc/bbsweep.h +++ b/src/coreclr/src/inc/bbsweep.h @@ -19,9 +19,9 @@ #ifndef _BBSWEEP_H_ #define _BBSWEEP_H_ -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // The CLR headers don't allow us to use methods like SetEvent directly (instead // we need to use the host APIs). 
However, this file is included both in the CLR @@ -315,7 +315,7 @@ class BBSweep return bInitialized; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL IsAppContainerProcess(HANDLE hToken) { #ifndef TokenIsAppContainer @@ -330,14 +330,14 @@ class BBSweep } return fIsAppContainerProcess; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // helper to get the correct object name prefix void GetObjectNamePrefix(DWORD processID, BOOL fromRuntime, __inout_z WCHAR* objectNamePrefix) { // default prefix swprintf_s(objectNamePrefix, MAX_LONGPATH, W("Global")); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // // This method can be called: // 1. From process init code @@ -403,7 +403,7 @@ class BBSweep } } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } private: diff --git a/src/coreclr/src/inc/bitposition.h b/src/coreclr/src/inc/bitposition.h index df7915546a5e5..423c4ed24a6bb 100644 --- a/src/coreclr/src/inc/bitposition.h +++ b/src/coreclr/src/inc/bitposition.h @@ -25,11 +25,11 @@ inline unsigned BitPosition(unsigned value) { _ASSERTE((value != 0) && ((value & (value-1)) == 0)); -#if defined(_ARM_) && defined(__llvm__) +#if defined(HOST_ARM) && defined(__llvm__) // use intrinsic functions for arm32 // this is applied for LLVM only but it may work for some compilers DWORD index = __builtin_clz(__builtin_arm_rbit(value)); -#elif !defined(_AMD64_) +#elif !defined(HOST_AMD64) const unsigned PRIME = 37; static const char hashTable[PRIME] = diff --git a/src/coreclr/src/inc/blobfetcher.h b/src/coreclr/src/inc/blobfetcher.h index 2455ba0249cd6..a7f65c4150928 100644 --- a/src/coreclr/src/inc/blobfetcher.h +++ b/src/coreclr/src/inc/blobfetcher.h @@ -63,7 +63,7 @@ class CBlobFetcher CBlobFetcher& operator=(const CBlobFetcher & src); public: -#if defined(BIT64) +#if defined(HOST_64BIT) // needs to be 64 so that we can purposefully cache align code in ngen'd images enum { maxAlign = 64 }; // maximum alignment we support #else diff --git a/src/coreclr/src/inc/check.inl b/src/coreclr/src/inc/check.inl index 9a3597ef8192b..47ffd15908f59 100644 --- a/src/coreclr/src/inc/check.inl +++ b/src/coreclr/src/inc/check.inl @@ -180,7 +180,7 @@ inline CHECK CheckAligned(UINT value, UINT alignment) CHECK_OK; } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // For Unix this and the previous function get the same types. // So, exclude this one. inline CHECK CheckAligned(ULONG value, UINT alignment) @@ -189,7 +189,7 @@ inline CHECK CheckAligned(ULONG value, UINT alignment) CHECK(AlignmentTrim(value, alignment) == 0); CHECK_OK; } -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX inline CHECK CheckAligned(UINT64 value, UINT alignment) { @@ -270,7 +270,7 @@ inline CHECK CheckUnderflow(UINT value1, UINT value2) CHECK_OK; } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // For Unix this and the previous function get the same types. // So, exclude this one. 
inline CHECK CheckUnderflow(ULONG value1, ULONG value2) @@ -279,7 +279,7 @@ inline CHECK CheckUnderflow(ULONG value1, ULONG value2) CHECK_OK; } -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX inline CHECK CheckUnderflow(UINT64 value1, UINT64 value2) { diff --git a/src/coreclr/src/inc/clr/fs/path.h b/src/coreclr/src/inc/clr/fs/path.h index 57cbcce12a783..dd4ed9e7489e6 100644 --- a/src/coreclr/src/inc/clr/fs/path.h +++ b/src/coreclr/src/inc/clr/fs/path.h @@ -31,7 +31,7 @@ namespace clr _ASSERTE(wzPath != nullptr); // Similar to System.IO.Path.IsRelative() -#if PLATFORM_UNIX +#if TARGET_UNIX if(wzPath[0] == VOLUME_SEPARATOR_CHAR_W) { return false; diff --git a/src/coreclr/src/inc/clr_std/type_traits b/src/coreclr/src/inc/clr_std/type_traits index 334c53b6f0421..357fb1f292a9e 100644 --- a/src/coreclr/src/inc/clr_std/type_traits +++ b/src/coreclr/src/inc/clr_std/type_traits @@ -199,7 +199,7 @@ namespace std typedef unsigned int type; }; -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX template<> struct make_unsigned @@ -207,7 +207,7 @@ namespace std typedef unsigned long type; }; -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX template<> struct make_unsigned<__int64> @@ -234,7 +234,7 @@ namespace std typedef signed int type; }; -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX template<> struct make_signed @@ -242,7 +242,7 @@ namespace std typedef signed long type; }; -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX template<> struct make_signed @@ -359,7 +359,7 @@ namespace std // On Unix 'long' is a 64-bit type (same as __int64) and the following two definitions // conflict with _Is_integral and _Is_integral. -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX template<> struct _Is_integral : true_type @@ -371,7 +371,7 @@ namespace std : true_type { // determine whether _Ty is integral }; -#endif /* PLATFORM_UNIX */ +#endif /* TARGET_UNIX */ #if _HAS_CHAR16_T_LANGUAGE_SUPPORT template<> @@ -427,7 +427,7 @@ namespace std // In PAL, we define long as int and so this becomes int double, // which is a nonsense -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX template<> struct _Is_floating_point : true_type diff --git a/src/coreclr/src/inc/clrconfigvalues.h b/src/coreclr/src/inc/clrconfigvalues.h index 25a447c7ef317..f2ce39104520f 100644 --- a/src/coreclr/src/inc/clrconfigvalues.h +++ b/src/coreclr/src/inc/clrconfigvalues.h @@ -377,7 +377,7 @@ RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TailCallLoopOpt, W("TailCallLoopOpt"), 1, "Con RETAIL_CONFIG_DWORD_INFO(EXTERNAL_Jit_NetFx40PInvokeStackResilience, W("NetFx40_PInvokeStackResilience"), (DWORD)-1, "Makes P/Invoke resilient against mismatched signature and calling convention (significant perf penalty).") // AltJitAssertOnNYI should be 0 on targets where JIT is under development or bring up stage, so as to facilitate fallback to main JIT on hitting a NYI. 
-#if defined(_TARGET_X86_) +#if defined(TARGET_X86) RETAIL_CONFIG_DWORD_INFO(INTERNAL_AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0, "Controls the AltJit behavior of NYI stuff") #else RETAIL_CONFIG_DWORD_INFO(INTERNAL_AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1, "Controls the AltJit behavior of NYI stuff") @@ -387,16 +387,16 @@ RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_JitRegisterFP, W("JitRegisterFP"), 3, "Cont RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitELTHookEnabled, W("JitELTHookEnabled"), 0, "On ARM, setting this will emit Enter/Leave/TailCall callbacks") RETAIL_CONFIG_DWORD_INFO_EX(INTERNAL_JitMemStats, W("JitMemStats"), 0, "Display JIT memory usage statistics", CLRConfig::REGUTIL_default) RETAIL_CONFIG_DWORD_INFO(INTERNAL_JitVNMapSelBudget, W("JitVNMapSelBudget"), 100, "Max # of MapSelect's considered for a particular top-level invocation.") -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_X86) || defined(TARGET_ARM64) #define EXTERNAL_FeatureSIMD_Default 1 -#else // !(defined(_TARGET_AMD64_) || defined(_TARGET_X86_) || defined(_TARGET_ARM64_)) +#else // !(defined(TARGET_AMD64) || defined(TARGET_X86) || defined(TARGET_ARM64)) #define EXTERNAL_FeatureSIMD_Default 0 -#endif // !(defined(_TARGET_AMD64_) || defined(_TARGET_X86_) || defined(_TARGET_ARM64_)) -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#endif // !(defined(TARGET_AMD64) || defined(TARGET_X86) || defined(TARGET_ARM64)) +#if defined(TARGET_AMD64) || defined(TARGET_X86) #define EXTERNAL_JitEnableAVX_Default 1 -#else // !(defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#else // !(defined(TARGET_AMD64) || defined(TARGET_X86) #define EXTERNAL_JitEnableAVX_Default 0 -#endif // !(defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#endif // !(defined(TARGET_AMD64) || defined(TARGET_X86) RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_FeatureSIMD, W("FeatureSIMD"), EXTERNAL_FeatureSIMD_Default, "Enable SIMD intrinsics recognition in System.Numerics.dll and/or System.Numerics.Vectors.dll", CLRConfig::REGUTIL_default) RETAIL_CONFIG_DWORD_INFO(INTERNAL_SIMD16ByteOnly, W("SIMD16ByteOnly"), 0, "Limit maximum SIMD vector length to 16 bytes (used by x64_arm64_altjit)") RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_EnableAVX, W("EnableAVX"), EXTERNAL_JitEnableAVX_Default, "Enable AVX instruction set for wide operations as default", CLRConfig::REGUTIL_default) @@ -601,12 +601,12 @@ RETAIL_CONFIG_DWORD_INFO(INTERNAL_ThreadPool_ForceMaxWorkerThreads, W("ThreadPoo RETAIL_CONFIG_DWORD_INFO(INTERNAL_ThreadPool_DisableStarvationDetection, W("ThreadPool_DisableStarvationDetection"), 0, "Disables the ThreadPool feature that forces new threads to be added when workitems run for too long") RETAIL_CONFIG_DWORD_INFO(INTERNAL_ThreadPool_DebugBreakOnWorkerStarvation, W("ThreadPool_DebugBreakOnWorkerStarvation"), 0, "Breaks into the debugger if the ThreadPool detects work queue starvation") RETAIL_CONFIG_DWORD_INFO(INTERNAL_ThreadPool_EnableWorkerTracking, W("ThreadPool_EnableWorkerTracking"), 0, "Enables extra expensive tracking of how many workers threads are working simultaneously") -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Spinning scheme is currently different on ARM64, see CLRLifoSemaphore::Wait(DWORD, UINT32, UINT32) RETAIL_CONFIG_DWORD_INFO(INTERNAL_ThreadPool_UnfairSemaphoreSpinLimit, W("ThreadPool_UnfairSemaphoreSpinLimit"), 0x32, "Maximum number of spins per processor a thread pool worker thread performs before waiting for work") -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 
RETAIL_CONFIG_DWORD_INFO(INTERNAL_ThreadPool_UnfairSemaphoreSpinLimit, W("ThreadPool_UnfairSemaphoreSpinLimit"), 0x46, "Maximum number of spins a thread pool worker thread performs before waiting for work") -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_Thread_UseAllCpuGroups, W("Thread_UseAllCpuGroups"), 0, "Specifies if to automatically distribute thread across CPU Groups") CONFIG_DWORD_INFO(INTERNAL_ThreadpoolTickCountAdjustment, W("ThreadpoolTickCountAdjustment"), 0, "") diff --git a/src/coreclr/src/inc/clrnt.h b/src/coreclr/src/inc/clrnt.h index 363511d4b7743..4d508fe62623e 100644 --- a/src/coreclr/src/inc/clrnt.h +++ b/src/coreclr/src/inc/clrnt.h @@ -96,7 +96,7 @@ typedef signed char SCHAR; typedef SCHAR *PSCHAR; typedef LONG NTSTATUS; -#ifndef FEATURE_PAL +#ifndef HOST_UNIX #define TLS_MINIMUM_AVAILABLE 64 // winnt #define TLS_EXPANSION_SLOTS 1024 @@ -284,7 +284,7 @@ typedef ANSI_STRING64 *PANSI_STRING64; #define GDI_HANDLE_BUFFER_SIZE32 34 #define GDI_HANDLE_BUFFER_SIZE64 60 -#if !defined(_TARGET_AMD64_) +#if !defined(TARGET_AMD64) #define GDI_HANDLE_BUFFER_SIZE GDI_HANDLE_BUFFER_SIZE32 #else #define GDI_HANDLE_BUFFER_SIZE GDI_HANDLE_BUFFER_SIZE64 @@ -587,7 +587,7 @@ typedef struct _TEB { PVOID SystemReserved1[54]; // Used by FP emulator NTSTATUS ExceptionCode; // for RaiseUserException ACTIVATION_CONTEXT_STACK ActivationContextStack; // Fusion activation stack - // sizeof(PVOID) is a way to express processor-dependence, more generally than #ifdef BIT64 + // sizeof(PVOID) is a way to express processor-dependence, more generally than #ifdef HOST_64BIT UCHAR SpareBytes1[48 - sizeof(PVOID) - sizeof(ACTIVATION_CONTEXT_STACK)]; GDI_TEB_BATCH GdiTebBatch; // Gdi batching CLIENT_ID RealClientId; @@ -748,9 +748,9 @@ typedef VM_COUNTERS *PVM_COUNTERS; #undef TYPE3 -#endif // !defined(FEATURE_PAL) +#endif // !defined(HOST_UNIX) -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) typedef enum _FUNCTION_TABLE_TYPE { RF_SORTED, @@ -763,7 +763,7 @@ typedef struct _DYNAMIC_FUNCTION_TABLE { PT_RUNTIME_FUNCTION FunctionTable; LARGE_INTEGER TimeStamp; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM ULONG MinimumAddress; ULONG MaximumAddress; ULONG BaseAddress; @@ -780,12 +780,12 @@ typedef struct _DYNAMIC_FUNCTION_TABLE { ULONG EntryCount; } DYNAMIC_FUNCTION_TABLE, *PDYNAMIC_FUNCTION_TABLE; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 // // AMD64 // -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define RUNTIME_FUNCTION__BeginAddress(prf) (prf)->BeginAddress #define RUNTIME_FUNCTION__SetBeginAddress(prf,address) ((prf)->BeginAddress = (address)) @@ -811,9 +811,9 @@ PEXCEPTION_ROUTINE IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL ); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX extern RtlVirtualUnwindFn* RtlVirtualUnwind_Unsafe; -#else // !FEATURE_PAL +#else // !TARGET_UNIX PEXCEPTION_ROUTINE RtlVirtualUnwind_Unsafe( IN ULONG HandlerType, @@ -825,16 +825,16 @@ RtlVirtualUnwind_Unsafe( OUT PULONG64 EstablisherFrame, IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL ); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // // X86 // -#ifdef _TARGET_X86_ -#ifndef FEATURE_PAL +#ifdef TARGET_X86 +#ifndef TARGET_UNIX // // x86 ABI does not define RUNTIME_FUNCTION. Define our own to allow unification between x86 and other platforms. 
// @@ -847,7 +847,7 @@ typedef struct _DISPATCHER_CONTEXT { _EXCEPTION_REGISTRATION_RECORD* RegistrationPointer; } DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #define RUNTIME_FUNCTION__BeginAddress(prf) (prf)->BeginAddress #define RUNTIME_FUNCTION__SetBeginAddress(prf,addr) ((prf)->BeginAddress = (addr)) @@ -889,9 +889,9 @@ RtlVirtualUnwind ( ); #endif // FEATURE_EH_FUNCLETS -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #include "daccess.h" // @@ -934,7 +934,7 @@ typedef struct _UNWIND_INFO { // dummy } UNWIND_INFO, *PUNWIND_INFO; -#if defined(FEATURE_PAL) || defined(_X86_) +#if defined(TARGET_UNIX) || defined(HOST_X86) EXTERN_C NTSYSAPI VOID @@ -962,13 +962,13 @@ RtlVirtualUnwind ( __out PDWORD EstablisherFrame, __inout_opt PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers ); -#endif // FEATURE_PAL || _X86_ +#endif // TARGET_UNIX || HOST_X86 #define UNW_FLAG_NHANDLER 0x0 -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 #include "daccess.h" #define UNW_FLAG_NHANDLER 0x0 /* any handler */ diff --git a/src/coreclr/src/inc/clrtypes.h b/src/coreclr/src/inc/clrtypes.h index 78c2e6ae4caa6..a83fb9e1f656a 100644 --- a/src/coreclr/src/inc/clrtypes.h +++ b/src/coreclr/src/inc/clrtypes.h @@ -23,7 +23,7 @@ #include "staticcontract.h" #include "static_assert.h" -#if BIT64 +#if HOST_64BIT #define POINTER_BITS (64) #else #define POINTER_BITS (32) @@ -389,7 +389,7 @@ inline UINT AlignmentTrim(UINT value, UINT alignment) return value&(alignment-1); } -#ifndef PLATFORM_UNIX +#ifndef HOST_UNIX // For Unix this and the previous function get the same types. // So, exclude this one. inline UINT AlignmentTrim(ULONG value, UINT alignment) @@ -398,7 +398,7 @@ inline UINT AlignmentTrim(ULONG value, UINT alignment) STATIC_CONTRACT_SUPPORTS_DAC; return value&(alignment-1); } -#endif // PLATFORM_UNIX +#endif // HOST_UNIX inline UINT AlignmentTrim(UINT64 value, UINT alignment) { diff --git a/src/coreclr/src/inc/corcompile.h b/src/coreclr/src/inc/corcompile.h index 2343dad89c85d..fd5096c502fc8 100644 --- a/src/coreclr/src/inc/corcompile.h +++ b/src/coreclr/src/inc/corcompile.h @@ -54,7 +54,7 @@ typedef DPTR(struct CORCOMPILE_VIRTUAL_SECTION_INFO) typedef DPTR(struct CORCOMPILE_IMPORT_SECTION) PTR_CORCOMPILE_IMPORT_SECTION; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 typedef DPTR(RUNTIME_FUNCTION) PTR_RUNTIME_FUNCTION; @@ -62,7 +62,7 @@ typedef DPTR(RUNTIME_FUNCTION) PTR_RUNTIME_FUNCTION; // Chained unwind info. Used for cold methods. #define RUNTIME_FUNCTION_INDIRECT 0x80000000 -#endif // _TARGET_X86_ +#endif // TARGET_X86 // The stride is choosen as maximum value that still gives good page locality of RUNTIME_FUNCTION table touches (only one page of // RUNTIME_FUNCTION table is going to be touched during most IP2MD lookups). @@ -138,7 +138,7 @@ enum CorCompileImportFlags #define CORCOMPILE_TAG_TOKEN(token) ((SIZE_T)(((token)<<1)|CORCOMPILE_TOKEN_TAG)) #define CORCOMPILE_UNTAG_TOKEN(token) ((((SIZE_T)(token))&~CORCOMPILE_TOKEN_TAG)>>1) -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Tagging of code pointers on ARM uses inverse logic because of the thumb bit. 
#define CORCOMPILE_IS_PCODE_TAGGED(token) ((((SIZE_T)(token)) & 0x00000001) == 0x00000000) #define CORCOMPILE_TAG_PCODE(token) ((SIZE_T)(((token)<<1)|0x80000000)) @@ -149,7 +149,7 @@ enum CorCompileImportFlags inline BOOL CORCOMPILE_IS_FIXUP_TAGGED(SIZE_T fixup, PTR_CORCOMPILE_IMPORT_SECTION pSection) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Tagging of code pointers on ARM has to use inverse logic because of the thumb bit if (pSection->Flags & CORCOMPILE_IMPORT_FLAGS_PCODE) { @@ -528,7 +528,7 @@ struct CORCOMPILE_CODE_MANAGER_ENTRY ULONG ColdUntrainedMethodOffset; }; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) #define _PRECODE_EXTERNAL_METHOD_THUNK 0x41 #define _PRECODE_VIRTUAL_IMPORT_THUNK 0x42 @@ -547,7 +547,7 @@ struct CORCOMPILE_CODE_MANAGER_ENTRY WORD padding; }; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) struct CORCOMPILE_VIRTUAL_IMPORT_THUNK { @@ -582,7 +582,7 @@ struct CORCOMPILE_CODE_MANAGER_ENTRY PCODE m_pTarget; }; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) struct CORCOMPILE_VIRTUAL_IMPORT_THUNK { // Array of words to do the following: @@ -871,11 +871,11 @@ struct CORCOMPILE_DEPENDENCY /*********************************************************************************/ // Flags used to encode HelperTable -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) #define HELPER_TABLE_ENTRY_LEN 16 #else #define HELPER_TABLE_ENTRY_LEN 8 -#endif //defined(_TARGET_ARM64_) +#endif //defined(TARGET_ARM64) #define HELPER_TABLE_ALIGN 8 #define CORCOMPILE_HELPER_PTR 0x80000000 // The entry is pointer to the helper (jump thunk otherwise) diff --git a/src/coreclr/src/inc/cordebuginfo.h b/src/coreclr/src/inc/cordebuginfo.h index fff6fbf8e8adf..32df25371c704 100644 --- a/src/coreclr/src/inc/cordebuginfo.h +++ b/src/coreclr/src/inc/cordebuginfo.h @@ -69,7 +69,7 @@ class ICorDebugInfo // contained in debug/inc/DbgIPCEvents.h. enum RegNum { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 REGNUM_EAX, REGNUM_ECX, REGNUM_EDX, @@ -78,7 +78,7 @@ class ICorDebugInfo REGNUM_EBP, REGNUM_ESI, REGNUM_EDI, -#elif _TARGET_ARM_ +#elif TARGET_ARM REGNUM_R0, REGNUM_R1, REGNUM_R2, @@ -95,7 +95,7 @@ class ICorDebugInfo REGNUM_SP, REGNUM_LR, REGNUM_PC, -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 REGNUM_X0, REGNUM_X1, REGNUM_X2, @@ -129,7 +129,7 @@ class ICorDebugInfo REGNUM_LR, REGNUM_SP, REGNUM_PC, -#elif _TARGET_AMD64_ +#elif TARGET_AMD64 REGNUM_RAX, REGNUM_RCX, REGNUM_RDX, @@ -153,18 +153,18 @@ class ICorDebugInfo REGNUM_AMBIENT_SP, // ambient SP support. Ambient SP is the original SP in the non-BP based frame. // Ambient SP should not change even if there are push/pop operations in the method. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 REGNUM_FP = REGNUM_EBP, REGNUM_SP = REGNUM_ESP, -#elif _TARGET_AMD64_ +#elif TARGET_AMD64 REGNUM_SP = REGNUM_RSP, -#elif _TARGET_ARM_ +#elif TARGET_ARM #ifdef REDHAWK REGNUM_FP = REGNUM_R7, #else REGNUM_FP = REGNUM_R11, #endif //REDHAWK -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 //Nothing to do here. FP is already alloted. 
#else // RegNum values should be properly defined for this platform diff --git a/src/coreclr/src/inc/corhdr.h b/src/coreclr/src/inc/corhdr.h index d08111f6136e0..abef978313a9e 100644 --- a/src/coreclr/src/inc/corhdr.h +++ b/src/coreclr/src/inc/corhdr.h @@ -21,7 +21,7 @@ #define FRAMEWORK_REGISTRY_KEY_W W("Software\\Microsoft\\.NETFramework") // keys for HKCU -#ifdef BIT64 +#ifdef HOST_64BIT #define USER_FRAMEWORK_REGISTRY_KEY "Software\\Microsoft\\.NETFramework64" #define USER_FRAMEWORK_REGISTRY_KEY_W W("Software\\Microsoft\\.NETFramework64") #else @@ -1176,9 +1176,9 @@ typedef struct IMAGE_COR_ILMETHOD_SECT_EH_FAT /***********************************/ typedef struct IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL { -#ifdef BIT64 +#ifdef HOST_64BIT unsigned Flags : 16; -#else // !BIT64 +#else // !HOST_64BIT CorExceptionFlag Flags : 16; #endif unsigned TryOffset : 16; diff --git a/src/coreclr/src/inc/corhlpr.cpp b/src/coreclr/src/inc/corhlpr.cpp index 666cbf7adc715..da6700c121bc3 100644 --- a/src/coreclr/src/inc/corhlpr.cpp +++ b/src/coreclr/src/inc/corhlpr.cpp @@ -46,7 +46,7 @@ void __stdcall DecoderInit(void *pThis, COR_ILMETHOD *header) } if (header->Fat.IsFat()) { -#ifdef BIT64 +#ifdef HOST_64BIT if((((size_t) header) & 3) == 0) // header is aligned #else _ASSERTE((((size_t) header) & 3) == 0); // header is aligned diff --git a/src/coreclr/src/inc/corhlpr.h b/src/coreclr/src/inc/corhlpr.h index 06f973d9ad0cc..0ef8fbf0e92fb 100644 --- a/src/coreclr/src/inc/corhlpr.h +++ b/src/coreclr/src/inc/corhlpr.h @@ -12,7 +12,7 @@ #ifndef __CORHLPR_H__ #define __CORHLPR_H__ -#if defined(_MSC_VER) && defined(_X86_) && !defined(FPO_ON) +#if defined(_MSC_VER) && defined(HOST_X86) && !defined(FPO_ON) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #define FPO_ON 1 #define CORHLPR_TURNED_FPO_ON 1 diff --git a/src/coreclr/src/inc/corhlprpriv.h b/src/coreclr/src/inc/corhlprpriv.h index 2d04d7d0c0e42..8fcafd08d93c3 100644 --- a/src/coreclr/src/inc/corhlprpriv.h +++ b/src/coreclr/src/inc/corhlprpriv.h @@ -14,7 +14,7 @@ #include "corhlpr.h" #include "fstring.h" -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // If routines don't get inlined, don't pay the EBP frame penalty #endif @@ -642,7 +642,7 @@ HRESULT _GetFixedSigOfVarArg( // S_OK or error. #endif //!SOS_INCLUDE -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // restore command line default optimizations #endif diff --git a/src/coreclr/src/inc/corhost.h b/src/coreclr/src/inc/corhost.h index 816651a76efa4..6de2b3ffb0aa0 100644 --- a/src/coreclr/src/inc/corhost.h +++ b/src/coreclr/src/inc/corhost.h @@ -79,9 +79,9 @@ class CrstStatic; class CorHost2 : public CorRuntimeHostBase -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX , public IPrivateManagedExceptionReporting /* This interface is for internal Watson testing only*/ -#endif // FEATURE_PAL +#endif // TARGET_UNIX , public ICLRRuntimeHost4 { friend struct _DacGlobals; @@ -102,10 +102,10 @@ class CorHost2 : // *** ICorRuntimeHost methods *** -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // defined in IPrivateManagedExceptionReporting interface for internal Watson testing only STDMETHODIMP GetBucketParametersForCurrentException(BucketParameters *pParams); -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Starts the runtime. This is equivalent to CoInitializeCor(). 
STDMETHODIMP Start(); diff --git a/src/coreclr/src/inc/corinfo.h b/src/coreclr/src/inc/corinfo.h index 43891ef834c3d..bf5d8b8ed1e62 100644 --- a/src/coreclr/src/inc/corinfo.h +++ b/src/coreclr/src/inc/corinfo.h @@ -1871,9 +1871,9 @@ struct CORINFO_String : public CORINFO_Object struct CORINFO_Array : public CORINFO_Object { unsigned length; -#ifdef BIT64 +#ifdef HOST_64BIT unsigned alignpad; -#endif // BIT64 +#endif // HOST_64BIT #if 0 /* Multi-dimensional arrays have the lengths and bounds here */ @@ -1897,9 +1897,9 @@ struct CORINFO_Array : public CORINFO_Object struct CORINFO_Array8 : public CORINFO_Object { unsigned length; -#ifdef BIT64 +#ifdef HOST_64BIT unsigned alignpad; -#endif // BIT64 +#endif // HOST_64BIT union { @@ -1914,9 +1914,9 @@ struct CORINFO_Array8 : public CORINFO_Object struct CORINFO_RefArray : public CORINFO_Object { unsigned length; -#ifdef BIT64 +#ifdef HOST_64BIT unsigned alignpad; -#endif // BIT64 +#endif // HOST_64BIT #if 0 /* Multi-dimensional arrays have the lengths and bounds here */ @@ -1945,7 +1945,7 @@ struct CORINFO_VarArgInfo #define SIZEOF__CORINFO_Object TARGET_POINTER_SIZE /* methTable */ #define OFFSETOF__CORINFO_Array__length SIZEOF__CORINFO_Object -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define OFFSETOF__CORINFO_Array__data (OFFSETOF__CORINFO_Array__length + sizeof(unsigned __int32) /* length */ + sizeof(unsigned __int32) /* alignpad */) #else #define OFFSETOF__CORINFO_Array__data (OFFSETOF__CORINFO_Array__length + sizeof(unsigned __int32) /* length */) diff --git a/src/coreclr/src/inc/corjitflags.h b/src/coreclr/src/inc/corjitflags.h index 65c089d577fbf..ab89219a55a8f 100644 --- a/src/coreclr/src/inc/corjitflags.h +++ b/src/coreclr/src/inc/corjitflags.h @@ -34,7 +34,7 @@ class CORJIT_FLAGS CORJIT_FLAG_GCPOLL_CALLS = 6, // Emit calls to JIT_POLLGC for thread suspension. 
CORJIT_FLAG_MCJIT_BACKGROUND = 7, // Calling from multicore JIT background thread, do not call JitComplete - #if defined(_TARGET_X86_) + #if defined(TARGET_X86) CORJIT_FLAG_PINVOKE_RESTORE_ESP = 8, // Restore ESP after returning from inlined PInvoke CORJIT_FLAG_TARGET_P4 = 9, @@ -42,7 +42,7 @@ class CORJIT_FLAGS CORJIT_FLAG_USE_CMOV = 11, // Generated code may use cmov instruction CORJIT_FLAG_USE_SSE2 = 12, // Generated code may use SSE-2 instructions - #else // !defined(_TARGET_X86_) + #else // !defined(TARGET_X86) CORJIT_FLAG_UNUSED1 = 8, CORJIT_FLAG_UNUSED2 = 9, @@ -50,29 +50,29 @@ class CORJIT_FLAGS CORJIT_FLAG_UNUSED4 = 11, CORJIT_FLAG_UNUSED5 = 12, - #endif // !defined(_TARGET_X86_) + #endif // !defined(TARGET_X86) CORJIT_FLAG_UNUSED6 = 13, - #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) + #if defined(TARGET_X86) || defined(TARGET_AMD64) CORJIT_FLAG_USE_AVX = 14, CORJIT_FLAG_USE_AVX2 = 15, CORJIT_FLAG_USE_AVX_512 = 16, - #else // !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) + #else // !defined(TARGET_X86) && !defined(TARGET_AMD64) CORJIT_FLAG_UNUSED7 = 14, CORJIT_FLAG_UNUSED8 = 15, CORJIT_FLAG_UNUSED9 = 16, - #endif // !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) + #endif // !defined(TARGET_X86) && !defined(TARGET_AMD64) - #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) + #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) CORJIT_FLAG_FEATURE_SIMD = 17, #else CORJIT_FLAG_UNUSED10 = 17, - #endif // !(defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) + #endif // !(defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)) CORJIT_FLAG_MAKEFINALCODE = 18, // Use the final code generator, i.e., not the interpreter. CORJIT_FLAG_READYTORUN = 19, // Use version-resilient code generation @@ -98,15 +98,15 @@ class CORJIT_FLAGS CORJIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible CORJIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) CORJIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records -#else // !defined(_TARGET_ARM_) +#else // !defined(TARGET_ARM) CORJIT_FLAG_UNUSED11 = 41, -#endif // !defined(_TARGET_ARM_) +#endif // !defined(TARGET_ARM) CORJIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) CORJIT_FLAG_HAS_ARM64_AES = 43, // ID_AA64ISAR0_EL1.AES is 1 or better CORJIT_FLAG_HAS_ARM64_ATOMICS = 44, // ID_AA64ISAR0_EL1.Atomic is 2 or better @@ -130,7 +130,7 @@ class CORJIT_FLAGS CORJIT_FLAG_HAS_ARM64_SM4 = 62, // ID_AA64ISAR0_EL1.SM4 is 1 or better CORJIT_FLAG_HAS_ARM64_SVE = 63 // ID_AA64PFR0_EL1.SVE is 1 or better -#elif defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#elif defined(TARGET_X86) || defined(TARGET_AMD64) CORJIT_FLAG_USE_SSE3 = 43, CORJIT_FLAG_USE_SSSE3 = 44, @@ -155,7 +155,7 @@ class CORJIT_FLAGS CORJIT_FLAG_UNUSED32 = 63 -#else // !defined(_TARGET_ARM64_) &&!defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) +#else // !defined(TARGET_ARM64) &&!defined(TARGET_X86) && !defined(TARGET_AMD64) CORJIT_FLAG_UNUSED12 = 43, CORJIT_FLAG_UNUSED13 = 44, @@ -179,7 +179,7 @@ class CORJIT_FLAGS CORJIT_FLAG_UNUSED31 = 62, CORJIT_FLAG_UNUSED32 = 63 -#endif // !defined(_TARGET_ARM64_) &&!defined(_TARGET_X86_) && 
!defined(_TARGET_AMD64_) +#endif // !defined(TARGET_ARM64) &&!defined(TARGET_X86) && !defined(TARGET_AMD64) }; CORJIT_FLAGS() diff --git a/src/coreclr/src/inc/crosscomp.h b/src/coreclr/src/inc/crosscomp.h index 388127de3b61b..ae1824555cdc3 100644 --- a/src/coreclr/src/inc/crosscomp.h +++ b/src/coreclr/src/inc/crosscomp.h @@ -8,11 +8,11 @@ #pragma once -#if (!defined(BIT64) && defined(_TARGET_64BIT_)) || (defined(BIT64) && !defined(_TARGET_64BIT_)) +#if (!defined(HOST_64BIT) && defined(TARGET_64BIT)) || (defined(HOST_64BIT) && !defined(TARGET_64BIT)) #define CROSSBITNESS_COMPILE #endif -#if !defined(_ARM_) && defined(_TARGET_ARM_) // Non-ARM Host managing ARM related code +#if !defined(HOST_ARM) && defined(TARGET_ARM) // Non-ARM Host managing ARM related code #ifndef CROSS_COMPILE #define CROSS_COMPILE @@ -92,8 +92,8 @@ typedef struct DECLSPEC_ALIGN(8) _T_CONTEXT { // each frame function. // -#ifndef FEATURE_PAL -#ifdef _X86_ +#ifndef TARGET_UNIX +#ifdef HOST_X86 typedef struct _RUNTIME_FUNCTION { DWORD BeginAddress; DWORD UnwindData; @@ -120,8 +120,8 @@ typedef struct _UNWIND_HISTORY_TABLE { DWORD HighAddress; UNWIND_HISTORY_TABLE_ENTRY Entry[UNWIND_HISTORY_TABLE_SIZE]; } UNWIND_HISTORY_TABLE, *PUNWIND_HISTORY_TABLE; -#endif // _X86_ -#endif // !FEATURE_PAL +#endif // HOST_X86 +#endif // !TARGET_UNIX // @@ -177,7 +177,7 @@ typedef struct _T_DISPATCHER_CONTEXT { PUCHAR NonVolatileRegisters; } T_DISPATCHER_CONTEXT, *PT_DISPATCHER_CONTEXT; -#if defined(FEATURE_PAL) || defined(_X86_) +#if defined(TARGET_UNIX) || defined(HOST_X86) #define T_RUNTIME_FUNCTION RUNTIME_FUNCTION #define PT_RUNTIME_FUNCTION PRUNTIME_FUNCTION #else @@ -187,7 +187,7 @@ typedef struct _T_RUNTIME_FUNCTION { } T_RUNTIME_FUNCTION, *PT_RUNTIME_FUNCTION; #endif -#elif defined(_AMD64_) && defined(_TARGET_ARM64_) // Host amd64 managing ARM64 related code +#elif defined(HOST_AMD64) && defined(TARGET_ARM64) // Host amd64 managing ARM64 related code #ifndef CROSS_COMPILE #define CROSS_COMPILE diff --git a/src/coreclr/src/inc/crtwrap.h b/src/coreclr/src/inc/crtwrap.h index aca2437cc7ada..f5671642ee274 100644 --- a/src/coreclr/src/inc/crtwrap.h +++ b/src/coreclr/src/inc/crtwrap.h @@ -21,7 +21,7 @@ #include #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // CoreCLR.dll uses linker .def files to control the exported symbols. // Define DLLEXPORT macro as empty on Windows. 
#define DLLEXPORT diff --git a/src/coreclr/src/inc/daccess.h b/src/coreclr/src/inc/daccess.h index 33c978868a82d..3f3ed44de31c4 100644 --- a/src/coreclr/src/inc/daccess.h +++ b/src/coreclr/src/inc/daccess.h @@ -599,10 +599,10 @@ typedef SIZE_T TSIZE_T; typedef struct _DacGlobals { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static void Initialize(); void InitializeEntries(TADDR baseAddress); -#endif // FEATURE_PAL +#endif // TARGET_UNIX // These will define all of the dac related mscorwks static and global variables #define DEFINE_DACVAR(id_type, size, id, var) id_type id; @@ -614,9 +614,9 @@ typedef struct _DacGlobals ULONG fn__DACNotifyCompilationFinished; ULONG fn__ThePreStub; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM ULONG fn__ThePreStubCompactARM; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM ULONG fn__ThePreStubPatchLabel; ULONG fn__PrecodeFixupThunk; @@ -785,10 +785,10 @@ struct _KNONVOLATILE_CONTEXT_POINTERS; BOOL DacUnwindStackFrame(T_CONTEXT * pContext, T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers); #endif // FEATURE_EH_FUNCLETS -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) // call back through data target to unwind out-of-process HRESULT DacVirtualUnwind(ULONG32 threadId, PT_CONTEXT context, PT_KNONVOLATILE_CONTEXT_POINTERS contextPointers); -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS class SString; @@ -1019,7 +1019,7 @@ class __DPtrBase : public __TPtrBase { return DPtrType(DacTAddrOffset(m_addr, val, sizeof(type))); } -#if defined (BIT64) +#if defined (HOST_64BIT) DPtrType operator+(unsigned int val) { return DPtrType(DacTAddrOffset(m_addr, val, sizeof(type))); @@ -1063,7 +1063,7 @@ class __DPtrBase : public __TPtrBase { return DPtrType(m_addr - val * sizeof(type)); } -#ifdef BIT64 +#ifdef HOST_64BIT DPtrType operator-(unsigned int val) { return DPtrType(m_addr - val * sizeof(type)); @@ -2113,7 +2113,7 @@ typedef const void* PTR_CVOID; #define S16PTR(type) type* #define S16PTRMAX(type, maxChars) type* -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) #define VPTR_VTABLE_CLASS(name, base) \ friend struct _DacGlobals; \ @@ -2147,7 +2147,7 @@ public: name(int dummy) : base(dummy) {} VPTR_ABSTRACT_VTABLE_CLASS(name, base) \ name() : base() {} -#else // FEATURE_PAL +#else // TARGET_UNIX #define VPTR_VTABLE_CLASS(name, base) #define VPTR_VTABLE_CLASS_AND_CTOR(name, base) @@ -2158,7 +2158,7 @@ public: name(int dummy) : base(dummy) {} #define VPTR_ABSTRACT_VTABLE_CLASS(name, base) #define VPTR_ABSTRACT_VTABLE_CLASS_AND_CTOR(name, base) -#endif // FEATURE_PAL +#endif // TARGET_UNIX // helper macro to make the vtables unique for DAC #define VPTR_UNIQUE(unique) virtual int MakeVTableUniqueForDAC() { return unique; } @@ -2392,19 +2392,19 @@ typedef DPTR(IMAGE_TLS_DIRECTORY) PTR_IMAGE_TLS_DIRECTORY; #include #endif -#if defined(_TARGET_X86_) && defined(FEATURE_PAL) +#if defined(TARGET_X86) && defined(TARGET_UNIX) typedef DPTR(struct _UNWIND_INFO) PTR_UNWIND_INFO; #endif -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT typedef DPTR(T_RUNTIME_FUNCTION) PTR_RUNTIME_FUNCTION; typedef DPTR(struct _UNWIND_INFO) PTR_UNWIND_INFO; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) typedef DPTR(union _UNWIND_CODE) PTR_UNWIND_CODE; -#endif // _TARGET_AMD64_ -#endif // _TARGET_64BIT_ +#endif // TARGET_AMD64 +#endif // TARGET_64BIT -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM typedef DPTR(T_RUNTIME_FUNCTION) PTR_RUNTIME_FUNCTION; #endif diff --git a/src/coreclr/src/inc/dacprivate.h b/src/coreclr/src/inc/dacprivate.h index 5ac27d8f4a9cf..eec8a372e434c 
100644 --- a/src/coreclr/src/inc/dacprivate.h +++ b/src/coreclr/src/inc/dacprivate.h @@ -15,10 +15,10 @@ #include #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // It is unfortunate having to include this header just to get the definition of GenericModeBlock #include -#endif // FEATURE_PAL +#endif // TARGET_UNIX // // Whenever a structure is marshalled between different platforms, we need to ensure the @@ -37,7 +37,7 @@ // want to go changing the layout of, for example, structures defined in OS header files // so we explicitly opt-in with this attribute. // -#if defined(__GNUC__) && defined(_X86_) +#if defined(__GNUC__) && defined(HOST_X86) #define MSLAYOUT __attribute__((__ms_struct__)) #else #define MSLAYOUT diff --git a/src/coreclr/src/inc/dacvars.h b/src/coreclr/src/inc/dacvars.h index 0f1cac9ef6eed..7e9c2ea608d6a 100644 --- a/src/coreclr/src/inc/dacvars.h +++ b/src/coreclr/src/inc/dacvars.h @@ -226,10 +226,10 @@ DEFINE_DACVAR(ULONG, UNKNOWN_POINTER_TYPE, dac__g_pRCWCleanupList, ::g_pRCWClean DEFINE_DACVAR(ULONG, BOOL, RCWWalker__s_bIsGlobalPeggingOn, RCWWalker::s_bIsGlobalPeggingOn) #endif // FEATURE_COMINTEROP -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DEFINE_DACVAR(ULONG, SIZE_T, dac__g_runtimeLoadedBaseAddress, ::g_runtimeLoadedBaseAddress) DEFINE_DACVAR(ULONG, SIZE_T, dac__g_runtimeVirtualSize, ::g_runtimeVirtualSize) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX DEFINE_DACVAR(ULONG, SyncBlockCache *, SyncBlockCache__s_pSyncBlockCache, SyncBlockCache::s_pSyncBlockCache) @@ -239,9 +239,9 @@ DEFINE_DACVAR(ULONG, SIZE_T, dac__s_gsCookie, ::s_gsCookie) DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__g_FCDynamicallyAssignedImplementations, ::g_FCDynamicallyAssignedImplementations) -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DEFINE_DACVAR(ULONG, HANDLE, dac__g_hContinueStartupEvent, ::g_hContinueStartupEvent) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX DEFINE_DACVAR(ULONG, DWORD, CorHost2__m_dwStartupFlags, CorHost2::m_dwStartupFlags) DEFINE_DACVAR(ULONG, HRESULT, dac__g_hrFatalError, ::g_hrFatalError) diff --git a/src/coreclr/src/inc/debugmacros.h b/src/coreclr/src/inc/debugmacros.h index 377f781e4cd4e..14cba426f148a 100644 --- a/src/coreclr/src/inc/debugmacros.h +++ b/src/coreclr/src/inc/debugmacros.h @@ -201,7 +201,7 @@ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); goto LA #define _ASSERT _ASSERTE -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) // This function returns the EXE time stamp (effectively a random number) // Under retail it always returns 0. This is meant to be used in the diff --git a/src/coreclr/src/inc/eetwain.h b/src/coreclr/src/inc/eetwain.h index 504d320bdb132..f0083fb9079f5 100644 --- a/src/coreclr/src/inc/eetwain.h +++ b/src/coreclr/src/inc/eetwain.h @@ -32,11 +32,11 @@ #include "bitvector.h" #include "gcinfotypes.h" -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) #define USE_GC_INFO_DECODER #endif -#if (defined(_TARGET_X86_) && !defined(FEATURE_PAL)) || defined(_TARGET_AMD64_) +#if (defined(TARGET_X86) && !defined(TARGET_UNIX)) || defined(TARGET_AMD64) #define HAS_QUICKUNWIND #endif @@ -173,7 +173,7 @@ virtual void FixContext(ContextType ctxType, #endif // !FEATURE_EH_FUNCLETS #endif // #ifndef DACCESS_COMPILE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* Gets the ambient stack pointer value at the given nesting level within the method. 
@@ -183,7 +183,7 @@ virtual TADDR GetAmbientSP(PREGDISPLAY pContext, DWORD dwRelOffset, DWORD nestingLevel, CodeManState *pState) = 0; -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Get the number of bytes used for stack parameters. @@ -214,11 +214,11 @@ virtual bool UnwindStackFrame(PREGDISPLAY pContext, virtual bool IsGcSafe(EECodeInfo *pCodeInfo, DWORD dwRelOffset) = 0; -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) virtual bool HasTailCalls(EECodeInfo *pCodeInfo) = 0; -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 -#if defined(_TARGET_AMD64_) && defined(_DEBUG) +#if defined(TARGET_AMD64) && defined(_DEBUG) /* Locates the end of the last interruptible region in the given code range. Returns 0 if the entire range is uninterruptible. Returns the end point @@ -227,7 +227,7 @@ virtual bool HasTailCalls(EECodeInfo *pCodeInfo) = 0; virtual unsigned FindEndOfLastInterruptibleRegion(unsigned curOffset, unsigned endOffset, GCInfoToken gcInfoToken) = 0; -#endif // _TARGET_AMD64_ && _DEBUG +#endif // TARGET_AMD64 && _DEBUG #ifndef CROSSGEN_COMPILE /* @@ -409,7 +409,7 @@ void FixContext(ContextType ctxType, #endif // !FEATURE_EH_FUNCLETS #endif // #ifndef DACCESS_COMPILE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* Gets the ambient stack pointer value at the given nesting level within the method. @@ -420,7 +420,7 @@ TADDR GetAmbientSP(PREGDISPLAY pContext, DWORD dwRelOffset, DWORD nestingLevel, CodeManState *pState); -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Get the number of bytes used for stack parameters. @@ -474,12 +474,12 @@ virtual bool IsGcSafe( EECodeInfo *pCodeInfo, DWORD dwRelOffset); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) virtual bool HasTailCalls(EECodeInfo *pCodeInfo); -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 -#if defined(_TARGET_AMD64_) && defined(_DEBUG) +#if defined(TARGET_AMD64) && defined(_DEBUG) /* Locates the end of the last interruptible region in the given code range. Returns 0 if the entire range is uninterruptible. 
Returns the end point @@ -489,7 +489,7 @@ virtual unsigned FindEndOfLastInterruptibleRegion(unsigned curOffset, unsigned endOffset, GCInfoToken gcInfoToken); -#endif // _TARGET_AMD64_ && _DEBUG +#endif // TARGET_AMD64 && _DEBUG #ifndef CROSSGEN_COMPILE /* @@ -518,7 +518,7 @@ bool EnumGcRefsConservative(PREGDISPLAY pRD, LPVOID hCallBack); #endif // FEATURE_CONSERVATIVE_GC -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* Return the address of the local security object reference using data that was previously cached before in UnwindStackFrame @@ -527,7 +527,7 @@ bool EnumGcRefsConservative(PREGDISPLAY pRD, static OBJECTREF* GetAddrOfSecurityObjectFromCachedInfo( PREGDISPLAY pRD, StackwalkCacheUnwindInfo * stackwalkCacheUnwindInfo); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) virtual @@ -657,9 +657,9 @@ HRESULT FixContextForEnC(PCONTEXT pCtx, #ifdef FEATURE_EH_FUNCLETS static void EnsureCallerContextIsValid( PREGDISPLAY pRD, StackwalkCacheEntry* pCacheEntry, EECodeInfo * pCodeInfo = NULL ); static size_t GetCallerSp( PREGDISPLAY pRD ); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static size_t GetResumeSp( PCONTEXT pContext ); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS #ifdef DACCESS_COMPILE @@ -668,7 +668,7 @@ HRESULT FixContextForEnC(PCONTEXT pCtx, }; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 bool UnwindStackFrame(PREGDISPLAY pContext, EECodeInfo *pCodeInfo, unsigned flags, diff --git a/src/coreclr/src/inc/eexcp.h b/src/coreclr/src/inc/eexcp.h index e76426652b5d2..d4052e11ef567 100644 --- a/src/coreclr/src/inc/eexcp.h +++ b/src/coreclr/src/inc/eexcp.h @@ -128,7 +128,7 @@ inline BOOL IsDuplicateClause(EE_ILEXCEPTION_CLAUSE* pEHClause) return pEHClause->Flags & COR_ILEXCEPTION_CLAUSE_DUPLICATED; } -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Finally is the only EH construct that can be part of the execution as being fall-through. 
// // "Cloned" finally is a contruct that represents a finally block that is used as @@ -150,7 +150,7 @@ inline BOOL IsClonedFinally(EE_ILEXCEPTION_CLAUSE* pEHClause) (pEHClause->TryStartPC == pEHClause->HandlerStartPC) && IsFinally(pEHClause) && IsDuplicateClause(pEHClause)); } -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #endif // __eexcp_h__ diff --git a/src/coreclr/src/inc/eventtracebase.h b/src/coreclr/src/inc/eventtracebase.h index d9fee25768868..b8f50229bed62 100644 --- a/src/coreclr/src/inc/eventtracebase.h +++ b/src/coreclr/src/inc/eventtracebase.h @@ -75,7 +75,7 @@ enum EtwThreadFlags #define EVENT_PIPE_ENABLED() (FALSE) #endif -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) // // Use this macro at the least before calling the Event Macros @@ -105,7 +105,7 @@ enum EtwThreadFlags ((ProviderSymbol##_Context.IsEnabled) || EVENT_PIPE_ENABLED()) -#else //!defined(FEATURE_PAL) +#else //!defined(TARGET_UNIX) #if defined(FEATURE_PERFTRACING) #define ETW_INLINE #define ETWOnStartup(StartEventName, EndEventName) @@ -132,7 +132,7 @@ enum EtwThreadFlags #define ETW_TRACING_CATEGORY_ENABLED(Context, Level, Keyword) (ETW_CATEGORY_ENABLED(Context, Level, Keyword)) #define ETW_PROVIDER_ENABLED(ProviderSymbol) (XplatEventLogger::IsProviderEnabled(Context)) #endif // defined(FEATURE_PERFTRACING) -#endif // !defined(FEATURE_PAL) +#endif // !defined(TARGET_UNIX) #else // FEATURE_EVENT_TRACE @@ -170,7 +170,7 @@ struct ProfilerWalkHeapContext #ifdef FEATURE_EVENT_TRACE class Object; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) /***************************************/ /* Tracing levels supported by CLR ETW */ @@ -214,7 +214,7 @@ struct ProfilingScanContext; #include #include #endif //!FEATURE_REDHAWK -#endif //!defined(FEATURE_PAL) +#endif //!defined(TARGET_UNIX) #else // FEATURE_EVENT_TRACE @@ -231,7 +231,7 @@ struct ProfilingScanContext; extern UINT32 g_nClrInstanceId; #define GetClrInstanceId() (static_cast(g_nClrInstanceId)) -#if defined(FEATURE_PAL) && (defined(FEATURE_EVENT_TRACE) || defined(FEATURE_EVENTSOURCE_XPLAT)) +#if defined(TARGET_UNIX) && (defined(FEATURE_EVENT_TRACE) || defined(FEATURE_EVENTSOURCE_XPLAT)) #define KEYWORDZERO 0x0 /***************************************/ @@ -503,7 +503,7 @@ class XplatEventLogger }; -#endif // defined(FEATURE_PAL) && (defined(FEATURE_EVENT_TRACE) || defined(FEATURE_EVENTSOURCE_XPLAT)) +#endif // defined(TARGET_UNIX) && (defined(FEATURE_EVENT_TRACE) || defined(FEATURE_EVENTSOURCE_XPLAT)) #if defined(FEATURE_EVENT_TRACE) @@ -545,7 +545,7 @@ VOID EventPipeEtwCallbackDotNETRuntimePrivate( _In_opt_ EventFilterDescriptor* FilterData, _Inout_opt_ PVOID CallbackContext); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Callback and stack support #if !defined(DONOT_DEFINE_ETW_CALLBACK) && !defined(DACCESS_COMPILE) extern "C" { @@ -595,7 +595,7 @@ extern "C" { EtwCallout(RegHandle, Descriptor, NumberOfArguments, EventData) #endif //!DONOT_DEFINE_ETW_CALLBACK && !DACCESS_COMPILE -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX #include "clretwallmain.h" #if defined(FEATURE_PERFTRACING) @@ -653,7 +653,7 @@ class Thread; namespace ETW { // Class to wrap the ETW infrastructure logic -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) class CEtwTracer { #if defined(FEATURE_EVENT_TRACE) @@ -678,7 +678,7 @@ namespace ETW } #endif // FEATURE_EVENT_TRACE }; -#endif // !defined(FEATURE_PAL) +#endif // !defined(TARGET_UNIX) class LoaderLog; class MethodLog; @@ -749,7 +749,7 @@ namespace ETW 
class SamplingLog { -#if defined(FEATURE_EVENT_TRACE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EVENT_TRACE) && !defined(TARGET_UNIX) public: typedef enum _EtwStackWalkStatus { @@ -766,7 +766,7 @@ namespace ETW public: static ULONG SendStackTrace(MCGEN_TRACE_CONTEXT TraceContext, PCEVENT_DESCRIPTOR Descriptor, LPCGUID EventGuid); EtwStackWalkStatus GetCurrentThreadsCallStack(UINT32 *frameCount, PVOID **Stack); -#endif // FEATURE_EVENT_TRACE && !defined(FEATURE_PAL) +#endif // FEATURE_EVENT_TRACE && !defined(TARGET_UNIX) }; // Class to wrap all Loader logic for ETW @@ -1246,7 +1246,7 @@ namespace ETW #define ETWLoaderStaticLoad 0 // Static reference load #define ETWLoaderDynamicLoad 1 // Dynamic assembly load -#if defined(FEATURE_EVENT_TRACE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EVENT_TRACE) && !defined(TARGET_UNIX) // // The ONE and only ONE global instantiation of this class // @@ -1413,18 +1413,18 @@ typedef struct _MCGEN_TRACE_BUFFER { return Result; }; -#endif // FEATURE_EVENT_TRACE && !defined(FEATURE_PAL) +#endif // FEATURE_EVENT_TRACE && !defined(TARGET_UNIX) #ifdef FEATURE_EVENT_TRACE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 struct CallStackFrame { struct CallStackFrame* m_Next; SIZE_T m_ReturnAddress; }; -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_EVENT_TRACE -#if defined(FEATURE_EVENT_TRACE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EVENT_TRACE) && !defined(TARGET_UNIX) FORCEINLINE BOOLEAN __stdcall McGenEventProviderEnabled( @@ -1459,7 +1459,7 @@ McGenEventProviderEnabled( } return FALSE; } -#endif // FEATURE_EVENT_TRACE && !defined(FEATURE_PAL) +#endif // FEATURE_EVENT_TRACE && !defined(TARGET_UNIX) #endif // !FEATURE_REDHAWK diff --git a/src/coreclr/src/inc/ex.h b/src/coreclr/src/inc/ex.h index 755434c7ebe65..548fa2200dcc3 100644 --- a/src/coreclr/src/inc/ex.h +++ b/src/coreclr/src/inc/ex.h @@ -6,15 +6,15 @@ #if !defined(_EX_H_) #define _EX_H_ -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define EX_TRY_HOLDER \ HardwareExceptionHolder \ NativeExceptionHolderCatchAll __exceptionHolder; \ __exceptionHolder.Push(); \ -#else // FEATURE_PAL +#else // TARGET_UNIX #define EX_TRY_HOLDER -#endif // FEATURE_PAL +#endif // TARGET_UNIX #include "sstring.h" #include "crtwrap.h" diff --git a/src/coreclr/src/inc/formattype.cpp b/src/coreclr/src/inc/formattype.cpp index 037193673d5bf..7384d258067d8 100644 --- a/src/coreclr/src/inc/formattype.cpp +++ b/src/coreclr/src/inc/formattype.cpp @@ -217,11 +217,11 @@ PCCOR_SIGNATURE PrettyPrintSignature( label[strlen(label)-1] = 0; if(label[0] == '@') // it's pointer! 
{ -#ifdef BIT64 +#ifdef HOST_64BIT pszArgName = (ParamDescriptor*)_atoi64(&label[1]); -#else // !BIT64 +#else // !HOST_64BIT pszArgName = (ParamDescriptor*)(size_t)atoi(&label[1]); -#endif // BIT64 +#endif // HOST_64BIT } } diff --git a/src/coreclr/src/inc/gcdecoder.cpp b/src/coreclr/src/inc/gcdecoder.cpp index 850d2dda39920..93cd1b3e0697a 100644 --- a/src/coreclr/src/inc/gcdecoder.cpp +++ b/src/coreclr/src/inc/gcdecoder.cpp @@ -19,7 +19,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // See: https://github.com/dotnet/diagnostics/blob/master/src/inc/gcdecoder.cpp // ****************************************************************************** -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* This file is shared between the VM and JIT/IL and SOS/Strike directories */ @@ -621,4 +621,4 @@ const unsigned callPatternTable[80] = { // # of occurences 0x07000300, // 1684 }; -#endif // _TARGET_X86_ +#endif // TARGET_X86 diff --git a/src/coreclr/src/inc/gcdump.h b/src/coreclr/src/inc/gcdump.h index 7e9ca49548eac..7fada85f1224c 100644 --- a/src/coreclr/src/inc/gcdump.h +++ b/src/coreclr/src/inc/gcdump.h @@ -20,7 +20,7 @@ #include "gcinfotypes.h" // For InfoHdr #ifndef FASTCALL -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define FASTCALL __fastcall #else #define FASTCALL @@ -37,7 +37,7 @@ class GCDump unsigned maxEncBytes = 5, bool dumpCodeOffs = true); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /*------------------------------------------------------------------------- * Dumps the InfoHdr to 'stdout' * table : Start of the GC info block @@ -59,7 +59,7 @@ class GCDump */ size_t FASTCALL DumpGCTable (PTR_CBYTE gcInfoBlock, -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 const InfoHdr& header, #endif unsigned methodSize, diff --git a/src/coreclr/src/inc/gcinfo.h b/src/coreclr/src/inc/gcinfo.h index 1569b2b75d1c2..3aff57c4a100b 100644 --- a/src/coreclr/src/inc/gcinfo.h +++ b/src/coreclr/src/inc/gcinfo.h @@ -31,7 +31,7 @@ const unsigned OFFSET_MASK = 0x3; // mask to access the low 2 bits // const unsigned byref_OFFSET_FLAG = 0x1; // the offset is an interior ptr const unsigned pinned_OFFSET_FLAG = 0x2; // the offset is a pinned ptr -#if !defined(_TARGET_X86_) || !defined(FEATURE_EH_FUNCLETS) +#if !defined(TARGET_X86) || !defined(FEATURE_EH_FUNCLETS) const unsigned this_OFFSET_FLAG = 0x2; // the offset is "this" #endif diff --git a/src/coreclr/src/inc/gcinfodecoder.h b/src/coreclr/src/inc/gcinfodecoder.h index fee5942b31b21..8f63cf23715d9 100644 --- a/src/coreclr/src/inc/gcinfodecoder.h +++ b/src/coreclr/src/inc/gcinfodecoder.h @@ -20,7 +20,7 @@ #define _max(a, b) (((a) > (b)) ? (a) : (b)) #define _min(a, b) (((a) < (b)) ? 
(a) : (b)) -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) #define USE_GC_INFO_DECODER #endif @@ -70,11 +70,11 @@ inline void SetIP(T_CONTEXT* context, PCODE rip) inline TADDR GetSP(T_CONTEXT* context) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return (TADDR)context->Rsp; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return (TADDR)context->Sp; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return (TADDR)context->Sp; #else _ASSERTE(!"nyi for platform"); @@ -83,11 +83,11 @@ inline TADDR GetSP(T_CONTEXT* context) inline PCODE GetIP(T_CONTEXT* context) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return (PCODE) context->Rip; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return (PCODE)context->Pc; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return (PCODE)context->Pc; #else _ASSERTE(!"nyi for platform"); @@ -203,9 +203,9 @@ enum GcInfoDecoderFlags DECODE_EDIT_AND_CONTINUE = 0x800, DECODE_REVERSE_PINVOKE_VAR = 0x1000, DECODE_RETURN_KIND = 0x2000, -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) DECODE_HAS_TAILCALLS = 0x4000, -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 }; enum GcInfoHeaderFlags @@ -220,11 +220,11 @@ enum GcInfoHeaderFlags GC_INFO_HAS_GENERICS_INST_CONTEXT_MD = 0x20, GC_INFO_HAS_GENERICS_INST_CONTEXT_THIS = 0x30, GC_INFO_HAS_STACK_BASE_REGISTER = 0x40, -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 GC_INFO_WANTS_REPORT_ONLY_LEAF = 0x80, -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) GC_INFO_HAS_TAILCALLS = 0x80, -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 GC_INFO_HAS_EDIT_AND_CONTINUE_PRESERVED_SLOTS = 0x100, GC_INFO_REVERSE_PINVOKE_FRAME = 0x200, @@ -527,9 +527,9 @@ class GcInfoDecoder bool HasMethodTableGenericsInstContext(); bool GetIsVarArg(); bool WantsReportOnlyLeaf(); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) bool HasTailCalls(); -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 ReturnKind GetReturnKind(); UINT32 GetCodeLength(); UINT32 GetStackBaseRegister(); @@ -550,11 +550,11 @@ class GcInfoDecoder bool m_IsVarArg; bool m_GenericSecretParamIsMD; bool m_GenericSecretParamIsMT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 bool m_WantsReportOnlyLeaf; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) bool m_HasTailCalls; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 INT32 m_SecurityObjectStackSlot; INT32 m_GSCookieStackSlot; INT32 m_ReversePInvokeFrameStackSlot; @@ -590,12 +590,12 @@ class GcInfoDecoder PREGDISPLAY pRD ); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX OBJECTREF* GetCapturedRegister( int regNum, PREGDISPLAY pRD ); -#endif // FEATURE_PAL +#endif // TARGET_UNIX OBJECTREF* GetStackSlot( INT32 spOffset, diff --git a/src/coreclr/src/inc/gcinfoencoder.h b/src/coreclr/src/inc/gcinfoencoder.h index 119f622378076..1df36dbc8ba56 100644 --- a/src/coreclr/src/inc/gcinfoencoder.h +++ b/src/coreclr/src/inc/gcinfoencoder.h @@ -436,14 +436,14 @@ class GcInfoEncoder // Number of slots preserved during EnC remap void SetSizeOfEditAndContinuePreservedArea( UINT32 size ); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Used to only report a frame once for the leaf function/funclet // instead of once for each live function/funclet on the stack. 
// Called only by RyuJIT (not JIT64) void SetWantsReportOnlyLeaf(); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) void SetHasTailCalls(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA void SetSizeOfStackOutgoingAndScratchArea( UINT32 size ); @@ -495,11 +495,11 @@ class GcInfoEncoder GcInfoArrayList m_LifetimeTransitions; bool m_IsVarArg; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) bool m_WantsReportOnlyLeaf; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) bool m_HasTailCalls; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 INT32 m_SecurityObjectStackSlot; INT32 m_GSCookieStackSlot; UINT32 m_GSCookieValidRangeStart; diff --git a/src/coreclr/src/inc/gcinfotypes.h b/src/coreclr/src/inc/gcinfotypes.h index 5be3d14d8f76b..91c577ce31c48 100644 --- a/src/coreclr/src/inc/gcinfotypes.h +++ b/src/coreclr/src/inc/gcinfotypes.h @@ -143,21 +143,21 @@ struct GcStackSlot // RT_Unset should have a valid encoding, whose bits are actually stored in the image. // For X86, there are no free bits, and there's no RT_Unused enumeration. -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // 00 RT_Scalar // 01 RT_Object // 10 RT_ByRef // 11 RT_Float -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // 00 RT_Scalar // 01 RT_Object // 10 RT_ByRef // 11 RT_Unset -#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Slim Header: @@ -193,11 +193,11 @@ enum ReturnKind { RT_Object = 1, RT_ByRef = 2, -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 RT_Float = 3, // Encoding 3 means RT_Float on X86 #else RT_Unset = 3, // RT_Unset on other platforms -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Cases for Struct Return in two registers // @@ -239,9 +239,9 @@ enum ReturnKind { inline bool IsValidReturnKind(ReturnKind returnKind) { return (returnKind != RT_Illegal) -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 && (returnKind != RT_Unset) -#endif // _TARGET_X86_ +#endif // TARGET_X86 ; } @@ -276,9 +276,9 @@ inline bool IsStructReturnKind(ReturnKind returnKind) inline bool IsScalarReturnKind(ReturnKind returnKind) { return (returnKind == RT_Scalar) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 || (returnKind == RT_Float) -#endif // _TARGET_X86_ +#endif // TARGET_X86 ; } @@ -328,11 +328,11 @@ inline const char *ReturnKindToString(ReturnKind returnKind) case RT_Scalar: return "Scalar"; case RT_Object: return "Object"; case RT_ByRef: return "ByRef"; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case RT_Float: return "Float"; #else case RT_Unset: return "UNSET"; -#endif // _TARGET_X86_ +#endif // TARGET_X86 case RT_Scalar_Obj: return "{Scalar, Object}"; case RT_Scalar_ByRef: return "{Scalar, ByRef}"; case RT_Obj_Obj: return "{Object, Object}"; @@ -345,7 +345,7 @@ inline const char *ReturnKindToString(ReturnKind returnKind) } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #include // For memcmp() #include "bitvector.h" // for ptrArgTP @@ -603,7 +603,7 @@ void FASTCALL decodeCallPattern(int pattern, #define NO_REVERSE_PINVOKE_FRAME (-1) #define NO_PSP_SYM (-1) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #ifndef TARGET_POINTER_SIZE #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target @@ -659,7 +659,7 @@ void FASTCALL decodeCallPattern(int pattern, #define LIVESTATE_RLE_RUN_ENCBASE 2 #define LIVESTATE_RLE_SKIP_ENCBASE 4 -#elif defined(_TARGET_ARM_) 
+#elif defined(TARGET_ARM) #ifndef TARGET_POINTER_SIZE #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target @@ -717,7 +717,7 @@ void FASTCALL decodeCallPattern(int pattern, #define LIVESTATE_RLE_RUN_ENCBASE 2 #define LIVESTATE_RLE_SKIP_ENCBASE 4 -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #ifndef TARGET_POINTER_SIZE #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target @@ -774,7 +774,7 @@ void FASTCALL decodeCallPattern(int pattern, #else -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 #ifdef PORTABILITY_WARNING PORTABILITY_WARNING("Please specialize these definitions for your platform!") #endif diff --git a/src/coreclr/src/inc/gcrefmap.h b/src/coreclr/src/inc/gcrefmap.h index d566de28f527b..5961162582faa 100644 --- a/src/coreclr/src/inc/gcrefmap.h +++ b/src/coreclr/src/inc/gcrefmap.h @@ -80,7 +80,7 @@ class GCRefMapBuilder { } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void WriteStackPop(int stackPop) { if (stackPop < 3) @@ -205,7 +205,7 @@ class GCRefMapDecoder return m_PendingByte == 0; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 UINT ReadStackPop() { int x = GetTwoBit(); diff --git a/src/coreclr/src/inc/getproductversionnumber.h b/src/coreclr/src/inc/getproductversionnumber.h index 4bdfe90ddcb35..de90409db42ca 100644 --- a/src/coreclr/src/inc/getproductversionnumber.h +++ b/src/coreclr/src/inc/getproductversionnumber.h @@ -35,7 +35,7 @@ void inline GetProductVersionNumber(SString &szFullPath, DWORD * pdwVersionMS, DWORD * pdwVersionLS) { WRAPPER_NO_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD dwDummy = 0; DWORD dwFileInfoSize = 0; @@ -71,7 +71,7 @@ void inline GetProductVersionNumber(SString &szFullPath, DWORD * pdwVersionMS, D #else *pdwVersionMS = 0; *pdwVersionLS = 0; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } #endif // __GetProductVersionNumber_h__ diff --git a/src/coreclr/src/inc/holder.h b/src/coreclr/src/inc/holder.h index f49f342bd34e3..4ac4182717233 100644 --- a/src/coreclr/src/inc/holder.h +++ b/src/coreclr/src/inc/holder.h @@ -1175,10 +1175,10 @@ FORCEINLINE void CounterDecrease(RAW_KEYWORD(volatile) LONG* p) {InterlockedDecr typedef Wrapper> CounterHolder; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX FORCEINLINE void RegKeyRelease(HKEY k) {RegCloseKey(k);}; typedef Wrapper RegKeyHolder; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX class ErrorModeHolder { @@ -1189,7 +1189,7 @@ class ErrorModeHolder UINT OldMode() {return m_oldMode;}; }; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //----------------------------------------------------------------------------- // HKEYHolder : HKEY holder, Calls RegCloseKey on scope exit. // @@ -1244,7 +1244,7 @@ class HKEYHolder private: HKEY m_value; }; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX //---------------------------------------------------------------------------- // diff --git a/src/coreclr/src/inc/jithelpers.h b/src/coreclr/src/inc/jithelpers.h index 4dfbba733dcac..73b5d2b54cf07 100644 --- a/src/coreclr/src/inc/jithelpers.h +++ b/src/coreclr/src/inc/jithelpers.h @@ -38,15 +38,15 @@ // CORINFO_HELP_DBL2INT, CORINFO_HELP_DBL2UINT, and CORINFO_HELP_DBL2LONG get // patched for CPUs that support SSE2 (P4 and above). 
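An illustrative aside on the comment above (hypothetical names throughout; the real plumbing lives in the VM's helper-table initialization, which this diff does not show): "patched" means the helper slot the JIT calls through is chosen once at startup from the detected CPU features, roughly as in this self-contained sketch.

    #include <cstdint>

    // Hypothetical sketch of startup-time helper selection; function and
    // variable names are illustrative, not actual CoreCLR entry points.
    static int32_t JIT_Dbl2Int_Portable(double val) { return (int32_t)val; }
    static int32_t JIT_Dbl2Int_SSE2(double val)     { return (int32_t)val; } // stand-in body

    using Dbl2IntFn = int32_t (*)(double);
    static Dbl2IntFn s_dbl2IntHelper = nullptr;

    void InitFpConversionHelpers(bool cpuSupportsSse2)
    {
        // Fill the slot once with whichever body the current CPU can run;
        // generated code calls through the slot and never checks again.
        s_dbl2IntHelper = cpuSupportsSse2 ? JIT_Dbl2Int_SSE2 : JIT_Dbl2Int_Portable;
    }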
-#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT JITHELPER(CORINFO_HELP_LLSH, JIT_LLsh, CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_LRSH, JIT_LRsh, CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_LRSZ, JIT_LRsz, CORINFO_HELP_SIG_REG_ONLY) -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT JITHELPER(CORINFO_HELP_LLSH, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB) JITHELPER(CORINFO_HELP_LRSH, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB) JITHELPER(CORINFO_HELP_LRSZ, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB) -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT JITHELPER(CORINFO_HELP_LMUL, JIT_LMul, CORINFO_HELP_SIG_16_STACK) JITHELPER(CORINFO_HELP_LMUL_OVF, JIT_LMulOvf, CORINFO_HELP_SIG_16_STACK) JITHELPER(CORINFO_HELP_ULMUL_OVF, JIT_ULMulOvf, CORINFO_HELP_SIG_16_STACK) @@ -108,11 +108,11 @@ JITHELPER(CORINFO_HELP_UNBOX_NULLABLE, JIT_Unbox_Nullable, CORINFO_HELP_SIG_4_STACK) JITHELPER(CORINFO_HELP_GETREFANY, JIT_GetRefAny, CORINFO_HELP_SIG_8_STACK) -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) DYNAMICJITHELPER(CORINFO_HELP_ARRADDR_ST, JIT_Stelem_Ref, CORINFO_HELP_SIG_4_STACK) #else JITHELPER(CORINFO_HELP_ARRADDR_ST, JIT_Stelem_Ref, CORINFO_HELP_SIG_4_STACK) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM JITHELPER(CORINFO_HELP_LDELEMA_REF, JIT_Ldelema_Ref, CORINFO_HELP_SIG_4_STACK) // Exceptions @@ -203,7 +203,7 @@ JITHELPER(CORINFO_HELP_GETGENERICS_GCSTATIC_BASE, JIT_GetGenericsGCStaticBase,CORINFO_HELP_SIG_REG_ONLY) JITHELPER(CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE, JIT_GetGenericsNonGCStaticBase,CORINFO_HELP_SIG_REG_ONLY) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 DYNAMICJITHELPER(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, NULL, CORINFO_HELP_SIG_REG_ONLY) DYNAMICJITHELPER(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, NULL, CORINFO_HELP_SIG_REG_ONLY) DYNAMICJITHELPER(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, NULL, CORINFO_HELP_SIG_REG_ONLY) @@ -245,13 +245,13 @@ JITHELPER(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, JIT_GetCurrentManagedThreadId, CORINFO_HELP_SIG_REG_ONLY) -#ifdef BIT64 +#ifdef HOST_64BIT JITHELPER(CORINFO_HELP_INIT_PINVOKE_FRAME, JIT_InitPInvokeFrame, CORINFO_HELP_SIG_REG_ONLY) #else DYNAMICJITHELPER(CORINFO_HELP_INIT_PINVOKE_FRAME, NULL, CORINFO_HELP_SIG_REG_ONLY) #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 JITHELPER(CORINFO_HELP_MEMSET, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB) JITHELPER(CORINFO_HELP_MEMCPY, NULL, CORINFO_HELP_SIG_CANNOT_USE_ALIGN_STUB) #else @@ -321,7 +321,7 @@ JITHELPER(CORINFO_HELP_EE_PERSONALITY_ROUTINE_FILTER_FUNCLET, NULL, CORINFO_HELP_SIG_UNDEF) #endif // !FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 JITHELPER(CORINFO_HELP_ASSIGN_REF_EAX, JIT_WriteBarrierEAX, CORINFO_HELP_SIG_NO_ALIGN_STUB) JITHELPER(CORINFO_HELP_ASSIGN_REF_EBX, JIT_WriteBarrierEBX, CORINFO_HELP_SIG_NO_ALIGN_STUB) JITHELPER(CORINFO_HELP_ASSIGN_REF_ECX, JIT_WriteBarrierECX, CORINFO_HELP_SIG_NO_ALIGN_STUB) @@ -368,7 +368,7 @@ JITHELPER(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, NULL, CORINFO_HELP_SIG_NO_ALIGN_STUB) -#ifndef _TARGET_ARM64_ +#ifndef TARGET_ARM64 JITHELPER(CORINFO_HELP_STACK_PROBE, JIT_StackProbe, CORINFO_HELP_SIG_REG_ONLY) #else JITHELPER(CORINFO_HELP_STACK_PROBE, NULL, CORINFO_HELP_SIG_UNDEF) diff --git a/src/coreclr/src/inc/livedatatarget.h b/src/coreclr/src/inc/livedatatarget.h index cbbc72015f8ab..a2107c035808a 100644 --- a/src/coreclr/src/inc/livedatatarget.h +++ b/src/coreclr/src/inc/livedatatarget.h @@ -14,7 +14,7 @@ // Does not include IXClrData definitions. 
#include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //--------------------------------------------------------------------------------------- // @@ -101,7 +101,7 @@ class LiveProcDataTarget : public ICLRDataTarget CLRDATA_ADDRESS m_baseAddressOfEngine; }; -#endif // FEATURE_PAL +#endif // TARGET_UNIX #endif // _LIVEPROC_DATATARGET_H_ diff --git a/src/coreclr/src/inc/longfilepathwrappers.h b/src/coreclr/src/inc/longfilepathwrappers.h index 264315b9553db..23033c2196635 100644 --- a/src/coreclr/src/inc/longfilepathwrappers.h +++ b/src/coreclr/src/inc/longfilepathwrappers.h @@ -50,7 +50,7 @@ FindFirstFileExWrapper( _In_ DWORD dwAdditionalFlags ); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL CopyFileExWrapper( _In_ LPCWSTR lpExistingFileName, @@ -61,7 +61,7 @@ CopyFileExWrapper( _Inout_opt_ LPBOOL pbCancel, _In_ DWORD dwCopyFlags ); -#endif //FEATURE_PAL +#endif //TARGET_UNIX BOOL MoveFileExWrapper( diff --git a/src/coreclr/src/inc/nibblemapmacros.h b/src/coreclr/src/inc/nibblemapmacros.h index 615251484895c..7153bc6776122 100644 --- a/src/coreclr/src/inc/nibblemapmacros.h +++ b/src/coreclr/src/inc/nibblemapmacros.h @@ -25,7 +25,7 @@ // nibbles inside a DWORD from the highest bits (28..31). Because // of that we can scan backwards inside the DWORD with right shifts. -#if defined(BIT64) +#if defined(HOST_64BIT) // TODO: bump up the windows CODE_ALIGN to 16 and iron out any nibble map bugs that exist. // TODO: there is something wrong with USE_INDIRECT_CODEHEADER with CODE_ALIGN=16 # define CODE_ALIGN 4 diff --git a/src/coreclr/src/inc/optsmallperfcritical.h b/src/coreclr/src/inc/optsmallperfcritical.h index e862808e77faa..44f7c0fa485c7 100644 --- a/src/coreclr/src/inc/optsmallperfcritical.h +++ b/src/coreclr/src/inc/optsmallperfcritical.h @@ -15,7 +15,7 @@ #if defined(_MSC_VER) && !defined(_DEBUG) #pragma optimize("t", on) // optimize for speed - #if !defined(_AMD64_) // 'y' isn't an option on amd64 + #if !defined(HOST_AMD64) // 'y' isn't an option on amd64 #pragma optimize("y", on) // omit frame pointer - #endif // !defined(_TARGET_AMD64_) + #endif // !defined(TARGET_AMD64) #endif diff --git a/src/coreclr/src/inc/ostype.h b/src/coreclr/src/inc/ostype.h index 4c45f39ca75fd..18ed50a10c038 100644 --- a/src/coreclr/src/inc/ostype.h +++ b/src/coreclr/src/inc/ostype.h @@ -52,7 +52,7 @@ void InitWinRTStatus(); inline BOOL RunningOnWin8() { WRAPPER_NO_CONTRACT; -#if (!defined(_X86_) && !defined(_AMD64_)) || defined(CROSSGEN_COMPILE) +#if (!defined(HOST_X86) && !defined(HOST_AMD64)) || defined(CROSSGEN_COMPILE) return TRUE; #else if (gRunningOnStatus == RUNNING_ON_STATUS_UNINITED) @@ -94,7 +94,7 @@ inline BOOL WinRTSupported() #endif // FEATURE_COMINTEROP -#ifdef BIT64 +#ifdef HOST_64BIT inline BOOL RunningInWow64() { return FALSE; diff --git a/src/coreclr/src/inc/palclr.h b/src/coreclr/src/inc/palclr.h index ca2ae0b3bcbdf..25cd9e80879bf 100644 --- a/src/coreclr/src/inc/palclr.h +++ b/src/coreclr/src/inc/palclr.h @@ -10,7 +10,7 @@ // =========================================================================== -#if !defined(FEATURE_PAL) +#if !defined(HOST_UNIX) #ifndef __PALCLR_H__ #define __PALCLR_H__ @@ -19,11 +19,11 @@ // Unix L"" is UTF32, and on windows it's UTF16. Because of built-in assumptions on the size // of string literals, it's important to match behaviour between Unix and Windows. 
Unix will be defined // as u"" (char16_t) -#ifdef PLATFORM_UNIX +#ifdef HOST_UNIX #define W(str) u##str -#else // PLATFORM_UNIX +#else // HOST_UNIX #define W(str) L##str -#endif // PLATFORM_UNIX +#endif // HOST_UNIX #include @@ -70,11 +70,11 @@ // usage pattern is: // // int get_scratch_register() { -// #if defined(_TARGET_X86_) +// #if defined(TARGET_X86) // return eax; -// #elif defined(_TARGET_AMD64_) +// #elif defined(TARGET_AMD64) // return rax; -// #elif defined(_TARGET_ARM_) +// #elif defined(TARGET_ARM) // return r0; // #else // PORTABILITY_ASSERT("scratch register"); @@ -99,7 +99,7 @@ // The message in these two macros should not contain any keywords like TODO // or NYI. It should be just the brief description of the problem. -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Finished ports - compile-time errors #define PORTABILITY_WARNING(message) NEED_TO_PORT_THIS_ONE(NEED_TO_PORT_THIS_ONE) #define PORTABILITY_ASSERT(message) NEED_TO_PORT_THIS_ONE(NEED_TO_PORT_THIS_ONE) @@ -589,7 +589,7 @@ #define SET_UNALIGNED_VAL64(_pObject, _Value) SET_UNALIGNED_64(_pObject, VAL64((UINT64)_Value)) #endif -#ifdef BIT64 +#ifdef HOST_64BIT #define VALPTR(x) VAL64(x) #define GET_UNALIGNED_PTR(x) GET_UNALIGNED_64(x) #define GET_UNALIGNED_VALPTR(x) GET_UNALIGNED_VAL64(x) @@ -625,4 +625,4 @@ #include "palclr_win.h" -#endif // !defined(FEATURE_PAL) +#endif // !defined(HOST_UNIX) diff --git a/src/coreclr/src/inc/palclr_win.h b/src/coreclr/src/inc/palclr_win.h index 6b6c712fc0fe7..2d88e6fcc378f 100644 --- a/src/coreclr/src/inc/palclr_win.h +++ b/src/coreclr/src/inc/palclr_win.h @@ -85,7 +85,7 @@ -#if defined(_DEBUG_IMPL) && !defined(JIT_BUILD) && !defined(JIT64_BUILD) && !defined(_ARM_) // @ARMTODO +#if defined(_DEBUG_IMPL) && !defined(JIT_BUILD) && !defined(JIT64_BUILD) && !defined(HOST_ARM) // @ARMTODO #define WIN_PAL_TRY_HANDLER_DBG_BEGIN \ BOOL ___oldOkayToThrowValue = FALSE; \ ClrDebugState *___pState = GetClrDebugState(); \ @@ -135,10 +135,10 @@ #define WIN_PAL_ENDTRY_NAKED_DBG #endif // defined(ENABLE_CONTRACTS_IMPL) && !defined(JIT64_BUILD) -#if !defined (FEATURE_PAL) +#if !defined (TARGET_UNIX) // Native system libray handle. // In Windows, NATIVE_LIBRARY_HANDLE is the same as HMODULE. 
typedef HMODULE NATIVE_LIBRARY_HANDLE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // __PALCLR_WIN_H__ diff --git a/src/coreclr/src/inc/pedecoder.h b/src/coreclr/src/inc/pedecoder.h index 5dff3354274ee..42d6eca9a50f4 100644 --- a/src/coreclr/src/inc/pedecoder.h +++ b/src/coreclr/src/inc/pedecoder.h @@ -73,13 +73,13 @@ inline CHECK CheckOverflow(RVA value1, COUNT_T value2) // IMAGE_FILE_MACHINE_NATIVE // -------------------------------------------------------------------------------- -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_I386 -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_AMD64 -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARMNT -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARM64 #else #error "port me" diff --git a/src/coreclr/src/inc/pedecoder.inl b/src/coreclr/src/inc/pedecoder.inl index b6fe115509171..c9ea3f7abf64e 100644 --- a/src/coreclr/src/inc/pedecoder.inl +++ b/src/coreclr/src/inc/pedecoder.inl @@ -1294,7 +1294,7 @@ inline void PEDecoder::GetPEKindAndMachine(DWORD * pdwPEKind, DWORD *pdwMachine) if (dwCorFlags & VAL32(COMIMAGE_FLAGS_ILONLY)) { dwKind |= (DWORD)peILonly; -#ifdef BIT64 +#ifdef HOST_64BIT // compensate for shim promotion of PE32/ILONLY headers to PE32+ on WIN64 if (fIsPE32Plus && (GetMachine() == IMAGE_FILE_MACHINE_I386)) dwKind &= ~((DWORD)pe32Plus); diff --git a/src/coreclr/src/inc/peinformation.h b/src/coreclr/src/inc/peinformation.h index 7c640ab252ba2..80e7f71d67821 100644 --- a/src/coreclr/src/inc/peinformation.h +++ b/src/coreclr/src/inc/peinformation.h @@ -43,19 +43,19 @@ inline bool IsPEMSIL(PEKIND x) return ( (x == peMSIL) ); } -#ifdef BIT64 +#ifdef HOST_64BIT inline bool IsProcess32() { return false; } #else inline bool IsProcess32() { return true; } #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) inline PEKIND TargetNativePEKIND() { return peI386; } -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) inline PEKIND TargetNativePEKIND() { return peAMD64; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) inline PEKIND TargetNativePEKIND() { return peARM; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) inline PEKIND TargetNativePEKIND() { return peARM64; } #else #error Need to define valid TargetNativePEKIND() diff --git a/src/coreclr/src/inc/perflog.h b/src/coreclr/src/inc/perflog.h index f6f0c7f242cf9..90fabc64d343b 100644 --- a/src/coreclr/src/inc/perflog.h +++ b/src/coreclr/src/inc/perflog.h @@ -17,7 +17,7 @@ #ifndef _PERFLOG_H_ #define _PERFLOG_H_ -#if !defined(BIT64) && !defined(DACCESS_COMPILE) +#if !defined(HOST_64BIT) && !defined(DACCESS_COMPILE) #define ENABLE_PERF_LOG #else #undef ENABLE_PERF_LOG diff --git a/src/coreclr/src/inc/readytorunhelpers.h b/src/coreclr/src/inc/readytorunhelpers.h index 422ae499f667e..c9a60afc5dda1 100644 --- a/src/coreclr/src/inc/readytorunhelpers.h +++ b/src/coreclr/src/inc/readytorunhelpers.h @@ -89,12 +89,12 @@ HELPER(READYTORUN_HELPER_DblRem, CORINFO_HELP_DBLREM, HELPER(READYTORUN_HELPER_FltRound, CORINFO_HELP_FLTROUND, ) HELPER(READYTORUN_HELPER_DblRound, CORINFO_HELP_DBLROUND, ) -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 HELPER(READYTORUN_HELPER_PersonalityRoutine, CORINFO_HELP_EE_PERSONALITY_ROUTINE, OPTIMIZEFORSIZE) HELPER(READYTORUN_HELPER_PersonalityRoutineFilterFunclet, 
CORINFO_HELP_EE_PERSONALITY_ROUTINE_FILTER_FUNCLET, OPTIMIZEFORSIZE) #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 HELPER(READYTORUN_HELPER_WriteBarrier_EAX, CORINFO_HELP_ASSIGN_REF_EAX, ) HELPER(READYTORUN_HELPER_WriteBarrier_EBX, CORINFO_HELP_ASSIGN_REF_EBX, ) HELPER(READYTORUN_HELPER_WriteBarrier_ECX, CORINFO_HELP_ASSIGN_REF_ECX, ) @@ -118,7 +118,7 @@ HELPER(READYTORUN_HELPER_GCPoll, CORINFO_HELP_POLL_GC, HELPER(READYTORUN_HELPER_MonitorEnter, CORINFO_HELP_MON_ENTER, ) HELPER(READYTORUN_HELPER_MonitorExit, CORINFO_HELP_MON_EXIT, ) -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) HELPER(READYTORUN_HELPER_StackProbe, CORINFO_HELP_STACK_PROBE, ) #endif diff --git a/src/coreclr/src/inc/regdisp.h b/src/coreclr/src/inc/regdisp.h index 0fd53a98796ce..ef0aee78dd768 100644 --- a/src/coreclr/src/inc/regdisp.h +++ b/src/coreclr/src/inc/regdisp.h @@ -65,7 +65,7 @@ inline void SetRegdisplaySP(REGDISPLAY_BASE *pRD, LPVOID sp) { pRD->SP = (TADDR)sp; } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) struct REGDISPLAY : public REGDISPLAY_BASE { @@ -154,9 +154,9 @@ inline TADDR GetRegdisplayStackMark(REGDISPLAY *display) { #endif } -#elif defined(_TARGET_64BIT_) +#elif defined(TARGET_64BIT) -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) typedef struct _Arm64VolatileContextPointer { union { @@ -184,9 +184,9 @@ typedef struct _Arm64VolatileContextPointer PDWORD64 X[18]; }; } Arm64VolatileContextPointer; -#endif //_TARGET_ARM64_ +#endif //TARGET_ARM64 struct REGDISPLAY : public REGDISPLAY_BASE { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 Arm64VolatileContextPointer volatileCurrContextPointers; #endif @@ -217,23 +217,23 @@ inline BOOL IsInCalleesFrames(REGDISPLAY *display, LPVOID stackPointer) inline TADDR GetRegdisplayStackMark(REGDISPLAY *display) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // On AMD64, the MemoryStackFp value is the current sp (i.e. the sp value when calling another method). _ASSERTE(GetRegdisplaySP(display) == GetSP(display->pCurrentContext)); return GetRegdisplaySP(display); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) _ASSERTE(display->IsCallerContextValid); return GetSP(display->pCallerContext); -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 PORTABILITY_ASSERT("GetRegdisplayStackMark NYI for this platform (Regdisp.h)"); return NULL; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // ResumableFrame is pushed on the stack before // starting the GC. registers r0-r3 in ResumableFrame can @@ -289,19 +289,19 @@ inline TADDR GetRegdisplayStackMark(REGDISPLAY *display) { #error "RegDisplay functions are not implemented on this platform." #endif -#if defined(_TARGET_64BIT_) || defined(_TARGET_ARM_) || (defined(_TARGET_X86_) && defined(FEATURE_EH_FUNCLETS)) +#if defined(TARGET_64BIT) || defined(TARGET_ARM) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) // This needs to be implemented for platforms that have funclets. 
inline LPVOID GetRegdisplayReturnValue(REGDISPLAY *display) { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) return (LPVOID)display->pCurrentContext->Rax; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return (LPVOID)display->pCurrentContext->X0; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return (LPVOID)((TADDR)display->pCurrentContext->R0); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) return (LPVOID)display->pCurrentContext->Eax; #else PORTABILITY_ASSERT("GetRegdisplayReturnValue NYI for this platform (Regdisp.h)"); @@ -313,16 +313,16 @@ inline void SyncRegDisplayToCurrentContext(REGDISPLAY* pRD) { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_64BIT_) +#if defined(TARGET_64BIT) pRD->SP = (INT_PTR)GetSP(pRD->pCurrentContext); pRD->ControlPC = INT_PTR(GetIP(pRD->pCurrentContext)); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) pRD->SP = (DWORD)GetSP(pRD->pCurrentContext); pRD->ControlPC = (DWORD)GetIP(pRD->pCurrentContext); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) pRD->SP = (DWORD)GetSP(pRD->pCurrentContext); pRD->ControlPC = (DWORD)GetIP(pRD->pCurrentContext); -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("SyncRegDisplayToCurrentContext"); #endif @@ -330,35 +330,35 @@ inline void SyncRegDisplayToCurrentContext(REGDISPLAY* pRD) CheckRegDisplaySP(pRD); #endif // DEBUG_REGDISPLAY } -#endif // _TARGET_64BIT_ || _TARGET_ARM_ || (_TARGET_X86_ && FEATURE_EH_FUNCLETS) +#endif // TARGET_64BIT || TARGET_ARM || (TARGET_X86 && FEATURE_EH_FUNCLETS) typedef REGDISPLAY *PREGDISPLAY; #ifdef FEATURE_EH_FUNCLETS inline void FillContextPointers(PT_KNONVOLATILE_CONTEXT_POINTERS pCtxPtrs, PT_CONTEXT pCtx) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 for (int i = 0; i < 16; i++) { *(&pCtxPtrs->Rax + i) = (&pCtx->Rax + i); } -#elif defined(_TARGET_ARM64_) // _TARGET_AMD64_ +#elif defined(TARGET_ARM64) // TARGET_AMD64 for (int i = 0; i < 12; i++) { *(&pCtxPtrs->X19 + i) = (&pCtx->X19 + i); } -#elif defined(_TARGET_ARM_) // _TARGET_ARM64_ +#elif defined(TARGET_ARM) // TARGET_ARM64 // Copy over the nonvolatile integer registers (R4-R11) for (int i = 0; i < 8; i++) { *(&pCtxPtrs->R4 + i) = (&pCtx->R4 + i); } -#elif defined(_TARGET_X86_) // _TARGET_ARM_ +#elif defined(TARGET_X86) // TARGET_ARM for (int i = 0; i < 7; i++) { *(&pCtxPtrs->Edi + i) = (&pCtx->Edi + i); } -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("FillContextPointers"); #endif // _TARGET_???_ (ELSE) } @@ -371,7 +371,7 @@ inline void FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, PT_CONTEXT pC SUPPORTS_DAC; #ifndef FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pRD->pContext = pctx; pRD->pContextForUnwind = NULL; pRD->pEdi = &(pctx->Edi); @@ -384,7 +384,7 @@ inline void FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, PT_CONTEXT pC pRD->SP = pctx->Esp; pRD->ControlPC = (PCODE)(pctx->Eip); pRD->PCTAddr = (UINT_PTR)&(pctx->Eip); -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("FillRegDisplay"); #endif // _TARGET_???_ (ELSE) @@ -416,7 +416,7 @@ inline void FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, PT_CONTEXT pC FillContextPointers(&pRD->ctxPtrsOne, pctx); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Fill volatile context pointers. 
They can be used by GC in the case of the leaf frame pRD->volatileCurrContextPointers.R0 = &pctx->R0; pRD->volatileCurrContextPointers.R1 = &pctx->R1; @@ -426,11 +426,11 @@ inline void FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, PT_CONTEXT pC pRD->ctxPtrsOne.Lr = &pctx->Lr; pRD->pPC = &pRD->pCurrentContext->Pc; -#elif defined(_TARGET_ARM64_) // _TARGET_ARM_ +#elif defined(TARGET_ARM64) // TARGET_ARM // Fill volatile context pointers. They can be used by GC in the case of the leaf frame for (int i=0; i < 18; i++) pRD->volatileCurrContextPointers.X[i] = &pctx->X[i]; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #ifdef DEBUG_REGDISPLAY pRD->_pThread = NULL; @@ -453,7 +453,7 @@ inline void CopyRegDisplay(const PREGDISPLAY pInRD, PREGDISPLAY pOutRD, T_CONTEX #ifndef FEATURE_EH_FUNCLETS -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (pInRD->pEdi != NULL) {pOutCtx->Edi = *pInRD->pEdi;} else {pInRD->pEdi = NULL;} if (pInRD->pEsi != NULL) {pOutCtx->Esi = *pInRD->pEsi;} else {pInRD->pEsi = NULL;} if (pInRD->pEbx != NULL) {pOutCtx->Ebx = *pInRD->pEbx;} else {pInRD->pEbx = NULL;} @@ -463,7 +463,7 @@ inline void CopyRegDisplay(const PREGDISPLAY pInRD, PREGDISPLAY pOutRD, T_CONTEX if (pInRD->pEdx != NULL) {pOutCtx->Edx = *pInRD->pEdx;} else {pInRD->pEdx = NULL;} pOutCtx->Esp = pInRD->SP; pOutCtx->Eip = pInRD->ControlPC; -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("CopyRegDisplay"); #endif // _TARGET_???_ @@ -485,7 +485,7 @@ inline void CopyRegDisplay(const PREGDISPLAY pInRD, PREGDISPLAY pOutRD, T_CONTEX // the reg number is the R/M number from ModR/M byte or base in SIB byte inline size_t * getRegAddr (unsigned regNum, PTR_CONTEXT regs) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(regNum < 8); static const SIZE_T OFFSET_OF_REGISTERS[] = @@ -501,13 +501,13 @@ inline size_t * getRegAddr (unsigned regNum, PTR_CONTEXT regs) }; return (PTR_size_t)(PTR_BYTE(regs) + OFFSET_OF_REGISTERS[regNum]); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) _ASSERTE(regNum < 16); return ®s->Rax + regNum; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) _ASSERTE(regNum < 16); return (size_t *)®s->R0 + regNum; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) _ASSERTE(regNum < 31); return (size_t *)®s->X0 + regNum; #else @@ -530,7 +530,7 @@ inline void UpdateContextFromRegDisp(PREGDISPLAY pRegDisp, PT_CONTEXT pContext) #ifndef FEATURE_EH_FUNCLETS -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) pContext->ContextFlags = (CONTEXT_INTEGER | CONTEXT_CONTROL); pContext->Edi = *pRegDisp->pEdi; pContext->Esi = *pRegDisp->pEsi; @@ -541,7 +541,7 @@ inline void UpdateContextFromRegDisp(PREGDISPLAY pRegDisp, PT_CONTEXT pContext) pContext->Edx = *pRegDisp->pEdx; pContext->Esp = pRegDisp->SP; pContext->Eip = pRegDisp->ControlPC; -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("UpdateContextFromRegDisp"); #endif // _TARGET_???_ diff --git a/src/coreclr/src/inc/securitywrapper.h b/src/coreclr/src/inc/securitywrapper.h index bda85ff40615f..96882205c1d6a 100644 --- a/src/coreclr/src/inc/securitywrapper.h +++ b/src/coreclr/src/inc/securitywrapper.h @@ -12,7 +12,7 @@ #ifndef _SECURITY_WRAPPER_H #define _SECURITY_WRAPPER_H -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #error This file should not be included on non-Windows platforms. 
#endif diff --git a/src/coreclr/src/inc/sigparser.h b/src/coreclr/src/inc/sigparser.h index ed1a34641a44c..eb7a6352e6519 100644 --- a/src/coreclr/src/inc/sigparser.h +++ b/src/coreclr/src/inc/sigparser.h @@ -22,7 +22,7 @@ // These macros tell us whether the arguments we see as we proceed with the signature walk are mapped // to increasing or decreasing stack addresses. This is valid only for arguments that go on the stack. //--------------------------------------------------------------------------------------- -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define STACK_GROWS_DOWN_ON_ARGS_WALK #else #define STACK_GROWS_UP_ON_ARGS_WALK @@ -423,7 +423,7 @@ class SigParser case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: case ELEMENT_TYPE_R8: - #ifdef BIT64 + #ifdef HOST_64BIT case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: #endif // WIN64 @@ -434,10 +434,10 @@ class SigParser case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: case ELEMENT_TYPE_R4: - #ifndef BIT64 + #ifndef HOST_64BIT case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: - #endif // BIT64 + #endif // HOST_64BIT *pSize = 4; break; @@ -966,7 +966,7 @@ inline void* StackElemEndianessFixup(void* pStackElem, UINT cbSize) { case 2: pRetVal += sizeof(void*)-2; break; -#ifdef BIT64 +#ifdef HOST_64BIT case 4: pRetVal += sizeof(void*)-4; break; diff --git a/src/coreclr/src/inc/stacktrace.h b/src/coreclr/src/inc/stacktrace.h index 26333c2bc5acf..ef37dd9091c95 100644 --- a/src/coreclr/src/inc/stacktrace.h +++ b/src/coreclr/src/inc/stacktrace.h @@ -22,7 +22,7 @@ HINSTANCE LoadDbgHelp(); #define cfrMaxAssertStackLevels 20 #define cchMaxAssertExprLen 257 -#ifdef BIT64 +#ifdef HOST_64BIT #define cchMaxAssertStackLevelStringLen \ ((3 * 8) + cchMaxAssertModuleLen + cchMaxAssertSymbolLen + 13) @@ -74,7 +74,7 @@ void GetStringFromStackLevels(UINT ifrStart, UINT cfrTotal, __out_ecount(cchMaxA ******************************************************************** robch */ void GetStringFromAddr(DWORD_PTR dwAddr, __out_ecount(cchMaxAssertStackLevelStringLen) LPSTR szString); -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) /**************************************************************************** * ClrCaptureContext * *-------------------* @@ -83,9 +83,9 @@ void GetStringFromAddr(DWORD_PTR dwAddr, __out_ecount(cchMaxAssertStackLevelStri * support this, so we need it for CoreCLR 4, if we require Win2K support ****************************************************************************/ extern "C" void __stdcall ClrCaptureContext(__out PCONTEXT ctx); -#else // _TARGET_X86_ && !FEATURE_PAL +#else // TARGET_X86 && !TARGET_UNIX #define ClrCaptureContext RtlCaptureContext -#endif // _TARGET_X86_ && !FEATURE_PAL +#endif // TARGET_X86 && !TARGET_UNIX #endif diff --git a/src/coreclr/src/inc/staticcontract.h b/src/coreclr/src/inc/staticcontract.h index 4bd3fb3f8c135..3fe749de24ef5 100644 --- a/src/coreclr/src/inc/staticcontract.h +++ b/src/coreclr/src/inc/staticcontract.h @@ -28,7 +28,7 @@ // from Contract.h to allow their inclusion in any part of the system. 
// -#if defined(_DEBUG) && defined(_TARGET_X86_) +#if defined(_DEBUG) && defined(TARGET_X86) #define METHOD_CANNOT_BE_FOLDED_DEBUG \ static int _noFold = 0; \ _noFold++; @@ -36,7 +36,7 @@ #define METHOD_CANNOT_BE_FOLDED_DEBUG #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // // currently, only x86 has a static contract analysis tool, so let's not @@ -111,7 +111,7 @@ #endif -#else // _TARGET_X86_ +#else // TARGET_X86 #define ANNOTATION_TRY_BEGIN { } #define ANNOTATION_TRY_END { } @@ -170,7 +170,7 @@ #define ANNOTATION_DEBUG_ONLY { } #endif -#endif // _TARGET_X86_ +#endif // TARGET_X86 #define STATIC_CONTRACT_THROWS ANNOTATION_FN_THROWS #define STATIC_CONTRACT_NOTHROW ANNOTATION_FN_NOTHROW @@ -285,7 +285,7 @@ typedef StaticContract::ScanThrowMarkerStandard ScanThrowMarker; // we use BlockMarker's only for SCAN -#if defined(_DEBUG) && defined(_TARGET_X86_) && !defined(DACCESS_COMPILE) +#if defined(_DEBUG) && defined(TARGET_X86) && !defined(DACCESS_COMPILE) template class BlockMarker diff --git a/src/coreclr/src/inc/stdmacros.h b/src/coreclr/src/inc/stdmacros.h index 8d6feb1a5a708..a9eb1d8c4da51 100644 --- a/src/coreclr/src/inc/stdmacros.h +++ b/src/coreclr/src/inc/stdmacros.h @@ -44,7 +44,7 @@ /* Portability macros */ /********************************************/ -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define AMD64_FIRST_ARG(x) x , #define AMD64_ARG(x) , x #define AMD64_ONLY(x) x @@ -58,7 +58,7 @@ #define NOT_AMD64_ARG(x) , x #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #define X86_FIRST_ARG(x) x , #define X86_ARG(x) , x #define X86_ONLY(x) x @@ -72,7 +72,7 @@ #define NOT_X86_ARG(x) , x #endif -#ifdef BIT64 +#ifdef HOST_64BIT #define BIT64_ARG(x) , x #define BIT64_ONLY(x) x #define NOT_BIT64(x) @@ -82,9 +82,9 @@ #define BIT64_ONLY(x) #define NOT_BIT64(x) x #define NOT_BIT64_ARG(x) , x -#endif // BIT64 +#endif // HOST_64BIT -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #define ARM_FIRST_ARG(x) x , #define ARM_ARG(x) , x #define ARM_ONLY(x) x @@ -98,7 +98,7 @@ #define NOT_ARM_ARG(x) , x #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 #define ARM64_FIRST_ARG(x) x , #define ARM64_ARG(x) , x #define ARM64_ONLY(x) x @@ -112,27 +112,27 @@ #define NOT_ARM64_ARG(x) , x #endif -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define LOG2_PTRSIZE 3 #else #define LOG2_PTRSIZE 2 #endif -#ifdef BIT64 +#ifdef HOST_64BIT #define INVALID_POINTER_CC 0xcccccccccccccccc #define INVALID_POINTER_CD 0xcdcdcdcdcdcdcdcd #define FMT_ADDR " %08x`%08x " #define LFMT_ADDR W(" %08x`%08x ") #define DBG_ADDR(ptr) (((UINT_PTR) (ptr)) >> 32), (((UINT_PTR) (ptr)) & 0xffffffff) -#else // BIT64 +#else // HOST_64BIT #define INVALID_POINTER_CC 0xcccccccc #define INVALID_POINTER_CD 0xcdcdcdcd #define FMT_ADDR " %08x " #define LFMT_ADDR W(" %08x ") #define DBG_ADDR(ptr) ((UINT_PTR)(ptr)) -#endif // BIT64 +#endif // HOST_64BIT -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #define ALIGN_ACCESS ((1<(pc); #else return dac_cast(pc); @@ -119,7 +119,7 @@ inline TADDR PCODEToPINSTR(PCODE pc) // on ARM, this will raise the THUMB bit. 
inline PCODE PINSTRToPCODE(TADDR addr) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return DataPointerToThumbCode(addr); #else return dac_cast(addr); @@ -490,7 +490,7 @@ inline void *__cdecl operator new(size_t, void *_P) /********************************************************************************/ /* portability helpers */ -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define IN_TARGET_64BIT(x) x #define IN_TARGET_32BIT(x) #else @@ -704,9 +704,9 @@ class CCompRC m_nHashSize = 0; m_csMap = NULL; m_pResourceFile = NULL; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_pResourceDomain = NULL; -#endif // FEATURE_PAL +#endif // TARGET_UNIX }// CCompRC @@ -794,12 +794,12 @@ class CCompRC CRITSEC_COOKIE m_csMap; LPCWSTR m_pResourceFile; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Resource domain is an ANSI string identifying a native resources file static LPCSTR m_pDefaultResourceDomain; static LPCSTR m_pFallbackResourceDomain; LPCSTR m_pResourceDomain; -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Main accessors for hash HRESOURCEDLL LookupNode(LocaleID langId, BOOL &fMissing); @@ -1280,16 +1280,16 @@ class NumaNodeInfo static LPVOID VirtualAllocExNuma(HANDLE hProc, LPVOID lpAddr, SIZE_T size, DWORD allocType, DWORD prot, DWORD node); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static BOOL GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, PUSHORT node_no); static bool GetNumaInfo(PUSHORT total_nodes, DWORD* max_procs_per_node); -#else // !FEATURE_PAL +#else // !TARGET_UNIX static BOOL GetNumaProcessorNodeEx(USHORT proc_no, PUSHORT node_no); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif }; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX struct CPU_Group_Info { @@ -1353,7 +1353,7 @@ class CPUGroupInfo DWORD_PTR GetCurrentProcessCpuMask(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX //****************************************************************************** // Returns the number of processors that a process has been configured to run on @@ -4296,13 +4296,13 @@ LPWSTR *SegmentCommandLine(LPCWSTR lpCmdLine, DWORD *pNumArgs); class ClrTeb { public: -#if defined(FEATURE_PAL) +#if defined(HOST_UNIX) // returns pointer that uniquely identifies the fiber static void* GetFiberPtrId() { LIMITED_METHOD_CONTRACT; - // not fiber for FEATURE_PAL - use the regular thread ID + // not fiber for HOST_UNIX - use the regular thread ID return (void *)(size_t)GetCurrentThreadId(); } @@ -4321,7 +4321,7 @@ class ClrTeb return PAL_GetStackLimit(); } -#else // !FEATURE_PAL +#else // !HOST_UNIX // returns pointer that uniquely identifies the fiber static void* GetFiberPtrId() @@ -4370,7 +4370,7 @@ class ClrTeb { return (void*) 1; } -#endif // !FEATURE_PAL +#endif // !HOST_UNIX }; #if !defined(DACCESS_COMPILE) @@ -4809,7 +4809,7 @@ namespace util * Overloaded operators for the executable heap * ------------------------------------------------------------------------ */ -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX struct CExecutable { int x; }; extern const CExecutable executable; @@ -4833,7 +4833,7 @@ template void DeleteExecutable(T *p) } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX INDEBUG(BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length);) diff --git a/src/coreclr/src/inc/volatile.h b/src/coreclr/src/inc/volatile.h index 05b4627ee1c92..960e33cc982aa 100644 --- a/src/coreclr/src/inc/volatile.h +++ b/src/coreclr/src/inc/volatile.h @@ -69,12 +69,12 @@ #error The Volatile type is currently only defined for Visual C++ and GNU C++ #endif -#if defined(__GNUC__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_) && 
!defined(_ARM64_) +#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) #error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM or ARM64 CPUs #endif #if defined(__GNUC__) -#if defined(_ARM_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_ARM64) // This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows. #define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb ish" : : : "memory") #else @@ -90,8 +90,8 @@ // notice. // #define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory") -#endif // _ARM_ || _ARM64_ -#elif (defined(_ARM_) || defined(_ARM64_)) && _ISO_VOLATILE +#endif // HOST_ARM || HOST_ARM64 +#elif (defined(HOST_ARM) || defined(HOST_ARM64)) && _ISO_VOLATILE // ARM & ARM64 have a very weak memory model and very few tools to control that model. We're forced to perform a full // memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we // currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it @@ -129,7 +129,7 @@ struct RemoveVolatile // Starting at version 3.8, clang errors out on initializing of type int * to volatile int *. To fix this, we add two templates to cast away volatility // Helper structures for casting away volatileness -#if defined(_ARM64_) && defined(_MSC_VER) +#if defined(HOST_ARM64) && defined(_MSC_VER) #include #endif @@ -140,7 +140,7 @@ T VolatileLoad(T const * pt) STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY; #ifndef DACCESS_COMPILE -#if defined(_ARM64_) && defined(__GNUC__) +#if defined(HOST_ARM64) && defined(__GNUC__) T val; static const unsigned lockFreeAtomicSizeMask = (1 << 1) | (1 << 2) | (1 << 4) | (1 << 8); if((1 << sizeof(T)) & lockFreeAtomicSizeMask) @@ -152,7 +152,7 @@ T VolatileLoad(T const * pt) val = *(T volatile const *)pt; asm volatile ("dmb ishld" : : : "memory"); } -#elif defined(_ARM64_) && defined(_MSC_VER) +#elif defined(HOST_ARM64) && defined(_MSC_VER) // silence warnings on casts in branches that are not taken. #pragma warning(push) #pragma warning(disable : 4302) @@ -227,7 +227,7 @@ void VolatileStore(T* pt, T val) STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY; #ifndef DACCESS_COMPILE -#if defined(_ARM64_) && defined(__GNUC__) +#if defined(HOST_ARM64) && defined(__GNUC__) static const unsigned lockFreeAtomicSizeMask = (1 << 1) | (1 << 2) | (1 << 4) | (1 << 8); if((1 << sizeof(T)) & lockFreeAtomicSizeMask) { @@ -238,7 +238,7 @@ void VolatileStore(T* pt, T val) VOLATILE_MEMORY_BARRIER(); *(T volatile *)pt = val; } -#elif defined(_ARM64_) && defined(_MSC_VER) +#elif defined(HOST_ARM64) && defined(_MSC_VER) // silence warnings on casts in branches that are not taken. #pragma warning(push) #pragma warning(disable : 4302) @@ -553,7 +553,7 @@ class VolatilePtr : public Volatile

#else // Disable use of Volatile for GC/HandleTable code except on platforms where it's absolutely necessary. -#if defined(_MSC_VER) && !defined(_ARM_) && !defined(_ARM64_) +#if defined(_MSC_VER) && !defined(HOST_ARM) && !defined(HOST_ARM64) #define VOLATILE(T) T RAW_KEYWORD(volatile) #else #define VOLATILE(T) Volatile diff --git a/src/coreclr/src/inc/vptr_list.h b/src/coreclr/src/inc/vptr_list.h index e3ee7712497b8..93e978224947d 100644 --- a/src/coreclr/src/inc/vptr_list.h +++ b/src/coreclr/src/inc/vptr_list.h @@ -47,9 +47,9 @@ VPTR_CLASS(PEImageLayout) VPTR_CLASS(RawImageLayout) VPTR_CLASS(ConvertedImageLayout) VPTR_CLASS(MappedImageLayout) -#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL) +#if !defined(CROSSGEN_COMPILE) && !defined(TARGET_UNIX) VPTR_CLASS(LoadedImageLayout) -#endif // !CROSSGEN_COMPILE && !FEATURE_PAL +#endif // !CROSSGEN_COMPILE && !TARGET_UNIX VPTR_CLASS(FlatImageLayout) #ifdef FEATURE_COMINTEROP VPTR_CLASS(ComMethodFrame) @@ -90,7 +90,7 @@ VPTR_CLASS(ExternalMethodFrame) #ifdef FEATURE_READYTORUN VPTR_CLASS(DynamicHelperFrame) #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) VPTR_CLASS(UMThkCallFrame) #endif VPTR_CLASS(TailCallFrame) diff --git a/src/coreclr/src/inc/win64unwind.h b/src/coreclr/src/inc/win64unwind.h index e4cea023b2fbd..d9477a9078ece 100644 --- a/src/coreclr/src/inc/win64unwind.h +++ b/src/coreclr/src/inc/win64unwind.h @@ -24,7 +24,7 @@ typedef enum _UNWIND_OP_CODES { UWOP_SAVE_XMM128_FAR, UWOP_PUSH_MACHFRAME, -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX // UWOP_SET_FPREG_LARGE is a CLR Unix-only extension to the Windows AMD64 unwind codes. // It is not part of the standard Windows AMD64 unwind codes specification. // UWOP_SET_FPREG allows for a maximum of a 240 byte offset between RSP and the @@ -39,7 +39,7 @@ typedef enum _UNWIND_OP_CODES { // is established. Either UWOP_SET_FPREG or UWOP_SET_FPREG_LARGE can be used, but not both. 
UWOP_SET_FPREG_LARGE, -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX } UNWIND_OP_CODES, *PUNWIND_OP_CODES; static const UCHAR UnwindOpExtraSlotTable[] = { @@ -55,9 +55,9 @@ static const UCHAR UnwindOpExtraSlotTable[] = { 2, // UWOP_SAVE_XMM128_FAR 0, // UWOP_PUSH_MACHFRAME -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX 2, // UWOP_SET_FPREG_LARGE -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX }; // @@ -89,13 +89,13 @@ typedef union _UNWIND_CODE { #define UNW_FLAG_UHANDLER 0x2 #define UNW_FLAG_CHAININFO 0x4 -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 typedef struct _UNWIND_INFO { ULONG FunctionLength; } UNWIND_INFO, *PUNWIND_INFO; -#else // _TARGET_X86_ +#else // TARGET_X86 typedef struct _UNWIND_INFO { UCHAR Version : 3; @@ -122,5 +122,5 @@ typedef struct _UNWIND_INFO { } UNWIND_INFO, *PUNWIND_INFO; -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // _WIN64UNWIND_H_ diff --git a/src/coreclr/src/inc/winrt/ntassert.h b/src/coreclr/src/inc/winrt/ntassert.h index 33a90a1ab195e..d6a2fd17bc8f1 100644 --- a/src/coreclr/src/inc/winrt/ntassert.h +++ b/src/coreclr/src/inc/winrt/ntassert.h @@ -45,7 +45,7 @@ DbgRaiseAssertionFailure ( #endif -#if defined(_AMD64_) +#if defined(HOST_AMD64) #if defined(_M_AMD64) @@ -64,7 +64,7 @@ __int2c ( #endif // defined(_M_AMD64) -#elif defined(_X86_) +#elif defined(HOST_X86) #if defined(_M_IX86) @@ -131,7 +131,7 @@ __break( #endif // defined(_M_IA64) -#elif defined(_ARM_) +#elif defined(HOST_ARM) #if defined(_M_ARM) @@ -150,7 +150,7 @@ __emit( #endif // defined(_M_ARM) -#endif // _AMD64_, _X86_, _IA64_, _ARM_ +#endif // HOST_AMD64, HOST_X86, _IA64_, HOST_ARM #endif // !defined(_DBGRAISEASSERTIONFAILURE_) && !defined(RC_INVOKED) && !defined(MIDL_PASS) #ifdef __cplusplus diff --git a/src/coreclr/src/inc/winrt/windowsstring.h b/src/coreclr/src/inc/winrt/windowsstring.h index 743e3b10e6549..f63fcdd2ea528 100644 --- a/src/coreclr/src/inc/winrt/windowsstring.h +++ b/src/coreclr/src/inc/winrt/windowsstring.h @@ -677,7 +677,7 @@ HRESULT StringCchLength( return StringCchLengthW(wz == nullptr ? L"" : wz, size_t(STRSAFE_MAX_CCH), pcch); } -#ifdef BIT64 +#ifdef HOST_64BIT // A UINT32-specific overload with built-in overflow check. inline HRESULT StringCchLength( @@ -694,7 +694,7 @@ HRESULT StringCchLength( return SizeTToUInt32(cch, pcch); } -#endif // BIT64 +#endif // HOST_64BIT #ifndef DACCESS_COMPILE //===================================================================================================================== diff --git a/src/coreclr/src/inc/winwrap.h b/src/coreclr/src/inc/winwrap.h index 56b0fe26493fc..e692ac782952f 100644 --- a/src/coreclr/src/inc/winwrap.h +++ b/src/coreclr/src/inc/winwrap.h @@ -241,11 +241,11 @@ // //#define WszGetBinaryType GetBinaryTypeWrapper //Coresys does not seem to have this API -#if FEATURE_PAL +#if HOST_UNIX #define WszFindFirstFile FindFirstFileW #else #define WszFindFirstFile(_lpFileName_, _lpFindData_) FindFirstFileExWrapper(_lpFileName_, FindExInfoStandard, _lpFindData_, FindExSearchNameMatch, NULL, 0) -#endif //FEATURE_PAL +#endif // HOST_UNIX //***************************************************************************** // Prototypes for API's. 
//***************************************************************************** @@ -258,21 +258,21 @@ inline DWORD GetMaxDBCSCharByteSize() { // contract.h not visible here __annotation(W("WRAPPER ") W("GetMaxDBCSCharByteSize")); -#ifndef FEATURE_PAL +#ifndef HOST_UNIX EnsureCharSetInfoInitialized(); _ASSERTE(g_dwMaxDBCSCharByteSize != 0); return (g_dwMaxDBCSCharByteSize); -#else // FEATURE_PAL +#else // HOST_UNIX return 3; -#endif // FEATURE_PAL +#endif // HOST_UNIX } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL RunningInteractive(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX #define RunningInteractive() FALSE -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifndef Wsz_mbstowcs #define Wsz_mbstowcs(szOut, szIn, iSize) WszMultiByteToWideChar(CP_ACP, 0, szIn, -1, szOut, iSize) @@ -298,7 +298,7 @@ WszCreateProcess( LPPROCESS_INFORMATION lpProcessInformation ); -#if defined(_X86_) && defined(_MSC_VER) +#if defined(HOST_X86) && defined(_MSC_VER) // // Windows SDK does not use intrinsics on x86. Redefine the interlocked operations to use intrinsics. @@ -336,9 +336,9 @@ InterlockedCompareExchangePointer ( return((PVOID)(LONG_PTR)_InterlockedCompareExchange((LONG volatile *)Destination, (LONG)(LONG_PTR)ExChange, (LONG)(LONG_PTR)Comperand)); } -#endif // _X86_ && _MSC_VER +#endif // HOST_X86 && _MSC_VER -#if defined(_ARM_) & !defined(FEATURE_PAL) +#if defined(HOST_ARM) & !defined(HOST_UNIX) // // InterlockedCompareExchangeAcquire/InterlockedCompareExchangeRelease is not mapped in SDK to the correct intrinsics. Remove once // the SDK definition is fixed (OS Bug #516255) @@ -349,7 +349,7 @@ InterlockedCompareExchangePointer ( #define InterlockedCompareExchangeRelease _InterlockedCompareExchange_rel #endif -#if defined(_X86_) & !defined(InterlockedIncrement64) +#if defined(HOST_X86) & !defined(InterlockedIncrement64) // Interlockedxxx64 that do not have intrinsics are only supported on Windows Server 2003 // or higher for X86 so define our own portable implementation @@ -415,7 +415,7 @@ __forceinline LONGLONG __InterlockedExchangeAdd64(LONGLONG volatile * Addend, LO return Old; } -#endif // _X86_ +#endif // HOST_X86 // Output printf-style formatted text to the debugger if it's present or stdout otherwise. inline void DbgWPrintf(const LPCWSTR wszFormat, ...) @@ -450,7 +450,7 @@ inline int LateboundMessageBoxW(HWND hWnd, LPCWSTR lpCaption, UINT uType) { -#ifndef FEATURE_PAL +#ifndef HOST_UNIX // User32 should exist on all systems where displaying a message box makes sense. HMODULE hGuiExtModule = WszLoadLibrary(W("user32")); if (hGuiExtModule) @@ -463,7 +463,7 @@ inline int LateboundMessageBoxW(HWND hWnd, FreeLibrary(hGuiExtModule); return result; } -#endif // !FEATURE_PAL +#endif // !HOST_UNIX // No luck. Output the caption and text to the debugger if present or stdout otherwise. 
if (lpText == NULL) diff --git a/src/coreclr/src/inc/zapper.h b/src/coreclr/src/inc/zapper.h index a922025c0243f..159f5fe47618f 100644 --- a/src/coreclr/src/inc/zapper.h +++ b/src/coreclr/src/inc/zapper.h @@ -71,7 +71,7 @@ class Zapper ICorJitCompiler *m_pJitCompiler; IMetaDataDispenserEx *m_pMetaDataDispenser; HMODULE m_hJitLib; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 HMODULE m_hJitLegacy; #endif diff --git a/src/coreclr/src/jit/_typeinfo.h b/src/coreclr/src/jit/_typeinfo.h index 064070928d074..d852cac6b0203 100644 --- a/src/coreclr/src/jit/_typeinfo.h +++ b/src/coreclr/src/jit/_typeinfo.h @@ -30,7 +30,7 @@ enum ti_types TI_ONLY_ENUM = TI_METHOD, // Enum values with greater value are completely described by the enumeration. }; -#if defined(_TARGET_64BIT_) +#if defined(TARGET_64BIT) #define TI_I_IMPL TI_LONG #else #define TI_I_IMPL TI_INT @@ -315,7 +315,7 @@ class typeInfo { static_assert(std::is_same::value || std::is_same::value, ""); -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT return handle == reinterpret_cast(0xcccccccccccccccc); #else return handle == reinterpret_cast(0xcccccccc); @@ -345,7 +345,7 @@ class typeInfo static typeInfo nativeInt() { typeInfo result = typeInfo(TI_I_IMPL); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT result.m_flags |= TI_FLAG_NATIVE_INT; #endif return result; @@ -396,9 +396,9 @@ class typeInfo { DWORD allFlags = TI_FLAG_DATA_MASK | TI_FLAG_BYREF | TI_FLAG_BYREF_READONLY | TI_FLAG_GENERIC_TYPE_VAR | TI_FLAG_UNINIT_OBJREF; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT allFlags |= TI_FLAG_NATIVE_INT; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT if ((li.m_flags & allFlags) != (ti.m_flags & allFlags)) { @@ -435,12 +435,12 @@ class typeInfo { return true; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return (nodeTi.IsType(TI_I_IMPL) && tiCompatibleWith(nullptr, verTi, typeInfo::nativeInt(), true)) || (verTi.IsType(TI_I_IMPL) && tiCompatibleWith(nullptr, typeInfo::nativeInt(), nodeTi, true)); -#else // _TARGET_64BIT_ +#else // TARGET_64BIT return false; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } #endif // DEBUG @@ -708,7 +708,7 @@ class typeInfo // Returns true whether this is an integer or a native int. 
BOOL IsIntOrNativeIntType() const { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return (GetType() == TI_INT) || AreEquivalent(*this, nativeInt()); #else return IsType(TI_INT); diff --git a/src/coreclr/src/jit/armelnonjit/CMakeLists.txt b/src/coreclr/src/jit/armelnonjit/CMakeLists.txt index 2283fb1943121..cc690b6840a79 100644 --- a/src/coreclr/src/jit/armelnonjit/CMakeLists.txt +++ b/src/coreclr/src/jit/armelnonjit/CMakeLists.txt @@ -13,15 +13,15 @@ if(FEATURE_READYTORUN) endif(FEATURE_READYTORUN) if (CLR_CMAKE_HOST_ARCH_I386) - remove_definitions(-D_TARGET_X86_) - add_definitions(-D_TARGET_ARM_) + remove_definitions(-DTARGET_X86) + add_definitions(-DTARGET_ARM) add_definitions(-DARM_SOFTFP) add_definitions(-DFEATURE_EH_FUNCLETS) set(JIT_ARCH_SOURCES ${JIT_ARM_SOURCES}) set(JIT_ARCH_LINK_LIBRARIES gcinfo_arm) elseif(CLR_CMAKE_HOST_ARCH_AMD64) - remove_definitions(-D_TARGET_AMD64_) - add_definitions(-D_TARGET_ARM64_) + remove_definitions(-DTARGET_AMD64) + add_definitions(-DTARGET_ARM64) set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES}) set(JIT_ARCH_LINK_LIBRARIES gcinfo_arm64) else() diff --git a/src/coreclr/src/jit/armelnonjit/armelnonjit.nativeproj b/src/coreclr/src/jit/armelnonjit/armelnonjit.nativeproj index 974945a9e8355..7cda1afe827b9 100644 --- a/src/coreclr/src/jit/armelnonjit/armelnonjit.nativeproj +++ b/src/coreclr/src/jit/armelnonjit/armelnonjit.nativeproj @@ -44,7 +44,7 @@ $(OutputName).def - $(ClDefines);_TARGET_ARM_=1 + $(ClDefines);TARGET_ARM=1 $(ClDefines);ALT_JIT $(SdkLibPath)\kernel32.lib;$(SdkLibPath)\user32.lib;$(SdkLibPath)\advapi32.lib;$(SdkLibPath)\oleaut32.lib;$(SdkLibPath)\uuid.lib diff --git a/src/coreclr/src/jit/assertionprop.cpp b/src/coreclr/src/jit/assertionprop.cpp index 8fb14511419da..f3f95ffe2e6d6 100644 --- a/src/coreclr/src/jit/assertionprop.cpp +++ b/src/coreclr/src/jit/assertionprop.cpp @@ -114,7 +114,7 @@ void Compiler::optAddCopies() bool isFloatParam = false; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 isFloatParam = varDsc->lvIsParam && varTypeIsFloating(typ); #endif @@ -956,9 +956,9 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, assertion.op2.vn = ValueNumStore::VNForNull(); assertion.op2.u1.iconVal = 0; assertion.op2.u1.iconFlags = 0; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT assertion.op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } // // Are we making an assertion about a local variable? @@ -1080,7 +1080,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, if (op2->gtOper == GT_CNS_INT) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Do not Constant-Prop large constants for ARM // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had // target_ssize_t type. 
@@ -1088,15 +1088,15 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, { goto DONE_ASSERTION; // Don't make an assertion } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM assertion.op2.u1.iconVal = op2->AsIntCon()->gtIconVal; assertion.op2.u1.iconFlags = op2->GetIconHandleFlag(); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (op2->TypeGet() == TYP_LONG || op2->TypeGet() == TYP_BYREF) { assertion.op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } else if (op2->gtOper == GT_CNS_LNG) { @@ -1243,10 +1243,10 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case TYP_UINT: case TYP_INT: -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT assertion.op2.u2.loBound = AssertionDsc::GetLowerBoundForIntegralType(toType); assertion.op2.u2.hiBound = AssertionDsc::GetUpperBoundForIntegralType(toType); break; @@ -1318,12 +1318,12 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, /* iconFlags should only contain bits in GTF_ICON_HDL_MASK */ assert((iconFlags & ~GTF_ICON_HDL_MASK) == 0); assertion.op2.u1.iconFlags = iconFlags; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (op2->AsOp()->gtOp1->TypeGet() == TYP_LONG) { assertion.op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } // JIT case else if (optIsTreeKnownIntValue(!optLocalAssertionProp, op2, &cnsValue, &iconFlags)) @@ -1336,12 +1336,12 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, /* iconFlags should only contain bits in GTF_ICON_HDL_MASK */ assert((iconFlags & ~GTF_ICON_HDL_MASK) == 0); assertion.op2.u1.iconFlags = iconFlags; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (op2->TypeGet() == TYP_LONG) { assertion.op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } else { @@ -1395,7 +1395,7 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pCon *pFlags = tree->GetIconHandleFlag(); return true; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Just to be clear, get it from gtLconVal rather than // overlapping gtIconVal. else if (tree->OperGet() == GT_CNS_LNG) @@ -1424,7 +1424,7 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pCon *pFlags = vnStore->IsVNHandle(vn) ? 
vnStore->GetHandleFlags(vn) : 0; return true; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT else if (vnType == TYP_LONG) { *pConstant = vnStore->ConstantValue(vn); @@ -2108,7 +2108,7 @@ void Compiler::optAssertionGen(GenTree* tree) { // Retrieve the 'this' arg GenTree* thisArg = gtGetThisArg(tree->AsCall()); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM) if (thisArg == nullptr) { // For tail calls we lose the this pointer in the argument list but that's OK because a null check @@ -2116,7 +2116,7 @@ void Compiler::optAssertionGen(GenTree* tree) noway_assert(tree->AsCall()->IsTailCall()); break; } -#endif // _TARGET_X86_ || _TARGET_AMD64_ || _TARGET_ARM_ +#endif // TARGET_X86 || TARGET_AMD64 || TARGET_ARM noway_assert(thisArg != nullptr); assertionInfo = optCreateAssertion(thisArg, nullptr, OAK_NOT_EQUAL); } @@ -2444,7 +2444,7 @@ GenTree* Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTree* tree) { INT64 value = vnStore->ConstantValue(vnCns); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (vnStore->IsVNHandle(vnCns)) { // Don't perform constant folding that involves a handle that needs @@ -2502,7 +2502,7 @@ GenTree* Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTree* tree) case TYP_INT: { int value = vnStore->ConstantValue(vnCns); -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (vnStore->IsVNHandle(vnCns)) { // Don't perform constant folding that involves a handle that needs @@ -2695,7 +2695,7 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion, // Constant ints are of type TYP_INT, not any of the short forms. if (varTypeIsIntegral(newTree->TypeGet())) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT var_types newType = (var_types)((curAssertion->op2.u1.iconFlags & 1) ? TYP_LONG : TYP_INT); if (newTree->TypeGet() != newType) { @@ -3441,7 +3441,7 @@ GenTree* Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenT { constantIsEqual = (curAssertion->op2.u1.iconVal == cnsVal); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT else if (genTypeSize(cmpType) == sizeof(INT32)) { // Compare the low 32-bits only diff --git a/src/coreclr/src/jit/bitsetasshortlong.h b/src/coreclr/src/jit/bitsetasshortlong.h index 17e0e3a69cbaa..c22eefcb6f0df 100644 --- a/src/coreclr/src/jit/bitsetasshortlong.h +++ b/src/coreclr/src/jit/bitsetasshortlong.h @@ -511,7 +511,7 @@ class BitSetOpsbbJumpKind == BBJ_CALLFINALLY) #else if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // On ARM, there are no retless BBJ_CALLFINALLY. assert(!(this->bbFlags & BBF_RETLESS_CALL)); #endif diff --git a/src/coreclr/src/jit/block.h b/src/coreclr/src/jit/block.h index e783e258294af..15897374c565d 100644 --- a/src/coreclr/src/jit/block.h +++ b/src/coreclr/src/jit/block.h @@ -416,7 +416,7 @@ struct BasicBlock : private LIR::Range #define BBF_HAS_NEWARRAY 0x00400000 // BB contains 'new' of an array #define BBF_HAS_NEWOBJ 0x00800000 // BB contains 'new' of an object type. -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #define BBF_FINALLY_TARGET 0x01000000 // BB is the target of a finally return: where a finally will return during // non-exceptional flow. 
Because the ARM calling sequence for calling a @@ -425,7 +425,7 @@ struct BasicBlock : private LIR::Range // generate correct code at the finally target, to allow for proper stack // unwind from within a non-exceptional call to a finally. -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #define BBF_BACKWARD_JUMP 0x02000000 // BB is surrounded by a backward jump/switch arc #define BBF_RETLESS_CALL 0x04000000 // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired @@ -966,9 +966,9 @@ struct BasicBlock : private LIR::Range void* bbEmitCookie; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) void* bbUnwindNopEmitCookie; -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef VERIFIER stackDesc bbStackIn; // stack descriptor for input diff --git a/src/coreclr/src/jit/codegen.h b/src/coreclr/src/jit/codegen.h index e0d2b4da8ec31..ad74a0b79adea 100644 --- a/src/coreclr/src/jit/codegen.h +++ b/src/coreclr/src/jit/codegen.h @@ -15,7 +15,7 @@ #include "regset.h" #include "jitgcinfo.h" -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) #define FOREACH_REGISTER_FILE(file) \ for ((file) = &(this->intRegState); (file) != NULL; \ (file) = ((file) == &(this->intRegState)) ? &(this->floatRegState) : NULL) @@ -46,7 +46,7 @@ class CodeGen : public CodeGenInterface ssize_t* cnsPtr); private: -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Bit masks used in negating a float or double number. // This is to avoid creating more than one data constant for these bitmasks when a // method has more than one GT_NEG operation on floating point values. @@ -65,7 +65,7 @@ class CodeGen : public CodeGenInterface // Generates SSE41 code for the given tree as a round operation void genSSE41RoundOp(GenTreeOp* treeNode); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) void genPrepForCompiler(); @@ -102,7 +102,7 @@ class CodeGen : public CodeGenInterface void genRangeCheck(GenTree* node); void genLockedInstructions(GenTreeOp* node); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH void genCodeForLockAdd(GenTreeOp* node); #endif @@ -254,7 +254,7 @@ class CodeGen : public CodeGenInterface void genClearStackVec3ArgUpperBits(); #endif // UNIX_AMD64_ABI && FEATURE_SIMD -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) bool genInstrWithConstant(instruction ins, emitAttr attr, regNumber reg1, @@ -320,7 +320,7 @@ class CodeGen : public CodeGenInterface void genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) bool genInstrWithConstant( instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg); @@ -357,7 +357,7 @@ class CodeGen : public CodeGenInterface FuncletFrameInfoDsc genFuncletInfo; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // A set of information that is used by funclet prolog and epilog generation. 
It is collected once, before // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the @@ -378,7 +378,7 @@ class CodeGen : public CodeGenInterface FuncletFrameInfoDsc genFuncletInfo; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // A set of information that is used by funclet prolog and epilog generation. It is collected once, before // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the @@ -392,9 +392,9 @@ class CodeGen : public CodeGenInterface FuncletFrameInfoDsc genFuncletInfo; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Save/Restore callee saved float regs to stack void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize); @@ -402,7 +402,7 @@ class CodeGen : public CodeGenInterface // Generate VZeroupper instruction to avoid AVX/SSE transition penalty void genVzeroupperIfNeeded(bool check256bitOnly = true); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH void genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg); @@ -452,19 +452,19 @@ class CodeGen : public CodeGenInterface // CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog); #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog); -#else // !defined(_TARGET_ARM64_) +#else // !defined(TARGET_ARM64) void genPopCalleeSavedRegisters(bool jmpEpilog = false); -#endif // !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_ARM64) // // Common or driving functions @@ -486,7 +486,7 @@ class CodeGen : public CodeGenInterface void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed); void genUpdateCurrentFunclet(BasicBlock* block); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void genInsertNopForUnwinder(BasicBlock* block); #endif @@ -498,7 +498,7 @@ class CodeGen : public CodeGenInterface return; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void genInsertNopForUnwinder(BasicBlock* block) { return; @@ -509,19 +509,19 @@ class CodeGen : public CodeGenInterface void genGeneratePrologsAndEpilogs(); -#if defined(DEBUG) && defined(_TARGET_ARM64_) +#if defined(DEBUG) && defined(TARGET_ARM64) void genArm64EmitterUnitTests(); #endif -#if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_) +#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64) void genAmd64EmitterUnitTests(); #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 virtual void SetSaveFpLrWithAllCalleeSavedRegisters(bool value); virtual bool IsSaveFpLrWithAllCalleeSavedRegisters() const; bool genSaveFpLrWithAllCalleeSavedRegisters; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 //------------------------------------------------------------------------- // @@ -811,9 +811,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genCodeForTreeNode(GenTree* treeNode); void genCodeForBinary(GenTreeOp* treeNode); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) void genCodeForLongUMod(GenTreeOp* node); -#endif // _TARGET_X86_ +#endif // TARGET_X86 void genCodeForDivMod(GenTreeOp* treeNode); void genCodeForMul(GenTreeOp* treeNode); @@ -821,15 +821,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genLeaInstruction(GenTreeAddrMode* lea); void genSetRegToCond(regNumber 
dstReg, GenTree* tree); -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) void genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void genCodeForMulLong(GenTreeMultiRegOp* treeNode); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) void genLongToIntCast(GenTree* treeNode); #endif @@ -840,7 +840,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX CHECK_NONE, CHECK_SMALL_INT_RANGE, CHECK_POSITIVE, -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT CHECK_UINT_RANGE, CHECK_POSITIVE_INT_RANGE, CHECK_INT_RANGE, @@ -852,7 +852,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX COPY, ZERO_EXTEND_SMALL_INT, SIGN_EXTEND_SMALL_INT, -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT ZERO_EXTEND_INT, SIGN_EXTEND_INT, #endif @@ -917,9 +917,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genPutArgSplit(GenTreePutArgSplit* treeNode); #endif // FEATURE_ARG_SPLIT -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) unsigned getBaseVarForPutArgStk(GenTree* treeNode); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH unsigned getFirstArgWithStackSlot(); @@ -933,7 +933,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX SMT_PreserveUpper // preserve target upper bits }; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 insOpts genGetSimdInsOpt(emitAttr size, var_types elementType); #endif instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr); @@ -973,15 +973,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genLoadIndTypeSIMD12(GenTree* treeNode); void genStoreLclTypeSIMD12(GenTree* treeNode); void genLoadLclTypeSIMD12(GenTree* treeNode); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void genStoreSIMD12ToStack(regNumber operandReg, regNumber tmpReg); void genPutArgStkSIMD12(GenTree* treeNode); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void genHWIntrinsic(GenTreeHWIntrinsic* node); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) void genHWIntrinsic_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp); void genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, int8_t ival); void genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr); @@ -1010,16 +1010,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX regNumber baseReg, regNumber offsReg, HWIntrinsicSwitchCaseBody emitSwCase); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) // CodeGen for Long Ints void genStoreLongLclVar(GenTree* treeNode); -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) // Do liveness update for register produced by the current node in codegen after // code has been emitted for it. 
@@ -1065,14 +1065,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genSetRegToIcon(regNumber reg, ssize_t val, var_types type = TYP_INT, insFlags flags = INS_FLAGS_DONT_CARE); void genCodeForShift(GenTree* tree); -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) void genCodeForShiftLong(GenTree* tree); #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH void genCodeForShiftRMW(GenTreeStoreInd* storeInd); void genCodeForBT(GenTreeOp* bt); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH void genCodeForCast(GenTreeOp* tree); void genCodeForLclAddr(GenTree* tree); @@ -1092,7 +1092,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genCodeForCpObj(GenTreeObj* cpObjNode); void genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode); void genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 void genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode); #endif void genCodeForPhysReg(GenTreePhysReg* tree); @@ -1137,16 +1137,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 void genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #ifdef FEATURE_PUT_STRUCT_ARG_STK -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 bool genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk); void genPushReg(var_types type, regNumber srcReg); void genPutArgStkFieldList(GenTreePutArgStk* putArgStk); -#endif // _TARGET_X86_ +#endif // TARGET_X86 void genPutStructArgStk(GenTreePutArgStk* treeNode); @@ -1161,7 +1161,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // FEATURE_PUT_STRUCT_ARG_STK void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 void genCodeForInitBlkHelper(GenTreeBlk* initBlkNode); #endif void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); @@ -1176,9 +1176,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genJmpMethod(GenTree* jmp); BasicBlock* genCallFinally(BasicBlock* block); void genCodeForJumpTrue(GenTreeOp* jtrue); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 void genCodeForJumpCompare(GenTreeOp* tree); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #if defined(FEATURE_EH_FUNCLETS) void genEHCatchRet(BasicBlock* block); @@ -1192,32 +1192,32 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool isStructReturn(GenTree* treeNode); void genStructReturn(GenTree* treeNode); -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) void genLongReturn(GenTree* treeNode); -#endif // _TARGET_X86_ || _TARGET_ARM_ +#endif // TARGET_X86 || TARGET_ARM -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) void genFloatReturn(GenTree* treeNode); -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) void genSimpleReturn(GenTree* treeNode); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 void genReturn(GenTree* treeNode); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH void genStackPointerConstantAdjustment(ssize_t spDelta); -#else // !_TARGET_ARMARCH_ +#else // !TARGET_ARMARCH void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp); -#endif // !_TARGET_ARMARCH_ +#endif // !TARGET_ARMARCH void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber 
regTmp); target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) void genLclHeap(GenTree* tree); @@ -1232,17 +1232,17 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX } #ifdef FEATURE_PUT_STRUCT_ARG_STK -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 bool m_pushStkArg; -#else // !_TARGET_X86_ +#else // !TARGET_X86 unsigned m_stkArgVarNum; unsigned m_stkArgOffset; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #endif // !FEATURE_PUT_STRUCT_ARG_STK -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) void genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar); -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) #ifdef DEBUG GenTree* lastConsumedNode; @@ -1272,7 +1272,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void instInit(); void instGen(instruction ins); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH void instNop(unsigned size); #endif @@ -1333,7 +1333,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void inst_TT_CL(instruction ins, GenTree* tree, unsigned offs = 0); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) void inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival); void inst_RV_TT_IV(instruction ins, emitAttr attr, regNumber reg1, GenTree* rmOp, int ival); void inst_RV_RV_TT(instruction ins, emitAttr size, regNumber targetReg, regNumber op1Reg, GenTree* op2, bool isRMW); @@ -1347,7 +1347,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void inst_set_SV_var(GenTree* tree); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags); bool arm_Valid_Disp_For_LdSt(target_ssize_t disp, var_types type); bool arm_Valid_Imm_For_Alu(target_ssize_t imm); @@ -1375,7 +1375,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void instGen_Return(unsigned stkArgSize); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 void instGen_MemoryBarrier(insBarrier barrierType = INS_BARRIER_ISH); #else void instGen_MemoryBarrier(); @@ -1399,9 +1399,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void __cdecl instDisp(instruction ins, bool noNL, const char* fmt, ...); #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH // Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions // such as X86's SETcc. 
A sequence of instructions rather than just a single one is required for @@ -1458,7 +1458,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /***************************************************************************** * * Generate a floating-point instruction that has one operand given by diff --git a/src/coreclr/src/jit/codegenarm.cpp b/src/coreclr/src/jit/codegenarm.cpp index c1d4dbdae2518..8522e287b90ff 100644 --- a/src/coreclr/src/jit/codegenarm.cpp +++ b/src/coreclr/src/jit/codegenarm.cpp @@ -15,7 +15,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #include "codegen.h" #include "lower.h" #include "gcinfo.h" @@ -1848,4 +1848,4 @@ void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pIni #endif // USING_SCOPE_INFO } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM diff --git a/src/coreclr/src/jit/codegenarm64.cpp b/src/coreclr/src/jit/codegenarm64.cpp index dc66f0f6befa9..39b81bc1dc723 100644 --- a/src/coreclr/src/jit/codegenarm64.cpp +++ b/src/coreclr/src/jit/codegenarm64.cpp @@ -15,7 +15,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 #include "emit.h" #include "codegen.h" #include "lower.h" @@ -7984,4 +7984,4 @@ void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pIni } } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 diff --git a/src/coreclr/src/jit/codegenarmarch.cpp b/src/coreclr/src/jit/codegenarmarch.cpp index bf822a3e9896e..ab0e11f1a03df 100644 --- a/src/coreclr/src/jit/codegenarmarch.cpp +++ b/src/coreclr/src/jit/codegenarmarch.cpp @@ -15,7 +15,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures +#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures #include "codegen.h" #include "lower.h" @@ -203,12 +203,12 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) __fallthrough; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) case GT_ADD: case GT_SUB: @@ -225,14 +225,14 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForShift(treeNode); break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_LSH_HI: case GT_RSH_LO: genCodeForShiftLong(treeNode); break; -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) case GT_CAST: genCodeForCast(treeNode->AsOp()); @@ -243,9 +243,9 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) GenTree* op1 = treeNode->AsOp()->gtOp1; if (varTypeIsFloating(treeNode) != varTypeIsFloating(op1)) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 inst_RV_RV(INS_fmov, targetReg, genConsumeReg(op1), targetType); -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 if (varTypeIsFloating(treeNode)) { // GT_BITCAST on ARM is only used to cast floating-point arguments to integer @@ -268,7 +268,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) inst_RV_RV_RV(INS_vmov_d2i, targetReg, otherReg, genConsumeReg(op1), EA_8BYTE); } } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 } else { @@ -316,13 +316,13 @@ 
void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForIndir(treeNode->AsIndir()); break; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM case GT_MUL_LONG: genCodeForMulLong(treeNode->AsMultiRegOp()); break; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_MULHI: genCodeForMulHi(treeNode->AsOp()); @@ -331,7 +331,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) case GT_SWAP: genCodeForSwap(treeNode->AsOp()); break; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 case GT_JMP: genJmpMethod(treeNode); @@ -364,10 +364,10 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) case GT_GE: case GT_GT: case GT_CMP: -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_TEST_EQ: case GT_TEST_NE: -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 genCodeForCompare(treeNode->AsOp()); break; @@ -375,11 +375,11 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForJumpTrue(treeNode->AsOp()); break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_JCMP: genCodeForJumpCompare(treeNode->AsOp()); break; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 case GT_JCC: genCodeForJcc(treeNode->AsCC()); @@ -429,7 +429,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) instGen_MemoryBarrier(); break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_XCHG: case GT_XADD: genLockedInstructions(treeNode->AsOp()); @@ -438,7 +438,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) case GT_CMPXCHG: genCodeForCmpXchg(treeNode->AsCmpXchg()); break; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 case GT_RELOAD: // do nothing - reload is just a marker. @@ -528,7 +528,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForArrOffset(treeNode->AsArrOffs()); break; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM case GT_CLS_VAR_ADDR: emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0); @@ -540,7 +540,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genConsumeRegs(treeNode); break; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM case GT_IL_OFFSET: // Do nothing; these nodes are simply markers for debug info. 
@@ -642,7 +642,7 @@ void CodeGen::genIntrinsic(GenTree* treeNode) GetEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode); break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case CORINFO_INTRINSIC_Ceiling: genConsumeOperands(treeNode->AsOp()); GetEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode); @@ -657,7 +657,7 @@ void CodeGen::genIntrinsic(GenTree* treeNode) genConsumeOperands(treeNode->AsOp()); GetEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode); break; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 case CORINFO_INTRINSIC_Sqrt: genConsumeOperands(treeNode->AsOp()); @@ -755,12 +755,12 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) // If it is contained then source must be the integer constant zero if (source->isContained()) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 assert(source->OperGet() == GT_CNS_INT); assert(source->AsIntConCommon()->IconValue() == 0); emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut); -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 // There is no zero register on ARM32 unreached(); #endif // !_TARGET_ARM64 @@ -769,7 +769,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) { genConsumeReg(source); emit->emitIns_S_R(storeIns, storeAttr, source->GetRegNum(), varNumOut, argOffsetOut); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (targetType == TYP_LONG) { // This case currently only occurs for double types that are passed as TYP_LONG; @@ -780,7 +780,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) argOffsetOut += EA_4BYTE; emit->emitIns_S_R(storeIns, storeAttr, otherReg, varNumOut, argOffsetOut); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } argOffsetOut += EA_SIZE_IN_BYTES(storeAttr); assert(argOffsetOut <= argOffsetMax); // We can't write beyound the outgoing area area @@ -805,9 +805,9 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) // Setup loReg (and hiReg) from the internal registers that we reserved in lower. 
// regNumber loReg = treeNode->ExtractTempReg(); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 regNumber hiReg = treeNode->GetSingleTempReg(); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 regNumber addrReg = REG_NA; GenTreeLclVarCommon* varNode = nullptr; @@ -843,7 +843,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) genConsumeAddress(addrNode); addrReg = addrNode->GetRegNum(); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // If addrReg equal to loReg, swap(loReg, hiReg) // This reduces code complexity by only supporting one addrReg overwrite case if (loReg == addrReg) @@ -851,7 +851,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) loReg = hiReg; hiReg = addrReg; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } } @@ -915,20 +915,20 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) { noway_assert(!layout->HasGCPtr()); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 else { noway_assert(structSize <= 2 * TARGET_POINTER_SIZE); } noway_assert(structSize <= MAX_PASS_MULTIREG_BYTES); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 int remainingSize = structSize; unsigned structOffset = 0; unsigned nextIndex = 0; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // For a >= 16-byte structSize we will generate a ldp and stp instruction each loop // ldp x2, x3, [x0] // stp x2, x3, [sp, #16] @@ -965,7 +965,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) structOffset += (2 * TARGET_POINTER_SIZE); nextIndex += 2; } -#else // _TARGET_ARM_ +#else // TARGET_ARM // For a >= 4 byte structSize we will generate a ldr and str instruction each loop // ldr r2, [r0] // str r2, [sp, #16] @@ -996,7 +996,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) structOffset += TARGET_POINTER_SIZE; nextIndex += 1; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // For a 12-byte structSize we will we will generate two load instructions // ldr x2, [x0] @@ -1151,7 +1151,7 @@ void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode) { var_types type = treeNode->GetRegType(regIndex); regNumber argReg = treeNode->GetRegNumByIdx(regIndex); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (type == TYP_LONG) { // We should only see long fields for DOUBLEs passed in 2 integer registers, via bitcast. @@ -1169,7 +1169,7 @@ void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode) assert(argReg == treeNode->GetRegNumByIdx(regIndex)); fieldReg = nextArgNode->AsMultiRegOp()->GetRegNumByIdx(1); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // If child node is not already in the register we need, move it if (argReg != fieldReg) @@ -1337,11 +1337,11 @@ void CodeGen::genMultiRegCallStoreToLocal(GenTree* treeNode) { assert(treeNode->OperGet() == GT_STORE_LCL_VAR); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Longs are returned in two return registers on Arm32. // Structs are returned in four registers on ARM32 and HFAs. assert(varTypeIsLong(treeNode) || varTypeIsStruct(treeNode)); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Structs of size >=9 and <=16 are returned in two return registers on ARM64 and HFAs. 
assert(varTypeIsStruct(treeNode)); #endif // _TARGET_* @@ -1531,7 +1531,7 @@ void CodeGen::genCodeForNullCheck(GenTreeOp* tree) assert(!tree->gtOp1->isContained()); regNumber addrReg = genConsumeReg(tree->gtOp1); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 regNumber targetReg = REG_ZR; #else regNumber targetReg = tree->GetSingleTempReg(); @@ -1860,7 +1860,7 @@ void CodeGen::genCodeForIndir(GenTreeIndir* tree) if ((tree->gtFlags & GTF_IND_VOLATILE) != 0) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 bool addrIsInReg = tree->Addr()->isUsedFromReg(); bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0); @@ -1877,7 +1877,7 @@ void CodeGen::genCodeForIndir(GenTreeIndir* tree) ins = INS_ldar; } else -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 { emitBarrier = true; } @@ -1887,7 +1887,7 @@ void CodeGen::genCodeForIndir(GenTreeIndir* tree) if (emitBarrier) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 instGen_MemoryBarrier(INS_BARRIER_OSHLD); #else instGen_MemoryBarrier(); @@ -1923,13 +1923,13 @@ void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode) if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // issue a INS_BARRIER_ISHLD after a volatile CpBlk operation instGen_MemoryBarrier(INS_BARRIER_ISHLD); #else // issue a full memory barrier after a volatile CpBlk operation instGen_MemoryBarrier(); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } } @@ -1985,7 +1985,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node) } else { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 assert(src->IsIntegralConst(0)); srcReg = REG_ZR; #else @@ -2004,7 +2004,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node) assert(size <= INT32_MAX); assert(dstOffset < INT32_MAX - static_cast(size)); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 for (unsigned regSize = 2 * REGSIZE_BYTES; size >= regSize; size -= regSize, dstOffset += regSize) { if (dstLclNum != BAD_VAR_NUM) @@ -2039,7 +2039,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node) attr = EA_4BYTE; break; case 4: -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case 8: #endif storeIns = INS_str; @@ -2162,7 +2162,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node) regNumber tempReg = node->ExtractTempReg(RBM_ALLINT); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (size >= 2 * REGSIZE_BYTES) { regNumber tempReg2 = node->ExtractTempReg(RBM_ALLINT); @@ -2215,7 +2215,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node) attr = EA_4BYTE; break; case 4: -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case 8: #endif loadIns = INS_ldr; @@ -2247,7 +2247,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node) if (node->IsVolatile()) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 instGen_MemoryBarrier(INS_BARRIER_ISHLD); #else instGen_MemoryBarrier(); @@ -2331,9 +2331,9 @@ void CodeGen::genRegCopy(GenTree* treeNode) bool tgtFltReg = (varTypeIsFloating(treeNode) || varTypeIsSIMD(treeNode)); if (srcFltReg != tgtFltReg) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 inst_RV_RV(INS_fmov, targetReg, sourceReg, targetType); -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 if (varTypeIsFloating(treeNode)) { // GT_COPY from 'int' to 'float' currently can't happen. 
Maybe if ARM SIMD is implemented @@ -2355,7 +2355,7 @@ void CodeGen::genRegCopy(GenTree* treeNode) inst_RV_RV_RV(INS_vmov_d2i, targetReg, otherReg, genConsumeReg(op1), EA_8BYTE); } } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 } else { @@ -2450,13 +2450,13 @@ void CodeGen::genCallInstruction(GenTreeCall* call) argReg = genRegArgNext(argReg); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // A double register is modelled as an even-numbered single one if (putArgRegNode->TypeGet() == TYP_DOUBLE) { argReg = genRegArgNext(argReg); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } } #if FEATURE_ARG_SPLIT @@ -2491,10 +2491,10 @@ void CodeGen::genCallInstruction(GenTreeCall* call) { const regNumber regThis = genGetThisArgReg(call); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) const regNumber tmpReg = call->ExtractTempReg(); GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0); #endif // _TARGET_* } @@ -2637,7 +2637,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) assert(addr != nullptr); // Non-virtual direct call to known addresses -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (!arm_Valid_Imm_For_BL((ssize_t)addr)) { regNumber tmpReg = call->GetSingleTempReg(); @@ -2645,13 +2645,13 @@ void CodeGen::genCallInstruction(GenTreeCall* call) genEmitCall(emitter::EC_INDIR_R, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) NULL, retSize, ilOffset, tmpReg); } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset); } -#if 0 && defined(_TARGET_ARM64_) +#if 0 && defined(TARGET_ARM64) // Use this path if you want to load an absolute call target using // a sequence of movs followed by an indirect call (blr instruction) // If this path is enabled, we need to ensure that REG_IP0 is assigned during Lowering. 
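Aside (illustrative sketch, not part of the diff): the disabled "#if 0 && defined(TARGET_ARM64)" path above loads an absolute call target into a register and calls through it. Assuming REG_IP0 (x16) as the scratch register the comment names, the emitted sequence would look roughly like:

    movz x16, #imm16_0
    movk x16, #imm16_1, lsl #16
    movk x16, #imm16_2, lsl #32
    movk x16, #imm16_3, lsl #48
    blr  x16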
@@ -2715,7 +2715,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) { // The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with @@ -2727,7 +2727,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) returnReg = REG_INTRET; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (varTypeUsesFloatArgReg(returnType)) { returnReg = REG_FLOATRET; @@ -2739,7 +2739,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) if (call->GetRegNum() != returnReg) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (compiler->opts.compUseSoftFP && returnType == TYP_DOUBLE) { inst_RV_RV_RV(INS_vmov_i2d, call->GetRegNum(), returnReg, genRegArgNext(returnReg), EA_8BYTE); @@ -2831,7 +2831,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) var_types storeType = genActualType(varDsc->TypeGet()); emitAttr storeSize = emitActualTypeSize(storeType); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_LONG) { // long - at least the low half must be enregistered @@ -2844,7 +2844,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) } } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { GetEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->GetRegNum(), varNum, 0); } @@ -2893,7 +2893,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) regNumber argReg = varDsc->GetArgReg(); // incoming arg register regNumber argRegNext = REG_NA; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (varDsc->GetRegNum() != argReg) { var_types loadType = TYP_UNDEF; @@ -2984,7 +2984,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) } } -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 bool twoParts = false; var_types loadType = TYP_UNDEF; @@ -3082,7 +3082,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) { VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex); } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 } // Jmp call to a vararg method - if the method has fewer than fixed arguments that can be max size of reg, @@ -3138,7 +3138,7 @@ void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& d genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW); break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case GenIntCastDesc::CHECK_UINT_RANGE: // We need to check if the value is not greater than 0xFFFFFFFF but this value // cannot be encoded in the immediate operand of CMP. Use TST instead to check @@ -3245,7 +3245,7 @@ void CodeGen::genIntToIntCast(GenTreeCast* cast) ins = (desc.ExtendSrcSize() == 1) ? INS_sxtb : INS_sxth; insSize = 4; break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case GenIntCastDesc::ZERO_EXTEND_INT: ins = INS_mov; insSize = 4; @@ -3304,7 +3304,7 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) // treeNode must be a reg assert(!treeNode->isContained()); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (srcType != dstType) { @@ -3318,7 +3318,7 @@ void CodeGen::genFloatToFloatCast(GenTree* treeNode) GetEmitter()->emitIns_R_R(INS_vmov, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum()); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (srcType != dstType) { @@ -3366,7 +3366,7 @@ void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, // Now we can actually use those slot ID's to declare live ranges. 
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (compiler->opts.compDbgEnC) { @@ -3392,7 +3392,7 @@ void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize); } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 if (compiler->opts.IsReversePInvoke()) { @@ -3464,7 +3464,7 @@ void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstRe assert(varTypeIsIntegral(type)); assert(genIsValidIntReg(dstReg)); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 const GenConditionDesc& desc = GenConditionDesc::Get(condition); inst_SET(desc.jumpKind1, dstReg); @@ -3575,9 +3575,9 @@ void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg else { // target = base + index<emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_FLAGS_DONT_CARE, INS_OPTS_LSL); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_OPTS_LSL); #endif } @@ -3725,7 +3725,7 @@ bool CodeGen::isStructReturn(GenTree* treeNode) noway_assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT); var_types returnType = treeNode->TypeGet(); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return varTypeIsStruct(returnType) && (compiler->info.compRetNativeType == TYP_STRUCT); #else return varTypeIsStruct(returnType); @@ -3786,7 +3786,7 @@ void CodeGen::genStructReturn(GenTree* treeNode) // Handle SIMD genStructReturn case NYI_ARM("SIMD genStructReturn"); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 genConsumeRegs(op1); regNumber src = op1->GetRegNum(); @@ -3816,7 +3816,7 @@ void CodeGen::genStructReturn(GenTree* treeNode) GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, src, i); } } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } } else // op1 must be multi-reg GT_CALL @@ -3967,4 +3967,4 @@ void CodeGen::genStructReturn(GenTree* treeNode) } // op1 must be multi-reg GT_CALL } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH diff --git a/src/coreclr/src/jit/codegencommon.cpp b/src/coreclr/src/jit/codegencommon.cpp index edc9e0564de2a..92f8fed1e1596 100644 --- a/src/coreclr/src/jit/codegencommon.cpp +++ b/src/coreclr/src/jit/codegencommon.cpp @@ -95,15 +95,15 @@ CodeGenInterface::CodeGenInterface(Compiler* theCompiler) CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) negBitmaskFlt = nullptr; negBitmaskDbl = nullptr; absBitmaskFlt = nullptr; absBitmaskDbl = nullptr; u8ToDblBitmask = nullptr; -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) -#if defined(FEATURE_PUT_STRUCT_ARG_STK) && !defined(_TARGET_X86_) +#if defined(FEATURE_PUT_STRUCT_ARG_STK) && !defined(TARGET_X86) m_stkArgVarNum = BAD_VAR_NUM; #endif @@ -136,19 +136,19 @@ CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) // Shouldn't be used before it is set in genFnProlog() compiler->compCalleeRegsPushed = UninitializedWord(compiler); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Shouldn't be used before it is set in genFnProlog() compiler->compCalleeFPRegsSavedMask = (regMaskTP)-1; -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) #endif // DEBUG -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // This will be set before final frame layout. 
compiler->compVSQuirkStackPaddingNeeded = 0; // Set to true if we perform the Quirk that fixes the PPP issue compiler->compQuirkForPPPflag = false; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // Initialize the IP-mapping logic. compiler->genIPmappingList = nullptr; @@ -158,17 +158,17 @@ CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) /* Assume that we not fully interruptible */ SetInterruptible(false); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH SetHasTailCalls(false); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH #ifdef DEBUG genInterruptibleUsed = false; genCurDispOffset = (unsigned)-1; #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 genSaveFpLrWithAllCalleeSavedRegisters = false; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } void CodeGenInterface::genMarkTreeInReg(GenTree* tree, regNumber reg) @@ -176,7 +176,7 @@ void CodeGenInterface::genMarkTreeInReg(GenTree* tree, regNumber reg) tree->SetRegNum(reg); } -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) //--------------------------------------------------------------------- // genTotalFrameSize - return the "total" size of the stack frame, including local size @@ -232,11 +232,11 @@ int CodeGenInterface::genCallerSPtoFPdelta() const assert(isFramePointerUsed()); int callerSPtoFPdelta = 0; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // On ARM, we first push the prespill registers, then store LR, then R11 (FP), and point R11 at the saved R11. callerSPtoFPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; callerSPtoFPdelta -= 2 * REGSIZE_BYTES; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // Thanks to ebp chaining, the difference between ebp-based addresses // and caller-SP-relative addresses is just the 2 pointers: // return address @@ -259,10 +259,10 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta() const { int callerSPtoSPdelta = 0; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) callerSPtoSPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; callerSPtoSPdelta -= genTotalFrameSize(); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) callerSPtoSPdelta -= genTotalFrameSize(); callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address @@ -280,7 +280,7 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta() const return callerSPtoSPdelta; } -#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_ARM) /***************************************************************************** * Should we round simple operations (assignments, arithmetic operations, etc.) 
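Aside (worked example, not part of the diff): in genCallerSPtoFPdelta above, the x86 path accounts for exactly the two pushed pointers, so with REGSIZE_BYTES == 4 the delta is -(4 for the return address + 4 for the saved EBP) = -8 bytes; the ARM path additionally subtracts REGSIZE_BYTES for each pre-spilled argument register before the fixed 2 * REGSIZE_BYTES for LR and R11.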
@@ -546,18 +546,18 @@ regMaskTP Compiler::compHelperCallKillSet(CorInfoHelpFunc helper) switch (helper) { case CORINFO_HELP_ASSIGN_BYREF: -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) return RBM_RSI | RBM_RDI | RBM_CALLEE_TRASH_NOGC; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) return RBM_CALLEE_TRASH_WRITEBARRIER_BYREF; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) return RBM_ESI | RBM_EDI | RBM_ECX; #else NYI("Model kill set for CORINFO_HELP_ASSIGN_BYREF on target arch"); return RBM_CALLEE_TRASH; #endif -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) case CORINFO_HELP_ASSIGN_REF: case CORINFO_HELP_CHECKED_ASSIGN_REF: return RBM_CALLEE_TRASH_WRITEBARRIER; @@ -584,7 +584,7 @@ regMaskTP Compiler::compHelperCallKillSet(CorInfoHelpFunc helper) NYI("Model kill set for CORINFO_HELP_PROF_FCN_TAILCALL on target arch"); #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case CORINFO_HELP_ASSIGN_REF_EAX: case CORINFO_HELP_ASSIGN_REF_ECX: case CORINFO_HELP_ASSIGN_REF_EBX: @@ -809,9 +809,9 @@ TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree) return temp; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Returns relocation type hint for an addr. // Note that there are no reloc hints on x86. // @@ -825,7 +825,7 @@ unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr) { return compiler->eeGetRelocTypeHint((void*)addr); } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 // Return true if an absolute indirect data address can be encoded as IP-relative. // offset. Note that this method should be used only when the caller knows that @@ -840,7 +840,7 @@ unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr) // bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32; #else // x86: PC-relative addressing is available only for control flow instructions (jmp and call) @@ -861,7 +861,7 @@ bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr) // bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32; #else // x86: PC-relative addressing is available only for control flow instructions (jmp and call) @@ -901,7 +901,7 @@ bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr) return true; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // See if the code indir addr can be encoded as 32-bit displacement relative to zero. // We don't need a relocation in that case. if (genCodeIndirAddrCanBeEncodedAsZeroRelOffset(addr)) @@ -911,11 +911,11 @@ bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr) // Else we need a relocation. return true; -#else //_TARGET_X86_ +#else // TARGET_X86 // On x86 there is no need to record or ask for relocations during jitting, // because all addrs fit within 32-bits. return false; -#endif //_TARGET_X86_ +#endif // TARGET_X86 } // Return true if a direct code address needs to be marked as relocatable. @@ -934,17 +934,17 @@ bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr) return true; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // By default all direct code addresses go through relocation so that VM will setup // a jump stub if addr cannot be encoded as pc-relative offset. 
return true; -#else //_TARGET_X86_ +#else // TARGET_X86 // On x86 there is no need for recording relocations during jitting, // because all addrs fit within 32-bits. return false; -#endif //_TARGET_X86_ +#endif // TARGET_X86 } -#endif //_TARGET_XARCH_ +#endif // TARGET_XARCH /***************************************************************************** * @@ -1040,7 +1040,7 @@ void CodeGen::genDefineInlineTempLabel(BasicBlock* label) void CodeGen::genAdjustSP(target_ssize_t delta) { -#if defined(_TARGET_X86_) && !defined(UNIX_X86_ABI) +#if defined(TARGET_X86) && !defined(UNIX_X86_ABI) if (delta == sizeof(int)) inst_RV(INS_pop, REG_ECX, TYP_INT); else @@ -1086,19 +1086,19 @@ void CodeGen::genAdjustStackLevel(BasicBlock* block) if (genStackLevel != 0) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 GetEmitter()->emitMarkStackLvl(genStackLevel); inst_RV_IV(INS_add, REG_SPBASE, genStackLevel, EA_PTRSIZE); SetStackLevel(0); -#else // _TARGET_X86_ +#else // TARGET_X86 NYI("Need emitMarkStackLvl()"); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } } #endif // !FEATURE_FIXED_OUT_ARGS } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // return size // alignmentWB is out param unsigned CodeGenInterface::InferOpSizeAlign(GenTree* op, unsigned* alignmentWB) @@ -1146,13 +1146,13 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignme LclVarDsc* varDsc = compiler->lvaTable + varNum; assert(varDsc->lvType == TYP_STRUCT); opSize = varDsc->lvSize(); -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (varDsc->lvStructDoubleAlign) { alignment = TARGET_POINTER_SIZE * 2; } else -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT { alignment = TARGET_POINTER_SIZE; } @@ -1220,7 +1220,7 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignme return opSize; } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH /***************************************************************************** * @@ -1382,7 +1382,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, cns += op2->AsIntConCommon()->IconValue(); -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) if (cns == 0) #endif { @@ -1402,7 +1402,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, goto AGAIN; -#if SCALED_ADDR_MODES && !defined(_TARGET_ARMARCH_) +#if SCALED_ADDR_MODES && !defined(TARGET_ARMARCH) // TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index. case GT_MUL: if (op1->gtOverflow()) @@ -1425,7 +1425,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, goto FOUND_AM; } break; -#endif // SCALED_ADDR_MODES && !defined(_TARGET_ARMARCH_) +#endif // SCALED_ADDR_MODES && !defined(TARGET_ARMARCH) default: break; @@ -1446,7 +1446,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, switch (op1->gtOper) { -#if !defined(_TARGET_ARMARCH_) +#if !defined(TARGET_ARMARCH) // TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index. case GT_ADD: @@ -1528,7 +1528,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, noway_assert(op2); switch (op2->gtOper) { -#if !defined(_TARGET_ARMARCH_) +#if !defined(TARGET_ARMARCH) // TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index. 
case GT_ADD: @@ -1607,7 +1607,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, rv1 = op1; rv2 = op2; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 assert(cns == 0); #endif @@ -1708,7 +1708,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr, return true; } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH //------------------------------------------------------------------------ // genEmitGSCookieCheck: Generate code to check that the GS cookie // wasn't thrashed by a buffer overrun. Common code for ARM32 and ARM64. @@ -1754,7 +1754,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN, regGSConst); genDefineTempLabel(gsCheckBlk); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH /***************************************************************************** * @@ -1904,7 +1904,7 @@ void CodeGen::genCheckOverflow(GenTree* tree) emitJumpKind jumpKind; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (tree->OperGet() == GT_MUL) { jumpKind = EJ_ne; @@ -1914,11 +1914,11 @@ void CodeGen::genCheckOverflow(GenTree* tree) { bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) jumpKind = isUnsignedOverflow ? EJ_jb : EJ_jo; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) jumpKind = isUnsignedOverflow ? EJ_lo : EJ_vs; @@ -1930,7 +1930,7 @@ void CodeGen::genCheckOverflow(GenTree* tree) } } -#endif // defined(_TARGET_ARMARCH_) +#endif // defined(TARGET_ARMARCH) } // Jump to the block which will throw the expection @@ -1983,7 +1983,7 @@ void CodeGen::genUpdateCurrentFunclet(BasicBlock* block) } } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void CodeGen::genInsertNopForUnwinder(BasicBlock* block) { // If this block is the target of a finally return, we need to add a preceding NOP, in the same EH region, @@ -2111,9 +2111,9 @@ void CodeGen::genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode) printf("unknown architecture"); } -#if defined(_TARGET_WINDOWS_) +#if defined(TARGET_WINDOWS) printf(" - Windows"); -#elif defined(_TARGET_UNIX_) +#elif defined(TARGET_UNIX) printf(" - Unix"); #endif @@ -2277,9 +2277,9 @@ void CodeGen::genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode) bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ? -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) trackedStackPtrsContig = false; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous trackedStackPtrsContig = !compiler->opts.compDbgEnC && !compiler->compIsProfilerHookNeeded(); #else @@ -2968,7 +2968,7 @@ void CodeGen::genReportEH() // bool CodeGenInterface::genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf) { -#if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS +#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS #ifdef DEBUG return (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method. 
#else @@ -2998,7 +2998,7 @@ bool CodeGenInterface::genUseOptimizedWriteBarriers(GCInfo::WriteBarrierForm wbf // bool CodeGenInterface::genUseOptimizedWriteBarriers(GenTree* tgt, GenTree* assignVal) { -#if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS +#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS #ifdef DEBUG GCInfo::WriteBarrierForm wbf = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tgt, assignVal); return (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method. @@ -3292,11 +3292,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere // Check if this is an HFA register arg and return the HFA type if (varDsc.lvIsHfaRegArg()) { -#if defined(_TARGET_WINDOWS_) +#if defined(TARGET_WINDOWS) // Cannot have hfa types on windows arm targets // in vararg methods. assert(!compiler->info.compIsVarArgs); -#endif // defined(_TARGET_WINDOWS_) +#endif // defined(TARGET_WINDOWS) return varDsc.GetHfaType(); } return compiler->mangleVarArgsType(varDsc.lvType); @@ -3365,12 +3365,12 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere // Change regType to the HFA type when we have a HFA argument if (varDsc->lvIsHfaRegArg()) { -#if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) if (compiler->info.compIsVarArgs) { assert(!"Illegal incoming HFA arg encountered in Vararg method."); } -#endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) regType = varDsc->GetHfaType(); } @@ -3529,7 +3529,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere #endif // FEATURE_MULTIREG_ARGS } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM int lclSize = compiler->lvaLclSize(varNum); if (lclSize > REGSIZE_BYTES) @@ -3548,7 +3548,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere regArgTab[regArgNum + i].varNum = varNum; regArgTab[regArgNum + i].slot = i + 1; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM for (int i = 0; i < slots; i++) { @@ -3574,29 +3574,29 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 noway_assert(varDsc->lvType == TYP_STRUCT); -#else // !_TARGET_X86_ +#else // !TARGET_X86 // For LSRA, it may not be in regArgMaskLive if it has a zero // refcnt. This is in contrast with the non-LSRA case in which all // non-tracked args are assumed live on entry. noway_assert((varDsc->lvRefCnt() == 0) || (varDsc->lvType == TYP_STRUCT) || (varDsc->lvAddrExposed && compiler->info.compIsVarArgs) || (varDsc->lvAddrExposed && compiler->opts.compUseSoftFP)); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } // Mark it as processed and be done with it regArgTab[regArgNum + i].processed = true; goto NON_DEP; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On the ARM when the varDsc is a struct arg (or pre-spilled due to varargs) the initReg/xtraReg // could be equal to GetArgReg(). The pre-spilled registers are also not considered live either since // they've already been spilled. 
// if ((regSet.rsMaskPreSpillRegs(false) & genRegMask(regNum)) == 0) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { #if !defined(UNIX_AMD64_ABI) noway_assert(xtraReg != (varDsc->GetArgReg() + i)); @@ -3625,7 +3625,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere goto NON_DEP; } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if ((i == 1) && varTypeIsStruct(varDsc) && (varDsc->GetOtherReg() == regNum)) { goto NON_DEP; @@ -3640,7 +3640,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { goto NON_DEP; } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) regArgTab[regArgNum + i].circular = true; } else @@ -3699,7 +3699,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { destRegNum = varDsc->GetRegNum(); } -#if FEATURE_MULTIREG_ARGS && defined(FEATURE_SIMD) && defined(_TARGET_64BIT_) +#if FEATURE_MULTIREG_ARGS && defined(FEATURE_SIMD) && defined(TARGET_64BIT) else { assert(regArgTab[argNum].slot == 2); @@ -3712,7 +3712,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere change = true; continue; } -#elif !defined(_TARGET_64BIT_) +#elif !defined(TARGET_64BIT) else if (regArgTab[argNum].slot == 2 && genActualType(varDsc->TypeGet()) == TYP_LONG) { destRegNum = varDsc->GetOtherReg(); @@ -3723,7 +3723,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere assert(varDsc->TypeGet() == TYP_DOUBLE); destRegNum = REG_NEXT(varDsc->GetRegNum()); } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) noway_assert(destRegNum != REG_NA); if (genRegMask(destRegNum) & regArgMaskLive) { @@ -3801,7 +3801,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere noway_assert(varNum < compiler->lvaCount); varDsc = compiler->lvaTable + varNum; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT // If this arg is never on the stack, go to the next one. if (varDsc->lvType == TYP_LONG) { @@ -3815,7 +3815,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere } } else -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT { // If this arg is never on the stack, go to the next one. 
if (!regArgTab[argNum].stackArg) @@ -3824,7 +3824,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere } } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (varDsc->lvType == TYP_DOUBLE) { if (regArgTab[argNum].slot == 2) @@ -3857,14 +3857,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere #endif // !UNIX_AMD64_ABI if (varDsc->lvIsHfaRegArg()) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM32 the storeType for HFA args is always TYP_FLOAT storeType = TYP_FLOAT; slotSize = (unsigned)emitActualTypeSize(storeType); -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 storeType = genActualType(varDsc->GetHfaType()); slotSize = (unsigned)emitActualTypeSize(storeType); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } } else // Not a struct type @@ -3872,9 +3872,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere storeType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet())); } size = emitActualTypeSize(storeType); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 noway_assert(genTypeSize(storeType) == TARGET_POINTER_SIZE); -#endif //_TARGET_X86_ +#endif // TARGET_X86 regNumber srcRegNum = genMapRegArgNumToRegNum(argNum, storeType); @@ -3911,7 +3911,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere regArgTab[argNum].processed = true; regArgMaskLive &= ~genRegMask(srcRegNum); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (storeType == TYP_DOUBLE) { regArgTab[argNum + 1].processed = true; @@ -3964,7 +3964,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere // We pick the lowest avail register number regMaskTP tempMask = genFindLowestBit(fpAvailMask); xtraReg = genRegNumFromMask(tempMask); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // This case shouldn't occur on x86 since NYI gets converted to an assert NYI("Homing circular FP registers via xtraReg"); #endif @@ -4006,7 +4006,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere emitAttr size = EA_PTRSIZE; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // // The following code relies upon the target architecture having an // 'xchg' instruction which directly swaps the values held in two registers. 
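Aside (illustrative sketch, not part of the diff): the TARGET_XARCH block above resolves a two-register circular dependency among incoming argument registers with a single swap, since xchg exchanges both register homes in place, for example:

    xchg rcx, rdx    ; arg arrived in RCX but belongs in RDX, and vice versa

On targets without such an instruction, the same fix-up goes through a third register (the xtraReg used elsewhere in this prolog code) and three moves.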
@@ -4053,11 +4053,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere #endif // USING_SCOPE_INFO } else -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH { var_types destMemType = varDscDest->TypeGet(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool cycleAllDouble = true; // assume the best unsigned iter = begReg; @@ -4082,7 +4082,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { destMemType = TYP_FLOAT; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (destMemType == TYP_REF) { @@ -4131,7 +4131,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere /* mark 'src' as processed */ noway_assert(srcReg < argMax); regArgTab[srcReg].processed = true; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (size == EA_8BYTE) regArgTab[srcReg + 1].processed = true; #endif @@ -4143,7 +4143,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere varDscDest = varDscSrc; destMemType = varDscDest->TypeGet(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (!cycleAllDouble && destMemType == TYP_DOUBLE) { destMemType = TYP_FLOAT; @@ -4185,7 +4185,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere /* mark the beginning register as processed */ regArgTab[srcReg].processed = true; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (size == EA_8BYTE) regArgTab[srcReg + 1].processed = true; #endif @@ -4230,8 +4230,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere #endif // defined(UNIX_AMD64_ABI) noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg); -#ifndef _TARGET_64BIT_ -#ifndef _TARGET_ARM_ +#ifndef TARGET_64BIT +#ifndef TARGET_ARM // Right now we think that incoming arguments are not pointer sized. When we eventually // understand the calling convention, this still won't be true. But maybe we'll have a better // idea of how to ignore it. @@ -4239,7 +4239,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere // On Arm, a long can be passed in register noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == TARGET_POINTER_SIZE); #endif -#endif //_TARGET_64BIT_ +#endif // TARGET_64BIT noway_assert(varDsc->lvIsInReg() && !regArgTab[argNum].circular); @@ -4251,15 +4251,15 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { destRegNum = varDsc->GetRegNum(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (genActualType(destMemType) == TYP_DOUBLE && regArgTab[argNum + 1].processed) { // The second half of the double has already been processed! Treat this as a single. 
destMemType = TYP_FLOAT; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT else if (regArgTab[argNum].slot == 2 && genActualType(destMemType) == TYP_LONG) { assert(genActualType(varDsc->TypeGet()) == TYP_LONG || genActualType(varDsc->TypeGet()) == TYP_DOUBLE); @@ -4314,8 +4314,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere destMemType = TYP_FLOAT; destRegNum = REG_NEXT(varDsc->GetRegNum()); } -#endif // !_TARGET_64BIT_ -#if (defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD) +#endif // !TARGET_64BIT +#if (defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64)) && defined(FEATURE_SIMD) else { assert(regArgTab[argNum].slot == 2); @@ -4326,7 +4326,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere noway_assert(regNum != destRegNum); continue; } -#endif // (defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)) && defined(FEATURE_SIMD) +#endif // (defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64)) && defined(FEATURE_SIMD) noway_assert(destRegNum != REG_NA); if (destRegNum != regNum) { @@ -4335,7 +4335,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere * which is guaranteed to happen since we have no circular dependencies. */ regMaskTP destMask = genRegMask(destRegNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Don't process the double until both halves of the destination are clear. if (genActualType(destMemType) == TYP_DOUBLE) { @@ -4353,7 +4353,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere emitAttr size = emitActualTypeSize(destMemType); -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) if (varTypeIsSIMD(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2) { // For a SIMD type that is passed in two integer registers, @@ -4377,7 +4377,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere regArgMaskLive &= ~genRegMask(regNum); #if FEATURE_MULTIREG_ARGS int argRegCount = 1; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (genActualType(destMemType) == TYP_DOUBLE) { argRegCount = 2; @@ -4398,7 +4398,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere destRegNum = regNum; } #endif // defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD) -#if defined(_TARGET_ARM64_) && defined(FEATURE_SIMD) +#if defined(TARGET_ARM64) && defined(FEATURE_SIMD) if (varTypeIsSIMD(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2) { // For a SIMD type that is passed in two integer registers, @@ -4414,7 +4414,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere noway_assert(genIsValidFloatReg(destRegNum)); GetEmitter()->emitIns_R_R_I(INS_mov, EA_8BYTE, destRegNum, nextRegNum, 1); } -#endif // defined(_TARGET_ARM64_) && defined(FEATURE_SIMD) +#endif // defined(TARGET_ARM64) && defined(FEATURE_SIMD) // Mark the rest of the argument registers corresponding to this multi-reg type as // being processed and no longer live. @@ -4466,7 +4466,7 @@ void CodeGen::genEnregisterIncomingStackArgs() and hence here we need to load it from its prespilled location. 
*/ bool isPrespilledForProfiling = false; -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) isPrespilledForProfiling = compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(varNum, regSet.rsMaskPreSpillRegs(false)); #endif @@ -4720,7 +4720,7 @@ void CodeGen::genCheckUseBlockInit() // model this very well, but have left the logic as is for now. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT genUseBlockInit = (genInitStkLclCnt > (largeGcStructs + 8)); @@ -4728,7 +4728,7 @@ void CodeGen::genCheckUseBlockInit() genUseBlockInit = (genInitStkLclCnt > (largeGcStructs + 4)); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT if (genUseBlockInit) { @@ -4741,7 +4741,7 @@ void CodeGen::genCheckUseBlockInit() maskCalleeRegArgMask &= ~RBM_SECRET_STUB_PARAM; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // If we're going to use "REP STOS", remember that we will trash EDI // For fastcall we will have to save ECX, EAX // so reserve two extra callee saved @@ -4773,8 +4773,8 @@ void CodeGen::genCheckUseBlockInit() regSet.rsSetRegsModified(RBM_EBX); } -#endif // _TARGET_XARCH_ -#ifdef _TARGET_ARM_ +#endif // TARGET_XARCH +#ifdef TARGET_ARM // // On the Arm if we are using a block init to initialize, then we // must force spill R4/R5/R6 so that we can use them during @@ -4787,7 +4787,7 @@ void CodeGen::genCheckUseBlockInit() regSet.rsSetRegsModified(RBM_R5); if (forceSpillRegCount > 2) regSet.rsSetRegsModified(RBM_R6); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } } @@ -4796,7 +4796,7 @@ void CodeGen::genCheckUseBlockInit() * Push any callee-saved registers we have used */ -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed) #else void CodeGen::genPushCalleeSavedRegisters() @@ -4804,12 +4804,12 @@ void CodeGen::genPushCalleeSavedRegisters() { assert(compiler->compGeneratingProlog); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // x86/x64 doesn't support push of xmm/ymm regs, therefore consider only integer registers for pushing onto stack // here. Space for float registers to be preserved is stack allocated and saved as part of prolog sequence and not // here. 
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_INT_CALLEE_SAVED; -#else // !defined(_TARGET_XARCH_) +#else // !defined(TARGET_XARCH) regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; #endif @@ -4820,7 +4820,7 @@ void CodeGen::genPushCalleeSavedRegisters() } #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // On X86/X64 we have already pushed the FP (frame-pointer) prior to calling this method if (isFramePointerUsed()) { @@ -4828,7 +4828,7 @@ void CodeGen::genPushCalleeSavedRegisters() } #endif -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // On ARM we push the FP (frame-pointer) here along with all other callee saved registers if (isFramePointerUsed()) rsPushRegs |= RBM_FPBASE; @@ -4855,7 +4855,7 @@ void CodeGen::genPushCalleeSavedRegisters() rsPushRegs |= RBM_LR; // We must save the return address (in the LR register) regSet.rsMaskCalleeSaved = rsPushRegs; -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH #ifdef DEBUG if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs)) @@ -4868,7 +4868,7 @@ void CodeGen::genPushCalleeSavedRegisters() } #endif // DEBUG -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) regMaskTP maskPushRegsFloat = rsPushRegs & RBM_ALLFLOAT; regMaskTP maskPushRegsInt = rsPushRegs & ~maskPushRegsFloat; @@ -4883,7 +4883,7 @@ void CodeGen::genPushCalleeSavedRegisters() genPushFltRegs(maskPushRegsFloat); compiler->unwindPushMaskFloat(maskPushRegsFloat); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // See the document "ARM64 JIT Frame Layout" and/or "ARM64 Exception Data" for more details or requirements and // options. Case numbers in comments here refer to this document. See also Compiler::lvaAssignFrameOffsets() // for pictures of the general frame layouts, and CodeGen::genFuncletProlog() implementations (per architecture) @@ -5360,7 +5360,7 @@ void CodeGen::genPushCalleeSavedRegisters() assert(offset == totalFrameSize); -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) // Push backwards so we match the order we will pop them in the epilog // and all the other code that expects it to be in this order. for (regNumber reg = REG_INT_LAST; rsPushRegs != RBM_NONE; reg = REG_PREV(reg)) @@ -5386,7 +5386,7 @@ void CodeGen::genPushCalleeSavedRegisters() #endif // _TARGET_* } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void CodeGen::genPushFltRegs(regMaskTP regMask) { @@ -5567,7 +5567,7 @@ regMaskTP CodeGen::genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskC } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM /***************************************************************************** * @@ -5603,7 +5603,7 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Do we have a double register initialized to 0? if (dblInitReg != REG_NA) { @@ -5615,11 +5615,11 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& // Copy from int. inst_RV_RV(INS_vmov_i2f, reg, initReg, TYP_FLOAT, EA_4BYTE); } -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) // XORPS is the fastest and smallest way to initialize a XMM register to zero. inst_RV_RV(INS_xorps, reg, reg, TYP_DOUBLE); dblInitReg = reg; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // We will just zero out the entire vector register. 
This sets it to a double/float zero value GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B); #else // _TARGET_* @@ -5638,7 +5638,7 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Do we have a float register initialized to 0? if (fltInitReg != REG_NA) { @@ -5650,11 +5650,11 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& // Copy from int. inst_RV_RV_RV(INS_vmov_i2d, reg, initReg, initReg, EA_8BYTE); } -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) // XORPS is the fastest and smallest way to initialize a XMM register to zero. inst_RV_RV(INS_xorps, reg, reg, TYP_DOUBLE); fltInitReg = reg; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // We will just zero out the entire vector register. This sets it to a double/float zero value GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B); #else // _TARGET_* @@ -5671,7 +5671,7 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& * Restore any callee-saved registers we have used */ -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) bool CodeGen::genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog) { @@ -5731,7 +5731,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) compiler->unwindPopMaskInt(maskPopRegsInt); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog) { @@ -5966,7 +5966,7 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog) } } -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) { @@ -6001,7 +6001,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) } #endif // !defined(UNIX_AMD64_ABI) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (regSet.rsRegsModified(RBM_R12)) { popCount++; @@ -6022,7 +6022,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) popCount++; inst_RV(INS_pop, REG_R15, TYP_I_IMPL); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // Amd64/x86 doesn't support push/pop of xmm registers. // These will get saved to stack separately after allocating @@ -6032,7 +6032,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) noway_assert(compiler->compCalleeRegsPushed == popCount); } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) { @@ -6077,16 +6077,16 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) // Return the register to use. On ARM64, we never touch the initReg, and always just return REG_ZR. 
regNumber CodeGen::genGetZeroReg(regNumber initReg, bool* pInitRegZeroed) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return REG_ZR; -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 if (*pInitRegZeroed == false) { instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); *pInitRegZeroed = true; } return initReg; -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 } /*----------------------------------------------------------------------------- @@ -6108,7 +6108,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, if (genUseBlockInit) { assert(untrLclHi > untrLclLo); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH /* Generate the following code: @@ -6150,7 +6150,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg as we will zero it and maybe use it for // a large constant. -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (compiler->compLocallocUsed) { @@ -6174,12 +6174,12 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, rAddr = genRegNumFromMask(regMask); availMask &= ~regMask; -#else // !define(_TARGET_ARM_) +#else // !define(TARGET_ARM) rAddr = initReg; *pInitRegZeroed = false; -#endif // !defined(_TARGET_ARM_) +#endif // !defined(TARGET_ARM) bool useLoop = false; unsigned uCntBytes = untrLclHi - untrLclLo; @@ -6205,11 +6205,11 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, // rAddr is not a live incoming argument reg assert((genRegMask(rAddr) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (arm_Valid_Imm_For_Add(untrLclLo, INS_FLAGS_DONT_CARE)) -#else // !_TARGET_ARM_ +#else // !TARGET_ARM if (emitter::emitIns_valid_imm_for_add(untrLclLo, EA_PTRSIZE)) -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM { GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, rAddr, genFramePointerReg(), untrLclLo); } @@ -6229,44 +6229,44 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, instGen_Set_Reg_To_Imm(EA_PTRSIZE, rCnt, (ssize_t)uCntSlots / 2); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) rZero1 = genGetZeroReg(initReg, pInitRegZeroed); instGen_Set_Reg_To_Zero(EA_PTRSIZE, rZero2); target_ssize_t stmImm = (target_ssize_t)(genRegMask(rZero1) | genRegMask(rZero2)); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (!useLoop) { while (uCntBytes >= REGSIZE_BYTES * 2) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); -#else // !_TARGET_ARM_ +#else // !TARGET_ARM GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES, INS_OPTS_POST_INDEX); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM uCntBytes -= REGSIZE_BYTES * 2; } } else // useLoop is true { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM GetEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); // zero stack slots GetEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rCnt, 1, INS_FLAGS_SET); -#else // !_TARGET_ARM_ +#else // !TARGET_ARM GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES, INS_OPTS_POST_INDEX); // zero stack slots GetEmitter()->emitIns_R_R_I(INS_subs, EA_PTRSIZE, rCnt, rCnt, 1); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM GetEmitter()->emitIns_J(INS_bhi, NULL, -3); uCntBytes %= REGSIZE_BYTES * 2; } if (uCntBytes >= REGSIZE_BYTES) // check and zero the last register-sized stack slot (odd number) { -#ifdef _TARGET_ARM_ +#ifdef 
TARGET_ARM GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, rZero1, rAddr, 0); -#else // _TARGET_ARM_ +#else // TARGET_ARM if ((uCntBytes - REGSIZE_BYTES) == 0) { GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, 0); @@ -6275,20 +6275,20 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, { GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, REGSIZE_BYTES, INS_OPTS_POST_INDEX); } -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM uCntBytes -= REGSIZE_BYTES; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (uCntBytes > 0) { assert(uCntBytes == sizeof(int)); GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, REG_ZR, rAddr, 0); uCntBytes -= sizeof(int); } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 noway_assert(uCntBytes == 0); -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) /* Generate the following code: @@ -6418,14 +6418,14 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, zeroReg, varNum, i); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT assert(i == lclSize || (i + sizeof(int) == lclSize)); if (i != lclSize) { GetEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, zeroReg, varNum, i); i += sizeof(int); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT assert(i == lclSize); } } @@ -6484,7 +6484,7 @@ void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed regNumber reg; bool isPrespilledForProfiling = false; -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) isPrespilledForProfiling = compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(contextArg, regSet.rsMaskPreSpillRegs(false)); #endif @@ -6498,7 +6498,7 @@ void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed { if (isFramePointerUsed()) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // lvStkOffs is always valid for incoming stack-arguments, even if the argument // will become enregistered. // On Arm compiler->compArgSize doesn't include r11 and lr sizes and hence we need to add 2*REGSIZE_BYTES @@ -6521,10 +6521,10 @@ void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed regSet.verifyRegUsed(reg); } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), compiler->lvaCachedGenericContextArgOffset(), rsGetRsvdReg()); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // ARM's emitIns_R_R_I automatically uses the reserved register if necessary. GetEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), compiler->lvaCachedGenericContextArgOffset()); @@ -6638,7 +6638,7 @@ void CodeGen::genPrologPadForReJit() { assert(compiler->compGeneratingProlog); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (!compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_REJIT_NOPS)) { return; @@ -6797,7 +6797,7 @@ void CodeGen::genFinalizeFrame() // Set various registers as "modified" for special code generation scenarios: Edit & Continue, P/Invoke calls, etc. 
CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (compiler->compTailCallUsed) { @@ -6808,9 +6808,9 @@ void CodeGen::genFinalizeFrame() regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Make sure that callee-saved registers used by call to a stack probing helper generated for very large stack // frames // (see `getVeryLargeFrameSize`) are pushed on stack. @@ -6825,7 +6825,7 @@ void CodeGen::genFinalizeFrame() { regSet.rsSetRegsModified(regSet.rsMaskResvd); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG if (verbose) @@ -6841,15 +6841,15 @@ void CodeGen::genFinalizeFrame() { // We always save FP. noway_assert(isFramePointerUsed()); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // On x64 we always save exactly RBP, RSI and RDI for EnC. regMaskTP okRegs = (RBM_CALLEE_TRASH | RBM_FPBASE | RBM_RSI | RBM_RDI); regSet.rsSetRegsModified(RBM_RSI | RBM_RDI); noway_assert((regSet.rsGetModifiedRegsMask() & ~okRegs) == 0); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // On x86 we save all callee saved regs so the saved reg area size is consistent regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } /* If we have any pinvoke calls, we might potentially trash everything */ @@ -6879,7 +6879,7 @@ void CodeGen::genFinalizeFrame() regMaskTP maskCalleeRegsPushed = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH if (isFramePointerUsed()) { // For a FP based frame we have to push/pop the FP register @@ -6896,7 +6896,7 @@ void CodeGen::genFinalizeFrame() // maskCalleeRegsPushed |= RBM_LR; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // TODO-ARM64-Bug?: enable some variant of this for FP on ARM64? regMaskTP maskPushRegsFloat = maskCalleeRegsPushed & RBM_ALLFLOAT; regMaskTP maskPushRegsInt = maskCalleeRegsPushed & ~maskPushRegsFloat; @@ -6941,16 +6941,16 @@ void CodeGen::genFinalizeFrame() maskCalleeRegsPushed |= maskExtraRegs; } } -#endif // _TARGET_ARM_ -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARM +#endif // TARGET_ARMARCH -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Compute the count of callee saved float regs saved on stack. // On Amd64 we push only integer regs. Callee saved float (xmm6-xmm15) // regs are stack allocated and preserved in their stack locations. 
compiler->compCalleeFPRegsSavedMask = maskCalleeRegsPushed & RBM_FLT_CALLEE_SAVED; maskCalleeRegsPushed &= ~RBM_FLT_CALLEE_SAVED; -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) compiler->compCalleeRegsPushed = genCountBits(maskCalleeRegsPushed); @@ -6994,7 +6994,7 @@ void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData) { assert(compiler->compGeneratingProlog); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (delta == 0) { @@ -7015,7 +7015,7 @@ void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData) compiler->unwindSetFrameReg(REG_FPBASE, delta); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) assert(arm_Valid_Imm_For_Add_SP(delta)); GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta); @@ -7025,7 +7025,7 @@ void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData) compiler->unwindPadding(); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (delta == 0) { @@ -7120,10 +7120,10 @@ void CodeGen::genFnProlog() instGen(INS_nop); instGen(INS_BREAKPOINT); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Avoid asserts in the unwind info because these instructions aren't accounted for. compiler->unwindPadding(); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH } #endif // DEBUG @@ -7279,11 +7279,11 @@ void CodeGen::genFnProlog() // previous frame pointer. Thus, stkOffs can't be zero. CLANG_FORMAT_COMMENT_ANCHOR; -#if !defined(_TARGET_AMD64_) +#if !defined(TARGET_AMD64) // However, on amd64 there is no requirement to chain frame pointers. noway_assert(!isFramePointerUsed() || loOffs != 0); -#endif // !defined(_TARGET_AMD64_) +#endif // !defined(TARGET_AMD64) // printf(" Untracked tmp at [EBP-%04X]\n", -stkOffs); @@ -7312,7 +7312,7 @@ void CodeGen::genFnProlog() } #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On the ARM we will spill any incoming struct args in the first instruction in the prolog // Ditto for all enregistered user arguments in a varargs method. // These registers will be available to use for the initReg. We just remove @@ -7351,16 +7351,16 @@ void CodeGen::genFnProlog() } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // If we have a variable sized frame (compLocallocUsed is true) // then using REG_SAVED_LOCALLOC_SP in the prolog is not allowed if (compiler->compLocallocUsed) { excludeMask |= RBM_SAVED_LOCALLOC_SP; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (compiler->compLclFrameSize >= compiler->getVeryLargeFrameSize()) { // We currently must use REG_EAX on x86 here @@ -7368,7 +7368,7 @@ void CodeGen::genFnProlog() assert(initReg == REG_EAX); } else -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH { tempMask = initRegs & ~excludeMask & ~regSet.rsMaskResvd; @@ -7395,7 +7395,7 @@ void CodeGen::genFnProlog() noway_assert(!compiler->compMethodRequiresPInvokeFrame() || (initReg != REG_PINVOKE_FRAME)); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // If we are a varargs call, in order to set up the arguments correctly this // must be done in a 2 step process. 
As per the x64 ABI: // a) The caller sets up the argument shadow space (just before the return @@ -7410,9 +7410,9 @@ void CodeGen::genFnProlog() GetEmitter()->spillIntArgRegsToShadowSlots(); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM /*------------------------------------------------------------------------- * * Now start emitting the part of the prolog which sets up the frame @@ -7423,19 +7423,19 @@ void CodeGen::genFnProlog() inst_IV(INS_push, (int)regSet.rsMaskPreSpillRegs(true)); compiler->unwindPushMaskInt(regSet.rsMaskPreSpillRegs(true)); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (doubleAlignOrFramePointerUsed()) { inst_RV(INS_push, REG_FPBASE, TYP_REF); compiler->unwindPush(REG_FPBASE); #ifdef USING_SCOPE_INFO psiAdjustStackLevel(REGSIZE_BYTES); -#endif // USING_SCOPE_INFO -#ifndef _TARGET_AMD64_ // On AMD64, establish the frame pointer after the "sub rsp" +#endif // USING_SCOPE_INFO +#ifndef TARGET_AMD64 // On AMD64, establish the frame pointer after the "sub rsp" genEstablishFramePointer(0, /*reportUnwindData*/ true); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 #if DOUBLE_ALIGN if (compiler->genDoubleAlign()) @@ -7447,9 +7447,9 @@ void CodeGen::genFnProlog() } #endif // DOUBLE_ALIGN } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Probe large frames now, if necessary, since genPushCalleeSavedRegisters() will allocate the frame. Note that // for arm64, genAllocLclFrame only probes the frame; it does not actually allocate it (it does not change SP). // For arm64, we are probing the frame before the callee-saved registers are saved. The 'initReg' might have @@ -7461,11 +7461,11 @@ void CodeGen::genFnProlog() genAllocLclFrame(compiler->compLclFrameSize, REG_SCRATCH, &ignoreInitRegZeroed, intRegState.rsCalleeRegArgMaskLiveIn); genPushCalleeSavedRegisters(initReg, &initRegZeroed); -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 genPushCalleeSavedRegisters(); -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool needToEstablishFP = false; int afterLclFrameSPtoFPdelta = 0; if (doubleAlignOrFramePointerUsed()) @@ -7485,7 +7485,7 @@ void CodeGen::genFnProlog() needToEstablishFP = false; } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //------------------------------------------------------------------------- // @@ -7494,44 +7494,44 @@ void CodeGen::genFnProlog() //------------------------------------------------------------------------- CLANG_FORMAT_COMMENT_ANCHOR; -#ifndef _TARGET_ARM64_ +#ifndef TARGET_ARM64 regMaskTP maskStackAlloc = RBM_NONE; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (maskStackAlloc == RBM_NONE) { genAllocLclFrame(compiler->compLclFrameSize, initReg, &initRegZeroed, intRegState.rsCalleeRegArgMaskLiveIn); } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 //------------------------------------------------------------------------- -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (compiler->compLocallocUsed) { GetEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, REG_SAVED_LOCALLOC_SP, REG_SPBASE); regSet.verifyRegUsed(REG_SAVED_LOCALLOC_SP); compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // 
Preserve callee saved float regs to stack. genPreserveCalleeSavedFltRegs(compiler->compLclFrameSize); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Establish the AMD64 frame pointer after the OS-reported prolog. if (doubleAlignOrFramePointerUsed()) { bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC; genEstablishFramePointer(compiler->codeGen->genSPtoFPdelta(), reportUnwindData); } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 //------------------------------------------------------------------------- // @@ -7539,13 +7539,13 @@ void CodeGen::genFnProlog() // //------------------------------------------------------------------------- -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (needToEstablishFP) { genEstablishFramePointer(afterLclFrameSPtoFPdelta, /*reportUnwindData*/ false); needToEstablishFP = false; // nobody uses this later, but set it anyway, just to be explicit } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (compiler->info.compPublishStubParam) { @@ -7727,7 +7727,7 @@ void CodeGen::genFnProlog() initRegZeroed = false; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // This is needed only for Arm since it can use a zero initialized int register // to initialize vfp registers. if (!initRegZeroed) @@ -7735,7 +7735,7 @@ void CodeGen::genFnProlog() instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg); initRegZeroed = true; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM genZeroInitFltRegs(initFltRegs, initDblRegs, initReg); } @@ -7774,7 +7774,7 @@ void CodeGen::genFnProlog() } #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On non-x86 the VARARG cookie does not need any special treatment. // Load up the VARARG argument pointer register so it doesn't get clobbered. @@ -7821,9 +7821,9 @@ void CodeGen::genFnProlog() } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) if (compiler->opts.compStackCheckOnRet) { noway_assert(compiler->lvaReturnSpCheck != 0xCCCCCCCC && @@ -7831,7 +7831,7 @@ void CodeGen::genFnProlog() compiler->lvaTable[compiler->lvaReturnSpCheck].lvOnFrame); GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnSpCheck, 0); } -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) GetEmitter()->emitEndProlog(); compiler->unwindEndProlog(); @@ -7849,7 +7849,7 @@ void CodeGen::genFnProlog() * Please consult the "debugger team notification" comment in genFnProlog(). */ -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) void CodeGen::genFnEpilog(BasicBlock* block) { @@ -7898,7 +7898,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // We delay starting the unwind codes until we have an instruction which we know // needs an unwind code. 
In particular, for large stack frames in methods without // localloc, the sequence might look something like this: @@ -7984,11 +7984,11 @@ void CodeGen::genFnEpilog(BasicBlock* block) noway_assert(!genUsedPopToReturn); } -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 compiler->unwindBegEpilog(); genPopCalleeSavedRegistersAndFreeLclFrame(jmpEpilog); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 if (jmpEpilog) { @@ -8020,7 +8020,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) assert(methHnd != nullptr); assert(addrInfo.addr != nullptr); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH emitter::EmitCallType callType; void* addr; regNumber indCallReg; @@ -8084,7 +8084,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) addr, 0, // argSize EA_UNKNOWN, // retSize -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) EA_UNKNOWN, // secondRetSize #endif gcInfo.gcVarPtrSetCur, @@ -8098,7 +8098,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) true); // isJump // clang-format on CLANG_FORMAT_COMMENT_ANCHOR; -#endif //_TARGET_ARMARCH_ +#endif // TARGET_ARMARCH } #if FEATURE_FASTTAILCALL else @@ -8148,7 +8148,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (!genUsedPopToReturn) { // If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}", @@ -8156,16 +8156,16 @@ void CodeGen::genFnEpilog(BasicBlock* block) inst_RV(INS_bx, REG_LR, TYP_I_IMPL); compiler->unwindBranch16(); } -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 inst_RV(INS_ret, REG_LR, TYP_I_IMPL); compiler->unwindReturn(REG_LR); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } compiler->unwindEndEpilog(); } -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) void CodeGen::genFnEpilog(BasicBlock* block) { @@ -8239,7 +8239,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) if (compiler->compLclFrameSize) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* Add 'compiler->compLclFrameSize' to ESP */ /* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */ @@ -8298,21 +8298,21 @@ void CodeGen::genFnEpilog(BasicBlock* block) { if (compiler->compLclFrameSize != 0) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // AMD64 can't use "mov esp, ebp", according to the ABI specification describing epilogs. So, // do an LEA to "pop off" the frame allocation. needLea = true; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // We will just generate "mov esp, ebp" and be done with it. needMovEspEbp = true; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } } else if (compiler->compLclFrameSize == 0) { // do nothing before popping the callee-saved registers } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (compiler->compLclFrameSize == REGSIZE_BYTES) { // "pop ecx" will make ESP point to the callee-saved registers @@ -8330,7 +8330,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { int offset; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // lea esp, [ebp + compiler->compLclFrameSize - genSPtoFPdelta] // // Case 1: localloc not used. 
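// A minimal sketch (editorial illustration, not part of this patch) of the epilog decision the
// genFnEpilog hunk above guards with the renamed macro; the flags needLea and needMovEspEbp are
// the ones declared in the surrounding context lines:
//
//   #ifdef TARGET_AMD64
//       // The AMD64 ABI does not allow "mov esp, ebp" in an epilog, so the frame
//       // allocation is popped off with an LEA instead.
//       needLea = true;
//   #else // !TARGET_AMD64
//       // On x86 a plain "mov esp, ebp" is sufficient.
//       needMovEspEbp = true;
//   #endif // !TARGET_AMD64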
@@ -8364,15 +8364,15 @@ void CodeGen::genFnEpilog(BasicBlock* block) genPopCalleeSavedRegisters(); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 if (needMovEspEbp) { // mov esp, ebp inst_RV_RV(INS_mov, REG_SPBASE, REG_FPBASE); } -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 // pop ebp inst_RV(INS_pop, REG_EBP, TYP_I_IMPL); @@ -8469,7 +8469,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if FEATURE_FASTTAILCALL else { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Fast tail call. GenTreeCall* call = jmpNode->AsCall(); gtCallTypes callType = (gtCallTypes)call->gtCallType; @@ -8513,7 +8513,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) #else assert(!"Fast tail call as epilog+jmp"); unreached(); -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 } #endif // FEATURE_FASTTAILCALL } @@ -8521,7 +8521,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { unsigned stkArgSize = 0; // Zero on all platforms except x86 -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) bool fCalleePop = true; // varargs has caller pop @@ -8540,7 +8540,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Return, popping our arguments (if any) */ instGen_Return(stkArgSize); @@ -8553,7 +8553,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) #if defined(FEATURE_EH_FUNCLETS) -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM /***************************************************************************** * @@ -8881,7 +8881,7 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() } } -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) /***************************************************************************** * @@ -9153,11 +9153,11 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() #endif // DEBUG } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Look in CodeGenArm64.cpp -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) /***************************************************************************** * @@ -9359,7 +9359,7 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) noway_assert(isFramePointerUsed()); // We need an explicit frame pointer -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // We either generate: // add r1, r11, 8 @@ -9400,7 +9400,7 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, regTmp, regBase, callerSPOffs); GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta(); @@ -9412,7 +9412,7 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) GetEmitter()->emitIns_R_R_Imm(INS_add, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta); GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // The PSP sym value is Initial-SP, not Caller-SP! // We assume that RSP is Initial-SP when this function is called. 
That is, the stack frame @@ -9506,7 +9506,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Save compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working // down the stack to the largest register number stored at [RSP+offset-(genCountBits(regMask)-1)*XMM_REG_SIZE] // Here offset = 16-byte aligned offset after pushing integer registers. @@ -9529,17 +9529,17 @@ void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize) return; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0; unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES; // Offset is 16-byte aligned since we use movaps for preserving xmm regs. assert((offset % 16) == 0); instruction copyIns = ins_Copy(TYP_FLOAT); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES; instruction copyIns = INS_movupd; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg)) { @@ -9580,13 +9580,13 @@ void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize) return; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0; instruction copyIns = ins_Copy(TYP_FLOAT); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 unsigned firstFPRegPadding = 0; instruction copyIns = INS_movupd; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 unsigned offset; regNumber regBase; @@ -9603,10 +9603,10 @@ void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize) offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Offset is 16-byte aligned since we use movaps for restoring xmm regs assert((offset % 16) == 0); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg)) { @@ -9655,7 +9655,7 @@ void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/) } } -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) //----------------------------------------------------------------------------------- // IsMultiRegReturnedType: Returns true if the type is returned in multiple registers @@ -9676,7 +9676,7 @@ bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass) structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(hClass, &howToReturnStruct); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return (varTypeIsStruct(returnType) && (howToReturnStruct != SPK_PrimitiveType)); #else return (varTypeIsStruct(returnType)); @@ -9722,7 +9722,7 @@ var_types Compiler::GetHfaType(CORINFO_CLASS_HANDLE hClass) { #ifdef FEATURE_HFA CorInfoType corType = info.compCompHnd->getHFAType(hClass); -#if defined(_TARGET_ARM64_) && defined(FEATURE_SIMD) +#if defined(TARGET_ARM64) && defined(FEATURE_SIMD) if (corType == CORINFO_TYPE_VALUECLASS) { // This is a vector type. 
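// A minimal sketch (editorial illustration, not part of this patch) of the per-target setup in
// the genRestoreCalleeSavedFltRegs hunk above, with the renamed guard; the identifiers
// (firstFPRegPadding, copyIns, lvaIsCalleeSavedIntRegCountEven) come from the context lines:
//
//   #ifdef TARGET_AMD64
//       // The xmm save area is kept 16-byte aligned, so an aligned copy (movaps) can be used.
//       unsigned    firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
//       instruction copyIns           = ins_Copy(TYP_FLOAT);
//   #else // !TARGET_AMD64
//       unsigned    firstFPRegPadding = 0;
//       instruction copyIns           = INS_movupd;
//   #endif // !TARGET_AMD64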
@@ -9733,7 +9733,7 @@ var_types Compiler::GetHfaType(CORINFO_CLASS_HANDLE hClass) compFloatingPointUsed = true; } else -#endif // _TARGET_ARM64_ && FEATURE_SIMD +#endif // TARGET_ARM64 && FEATURE_SIMD if (corType != CORINFO_TYPE_UNDEF) { result = JITtype2varType(corType); @@ -9759,20 +9759,20 @@ var_types Compiler::GetHfaType(CORINFO_CLASS_HANDLE hClass) unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass) { assert(IsHfa(hClass)); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // A HFA of doubles is twice as large as an HFA of singles for ARM32 // (i.e. uses twice the number of single precison registers) return info.compCompHnd->getClassSize(hClass) / REGSIZE_BYTES; -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 var_types hfaType = GetHfaType(hClass); unsigned classSize = info.compCompHnd->getClassSize(hClass); // Note that the retail build issues a warning about a potential divsion by zero without the Max function unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType))); return classSize / elemSize; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH //------------------------------------------------------------------------ // genMapShiftInsToShiftByConstantIns: Given a general shift/rotate instruction, @@ -9824,7 +9824,7 @@ instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shi return shiftByConstantIns; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH //------------------------------------------------------------------------------------------------ // // getFirstArgWithStackSlot - returns the first argument with stack slot on the caller's frame. @@ -9842,7 +9842,7 @@ instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shi // unsigned CodeGen::getFirstArgWithStackSlot() { -#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARMARCH_) +#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARMARCH) unsigned baseVarNum = 0; // Iterate over all the lvParam variables in the Lcl var table until we find the first one // that's passed on the stack. @@ -9864,13 +9864,13 @@ unsigned CodeGen::getFirstArgWithStackSlot() assert(varDsc != nullptr); return baseVarNum; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return 0; #else // _TARGET_X86 // Not implemented for x86. NYI_X86("getFirstArgWithStackSlot not yet implemented for x86."); return BAD_VAR_NUM; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } //------------------------------------------------------------------------ @@ -10259,7 +10259,7 @@ void CodeGen::genSetScopeInfo(unsigned which, unsigned ilVarNum = compiler->compMap2ILvarNum(varNum); noway_assert((int)ilVarNum != ICorDebugInfo::UNKNOWN_ILNUM); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Non-x86 platforms are allowed to access all arguments directly // so we don't need this code. @@ -10296,7 +10296,7 @@ void CodeGen::genSetScopeInfo(unsigned which, varLoc->vlFixedVarArg.vlfvOffset = offset; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 VarName name = nullptr; @@ -10981,7 +10981,7 @@ GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value) return i; } -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) //------------------------------------------------------------------------ // genLongReturn: Generates code for long return statement for x86 and arm. 
// @@ -11017,7 +11017,7 @@ void CodeGen::genLongReturn(GenTree* treeNode) inst_RV_RV(ins_Copy(targetType), REG_LNGRET_HI, hiRetVal->GetRegNum(), TYP_INT); } } -#endif // _TARGET_X86_ || _TARGET_ARM_ +#endif // TARGET_X86 || TARGET_ARM //------------------------------------------------------------------------ // genReturn: Generates code for return statement. @@ -11047,13 +11047,13 @@ void CodeGen::genReturn(GenTree* treeNode) } #endif // DEBUG -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) if (targetType == TYP_LONG) { genLongReturn(treeNode); } else -#endif // _TARGET_X86_ || _TARGET_ARM_ +#endif // TARGET_X86 || TARGET_ARM { if (isStructReturn(treeNode)) { @@ -11073,16 +11073,16 @@ void CodeGen::genReturn(GenTree* treeNode) // exit point where it is actually dead. genConsumeReg(op1); -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) genSimpleReturn(treeNode); -#else // !_TARGET_ARM64_ -#if defined(_TARGET_X86_) +#else // !TARGET_ARM64 +#if defined(TARGET_X86) if (varTypeIsFloating(treeNode)) { genFloatReturn(treeNode); } else -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (varTypeIsFloating(treeNode) && (compiler->opts.compUseSoftFP || compiler->info.compIsVarArgs)) { if (targetType == TYP_FLOAT) @@ -11097,7 +11097,7 @@ void CodeGen::genReturn(GenTree* treeNode) } } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { regNumber retReg = varTypeIsFloating(treeNode) ? REG_FLOATRET : REG_INTRET; if (op1->GetRegNum() != retReg) @@ -11105,7 +11105,7 @@ void CodeGen::genReturn(GenTree* treeNode) inst_RV_RV(ins_Move_Extend(targetType, true), retReg, op1->GetRegNum(), targetType); } } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 } } @@ -11188,7 +11188,7 @@ void CodeGen::genReturn(GenTree* treeNode) } #endif // PROFILING_SUPPORTED -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) bool doStackPointerCheck = compiler->opts.compStackCheckOnRet; #if defined(FEATURE_EH_FUNCLETS) @@ -11207,10 +11207,10 @@ void CodeGen::genReturn(GenTree* treeNode) #endif // !FEATURE_EH_FUNCLETS genStackPointerCheck(doStackPointerCheck, compiler->lvaReturnSpCheck); -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) } -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) //------------------------------------------------------------------------ // genStackPointerCheck: Generate code to check the stack pointer against a saved value. 
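// Editorial summary (illustrative only, not part of the patch): the hunks in these files apply
// one mechanical substitution to the target-architecture guards, for example:
//
//   _TARGET_X86_    -> TARGET_X86        _TARGET_AMD64_   -> TARGET_AMD64
//   _TARGET_ARM_    -> TARGET_ARM        _TARGET_ARM64_   -> TARGET_ARM64
//   _TARGET_XARCH_  -> TARGET_XARCH      _TARGET_ARMARCH_ -> TARGET_ARMARCH
//   _TARGET_64BIT_  -> TARGET_64BIT
//
// so a typical guard, such as the long-return one in the genReturn hunk above, now reads:
//
//   #if defined(TARGET_X86) || defined(TARGET_ARM)
//       if (targetType == TYP_LONG)
//       {
//           genLongReturn(treeNode);
//       }
//       else
//   #endif // TARGET_X86 || TARGET_ARM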
@@ -11239,7 +11239,7 @@ void CodeGen::genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPo } } -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) unsigned CodeGenInterface::getCurrentStackLevel() const { diff --git a/src/coreclr/src/jit/codegeninterface.h b/src/coreclr/src/jit/codegeninterface.h index cc00fc8e5da6b..f9cb28e446e39 100644 --- a/src/coreclr/src/jit/codegeninterface.h +++ b/src/coreclr/src/jit/codegeninterface.h @@ -108,9 +108,9 @@ class CodeGenInterface bool m_genAlignLoops; private: -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) static const insFlags instInfo[INS_count]; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) static const BYTE instInfo[INS_count]; #else #error Unsupported target architecture @@ -188,15 +188,15 @@ class CodeGenInterface int genSPtoFPdelta() const; int genTotalFrameSize() const; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 virtual void SetSaveFpLrWithAllCalleeSavedRegisters(bool value) = 0; virtual bool IsSaveFpLrWithAllCalleeSavedRegisters() const = 0; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 regNumber genGetThisArgReg(GenTreeCall* call) const; -#ifdef _TARGET_XARCH_ -#ifdef _TARGET_AMD64_ +#ifdef TARGET_XARCH +#ifdef TARGET_AMD64 // There are no reloc hints on x86 unsigned short genAddrRelocTypeHint(size_t addr); #endif @@ -357,7 +357,7 @@ class CodeGenInterface m_cgInterruptible = value; } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH bool GetHasTailCalls() { @@ -367,13 +367,13 @@ class CodeGenInterface { m_cgHasTailCalls = value; } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH private: bool m_cgInterruptible; -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH bool m_cgHasTailCalls; -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH // The following will be set to true if we've determined that we need to // generate a full-blown pointer register map for the current method. diff --git a/src/coreclr/src/jit/codegenlinear.cpp b/src/coreclr/src/jit/codegenlinear.cpp index 656c048068f41..45b2e9afefa23 100644 --- a/src/coreclr/src/jit/codegenlinear.cpp +++ b/src/coreclr/src/jit/codegenlinear.cpp @@ -144,7 +144,7 @@ void CodeGen::genCodeForBBlist() compiler->fgSafeBasicBlockCreation = false; #endif // DEBUG -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) // Check stack pointer on call stress mode is not compatible with fully interruptible GC. REVIEW: why? // @@ -153,9 +153,9 @@ void CodeGen::genCodeForBBlist() compiler->opts.compStackCheckOnCall = false; } -#endif // defined(DEBUG) && defined(_TARGET_X86_) +#endif // defined(DEBUG) && defined(TARGET_X86) -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) // Check stack pointer on return stress mode is not compatible with fully interruptible GC. REVIEW: why? // It is also not compatible with any function that makes a tailcall: we aren't smart enough to only @@ -166,7 +166,7 @@ void CodeGen::genCodeForBBlist() compiler->opts.compStackCheckOnRet = false; } -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) // Prepare the blocks for exception handling codegen: mark the blocks that needs labels. 
genPrepForEHCodegen(); @@ -301,7 +301,7 @@ void CodeGen::genCodeForBBlist() } } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) genInsertNopForUnwinder(block); #endif @@ -309,7 +309,7 @@ void CodeGen::genCodeForBBlist() genUpdateCurrentFunclet(block); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (ShouldAlignLoops() && block->bbFlags & BBF_LOOP_HEAD) { GetEmitter()->emitLoopAlign(); @@ -513,11 +513,11 @@ void CodeGen::genCodeForBBlist() // (it's as good as any, but better than the prologue, which can only be a single instruction // group) then use COMPlus_JitLateDisasm=* to see if the late disassembler // thinks the instructions are the same as we do. -#if defined(_TARGET_AMD64_) && defined(LATE_DISASM) +#if defined(TARGET_AMD64) && defined(LATE_DISASM) genAmd64EmitterUnitTests(); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) genArm64EmitterUnitTests(); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } #endif // defined(DEBUG) @@ -603,7 +603,7 @@ void CodeGen::genCodeForBBlist() /* Both stacks should always be empty on exit from a basic block */ noway_assert(genStackLevel == 0); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // On AMD64, we need to generate a NOP after a call that is the last instruction of the block, in several // situations, to support proper exception handling semantics. This is mostly to ensure that when the stack // walker computes an instruction pointer for a frame, that instruction pointer is in the correct EH region. @@ -661,7 +661,7 @@ void CodeGen::genCodeForBBlist() } } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 /* Do we need to generate a jump or return? */ @@ -950,7 +950,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()]; // TODO-Cleanup: The following code could probably be further merged and cleaned up. -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // Load local variable from its home location. // In most cases the tree type will indicate the correct type to use for the load. // However, if it is NOT a normalizeOnLoad lclVar (i.e. NOT a small int that always gets @@ -977,7 +977,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) { inst_RV_TT(ins_Load(treeType, compiler->isSIMDTypeLocalAligned(lcl->GetLclNum())), dstReg, unspillTree); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) var_types targetType = unspillTree->gtType; if (targetType != genActualType(varDsc->lvType) && !varTypeIsGC(targetType) && !varDsc->lvNormalizeOnLoad()) { @@ -990,7 +990,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) // Load local variable from its home location. 
inst_RV_TT(ins, dstReg, unspillTree, 0, attr); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) var_types targetType = unspillTree->gtType; instruction ins = ins_Load(targetType, compiler->isSIMDTypeLocalAligned(lcl->GetLclNum())); emitAttr attr = emitTypeSize(targetType); @@ -1126,7 +1126,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) unspillTree->gtFlags &= ~GTF_SPILLED; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (unspillTree->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = unspillTree->AsMultiRegOp(); @@ -1153,7 +1153,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) unspillTree->gtFlags &= ~GTF_SPILLED; } -#endif //_TARGET_ARM_ +#endif // TARGET_ARM #endif // FEATURE_ARG_SPLIT else { @@ -1356,14 +1356,14 @@ void CodeGen::genConsumeAddrMode(GenTreeAddrMode* addr) void CodeGen::genConsumeRegs(GenTree* tree) { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (tree->OperGet() == GT_LONG) { genConsumeRegs(tree->gtGetOp1()); genConsumeRegs(tree->gtGetOp2()); return; } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) if (tree->isUsedFromSpillTemp()) { @@ -1379,7 +1379,7 @@ void CodeGen::genConsumeRegs(GenTree* tree) { genConsumeAddress(tree); } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH else if (tree->OperIsLocalRead()) { // A contained lcl var must be living on stack and marked as reg optional, or not be a @@ -1412,7 +1412,7 @@ void CodeGen::genConsumeRegs(GenTree* tree) } } #endif // FEATURE_HW_INTRINSICS -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH else { #ifdef FEATURE_SIMD @@ -1556,10 +1556,10 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, // Otherwise load the op1 (GT_ADDR) into the dstReg to copy the struct on the stack by value. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(dstReg != REG_SPBASE); inst_RV_RV(INS_mov, dstReg, REG_SPBASE); -#else // !_TARGET_X86_ +#else // !TARGET_X86 GenTree* dstAddr = putArgNode; if (dstAddr->GetRegNum() != dstReg) { @@ -1569,7 +1569,7 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, assert(m_stkArgVarNum != BAD_VAR_NUM); GetEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, dstReg, m_stkArgVarNum, putArgNode->getArgOffset()); } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 if (srcAddr->GetRegNum() != srcReg) { @@ -1639,7 +1639,7 @@ void CodeGen::genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode) // The x86 version of this is in codegenxarch.cpp, and doesn't take an // outArgVarNum, as it pushes its args onto the stack. 
// -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum) { assert(putArgStk->gtOp1->OperIs(GT_FIELD_LIST)); @@ -1676,7 +1676,7 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArg #endif } } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 //------------------------------------------------------------------------ // genSetBlockSize: Ensure that the block size is in the given register @@ -1896,7 +1896,7 @@ void CodeGen::genProduceReg(GenTree* tree) } } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (tree->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = tree->AsMultiRegOp(); @@ -1913,7 +1913,7 @@ void CodeGen::genProduceReg(GenTree* tree) } } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #endif // FEATURE_ARG_SPLIT else { @@ -2027,9 +2027,9 @@ void CodeGen::genEmitCall(int callType, regNumber base, bool isJump) { -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) int argSize = 0; -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) GetEmitter()->emitIns_Call(emitter::EmitCallType(callType), methHnd, INDEBUG_LDISASM_COMMA(sigInfo) @@ -2058,9 +2058,9 @@ void CodeGen::genEmitCall(int callType, MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), IL_OFFSETX ilOffset) { -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) int argSize = 0; -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) genConsumeAddress(indir->Addr()); GetEmitter()->emitIns_Call(emitter::EmitCallType(callType), @@ -2108,12 +2108,12 @@ void CodeGen::genCodeForCast(GenTreeOp* tree) // Casts int32/uint32/int64/uint64 --> float/double genIntToFloatCast(tree); } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT else if (varTypeIsLong(tree->gtOp1)) { genLongToIntCast(tree); } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT else { // Casts int <--> int @@ -2163,7 +2163,7 @@ CodeGen::GenIntCastDesc::GenIntCastDesc(GenTreeCast* cast) m_extendSrcSize = castSize; } } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // castType cannot be (U)LONG on 32 bit targets, such casts should have been decomposed. // srcType cannot be a small int type since it's the "actual type" of the cast operand. // This means that widening casts do not occur on 32 bit targets. @@ -2240,7 +2240,7 @@ CodeGen::GenIntCastDesc::GenIntCastDesc(GenTreeCast* cast) } } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) //------------------------------------------------------------------------ // genStoreLongLclVar: Generate code to store a non-enregistered long lclVar // @@ -2292,7 +2292,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) genTypeSize(TYP_INT)); } } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) //------------------------------------------------------------------------ // genCodeForJumpTrue: Generate code for a GT_JTRUE node. 
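// A minimal sketch (editorial illustration, not part of this patch) of the 32-bit-only path in
// the genConsumeRegs hunk above: on 32-bit targets an enregistered TYP_LONG node has already
// been decomposed, so both halves are consumed and the GT_LONG node itself is skipped.
//
//   #if !defined(TARGET_64BIT)
//       if (tree->OperGet() == GT_LONG)
//       {
//           genConsumeRegs(tree->gtGetOp1());
//           genConsumeRegs(tree->gtGetOp2());
//           return;
//       }
//   #endif // !defined(TARGET_64BIT)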
@@ -2313,7 +2313,7 @@ void CodeGen::genCodeForJumpTrue(GenTreeOp* jtrue) condition = GenCondition::Swap(condition); } -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if ((condition.GetCode() == GenCondition::FNEU) && (relop->gtGetOp1()->GetRegNum() == relop->gtGetOp2()->GetRegNum())) { diff --git a/src/coreclr/src/jit/codegenxarch.cpp b/src/coreclr/src/jit/codegenxarch.cpp index 2d7173ac2bf43..977a9e274fc23 100644 --- a/src/coreclr/src/jit/codegenxarch.cpp +++ b/src/coreclr/src/jit/codegenxarch.cpp @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma warning(disable : 4310) // cast truncates constant value - happens for (int8_t)0xb1 #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH #include "emit.h" #include "codegen.h" #include "lower.h" @@ -72,7 +72,7 @@ void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed) if (compiler->gsGlobalSecurityCookieAddr == nullptr) { noway_assert(compiler->gsGlobalSecurityCookieVal != 0); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ((int)compiler->gsGlobalSecurityCookieVal != compiler->gsGlobalSecurityCookieVal) { // initReg = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = initReg @@ -161,13 +161,13 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) // ... all other cases. else { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // For x64, structs that are not returned in registers are always // returned in implicit RetBuf. If we reached here, we should not have // a RetBuf and the return type should not be a struct. assert(compiler->info.compRetBuffArg == BAD_VAR_NUM); assert(!varTypeIsStruct(compiler->info.compRetNativeType)); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // For x86 Windows we can't make such assertions since we generate code for returning of // the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise @@ -198,13 +198,13 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // It doesn't matter which register we pick, since we're going to save and restore it // around the check. // TODO-CQ: Can we optimize the choice of register to avoid doing the push/pop sometimes? regGSCheck = REG_EAX; regMaskGSCheck = RBM_EAX; -#else // !_TARGET_X86_ +#else // !TARGET_X86 // Tail calls from methods that need GS check: We need to preserve registers while // emitting GS cookie check for a tail prefixed call or a jmp. To emit GS cookie // check, we might need a register. This won't be an issue for jmp calls for the @@ -235,7 +235,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) // address and hence it can neither be a VSD call nor PInvoke calli with cookie // parameter. Therefore, in case of jmp calls it is safe to use R11. regGSCheck = REG_R11; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } regMaskTP byrefPushedRegs = RBM_NONE; @@ -244,7 +244,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) if (compiler->gsGlobalSecurityCookieAddr == nullptr) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'. // Otherwise, load the value into a reg and use 'cmp mem64, reg64'. 
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal) @@ -253,7 +253,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0); } else -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) { assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal); GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0, @@ -682,7 +682,7 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode) genProduceReg(treeNode); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //------------------------------------------------------------------------ // genCodeForLongUMod: Generate code for a tree of the form // `(umod (gt_long x y) (const int))` @@ -765,7 +765,7 @@ void CodeGen::genCodeForLongUMod(GenTreeOp* node) } genProduceReg(node); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 //------------------------------------------------------------------------ // genCodeForDivMod: Generate code for a DIV or MOD operation. @@ -779,13 +779,13 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode) GenTree* dividend = treeNode->gtOp1; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsLong(dividend->TypeGet())) { genCodeForLongUMod(treeNode); return; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 GenTree* divisor = treeNode->gtOp2; genTreeOps oper = treeNode->OperGet(); @@ -873,7 +873,7 @@ void CodeGen::genCodeForBinary(GenTreeOp* treeNode) else { isValidOper |= treeNode->OperIs(GT_AND, GT_OR, GT_XOR); -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT isValidOper |= treeNode->OperIs(GT_ADD_LO, GT_ADD_HI, GT_SUB_LO, GT_SUB_HI); #endif } @@ -995,7 +995,7 @@ void CodeGen::genCodeForBinary(GenTreeOp* treeNode) if (treeNode->gtOverflowEx()) { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI); #else assert(oper == GT_ADD || oper == GT_SUB); @@ -1352,7 +1352,7 @@ void CodeGen::genStructReturn(GenTree* treeNode) #endif } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) //------------------------------------------------------------------------ // genFloatReturn: Generates code for float return statement for x86. @@ -1399,7 +1399,7 @@ void CodeGen::genFloatReturn(GenTree* treeNode) regSet.tmpRlsTemp(t); } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 //------------------------------------------------------------------------ // genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE/GT_CMP node. 
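// A minimal sketch (editorial illustration, not part of this patch) of the AMD64 GS-cookie
// compare in the genEmitGSCookieCheck hunk above: a cookie value that fits in 32 bits is
// compared against the frame slot with an immediate, otherwise it is first loaded into
// regGSCheck; names come from the context lines, and the elided operands are left as "...".
//
//   #if defined(TARGET_AMD64)
//       if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
//       {
//           // (materialize the 64-bit cookie value into regGSCheck, then compare against the slot)
//           GetEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
//       }
//       else
//   #endif // defined(TARGET_AMD64)
//       {
//           assert((int)compiler->gsGlobalSecurityCookieVal == (ssize_t)compiler->gsGlobalSecurityCookieVal);
//           GetEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0, ...);
//       }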
@@ -1576,7 +1576,7 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) void CodeGen::genCodeForTreeNode(GenTree* treeNode) { regNumber targetReg; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (treeNode->TypeGet() == TYP_LONG) { // All long enregistered nodes will have been decomposed into their @@ -1584,7 +1584,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) targetReg = REG_NA; } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) { targetReg = treeNode->GetRegNum(); } @@ -1653,9 +1653,9 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) break; case GT_CNS_INT: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(!treeNode->IsIconHandle(GTF_ICON_TLS_HDL)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 __fallthrough; case GT_CNS_DBL: @@ -1693,12 +1693,12 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) __fallthrough; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) case GT_ADD: case GT_SUB: @@ -1722,14 +1722,14 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForShift(treeNode); break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_LSH_HI: case GT_RSH_LO: genCodeForShiftLong(treeNode); break; -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) case GT_CAST: genCodeForCast(treeNode->AsOp()); @@ -1810,7 +1810,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) break; case GT_MULHI: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case GT_MUL_LONG: #endif genCodeForMulHi(treeNode->AsOp()); @@ -2028,7 +2028,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genProduceReg(treeNode); break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_LONG: assert(treeNode->isUsedFromReg()); genConsumeRegs(treeNode); @@ -2181,7 +2181,7 @@ void CodeGen::genMultiRegCallStoreToLocal(GenTree* treeNode) varDsc->SetRegNum(REG_STK); } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // Longs are returned in two return registers on x86. 
assert(varTypeIsLong(treeNode)); @@ -2226,9 +2226,9 @@ void CodeGen::genMultiRegCallStoreToLocal(GenTree* treeNode) } varDsc->SetRegNum(REG_STK); -#else // !UNIX_AMD64_ABI && !_TARGET_X86_ +#else // !UNIX_AMD64_ABI && !TARGET_X86 assert(!"Unreached"); -#endif // !UNIX_AMD64_ABI && !_TARGET_X86_ +#endif // !UNIX_AMD64_ABI && !TARGET_X86 } //------------------------------------------------------------------------ @@ -2290,7 +2290,7 @@ void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pIni // Frame size >= 0x3000 assert(frameSize >= compiler->getVeryLargeFrameSize()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 int spOffset = -(int)frameSize; if (compiler->info.compPublishStubParam) @@ -2315,7 +2315,7 @@ void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pIni { GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 static_assert_no_msg((RBM_STACK_PROBE_HELPER_ARG & (RBM_SECRET_STUB_PARAM | RBM_DEFAULT_HELPER_CALL_TARGET)) == RBM_NONE); @@ -2334,7 +2334,7 @@ void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pIni static_assert_no_msg((RBM_STACK_PROBE_HELPER_TRASH & RBM_STACK_PROBE_HELPER_ARG) == RBM_NONE); GetEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, REG_STACK_PROBE_HELPER_ARG); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 if (initReg == REG_STACK_PROBE_HELPER_ARG) { @@ -2382,7 +2382,7 @@ void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTm // function that does a probe, which will in turn call this function. assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (regTmp != REG_NA) { // For x86, some cases don't want to use "sub ESP" because we don't want the emitter to track the adjustment @@ -2395,7 +2395,7 @@ void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTm inst_RV_RV(INS_mov, REG_SPBASE, regTmp, TYP_I_IMPL); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { inst_RV_IV(INS_sub, REG_SPBASE, -spDelta, EA_PTRSIZE); } @@ -2705,11 +2705,11 @@ void CodeGen::genLclHeap(GenTree* tree) bool initMemOrLargeAlloc = compiler->info.compInitMem || (amount >= compiler->eeGetPageSize()); // must be >= not > -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 bool needRegCntRegister = true; -#else // !_TARGET_X86_ +#else // !TARGET_X86 bool needRegCntRegister = initMemOrLargeAlloc; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 if (needRegCntRegister) { @@ -2865,7 +2865,7 @@ void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode) switch (storeBlkNode->gtBlkOpKind) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case GenTreeBlk::BlkOpKindHelper: assert(!storeBlkNode->gtBlkOpGcUnsafe); if (isCopyBlk) @@ -2877,7 +2877,7 @@ void CodeGen::genCodeForStoreBlk(GenTreeBlk* storeBlkNode) genCodeForInitBlkHelper(storeBlkNode); } break; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 case GenTreeBlk::BlkOpKindRepInstr: #ifndef JIT32_GCENCODER assert(!storeBlkNode->gtBlkOpGcUnsafe); @@ -3025,7 +3025,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node) { emit->emitIns_R_R(INS_mov_i2xmm, EA_PTRSIZE, srcXmmReg, srcIntReg); emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // For x86, we need one more to convert it from 8 bytes to 16 bytes. 
emit->emitIns_R_R(INS_punpckldq, EA_16BYTE, srcXmmReg, srcXmmReg); #endif @@ -3069,7 +3069,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node) } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //------------------------------------------------------------------------ // genCodeForInitBlkHelper - Generate code for an InitBlk node by the means of the VM memcpy helper call // @@ -3088,7 +3088,7 @@ void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode) genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #ifdef FEATURE_PUT_STRUCT_ARG_STK // Generate code for a load from some address + offset @@ -3331,11 +3331,11 @@ void CodeGen::genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode) // unsigned CodeGen::genMove8IfNeeded(unsigned size, regNumber longTmpReg, GenTree* srcAddr, unsigned offset) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 instruction longMovIns = INS_movq; -#else // !_TARGET_X86_ +#else // !TARGET_X86 instruction longMovIns = INS_mov; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 if ((size & 8) != 0) { genCodeForLoadOffset(longMovIns, EA_8BYTE, longTmpReg, srcAddr, offset); @@ -3474,7 +3474,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode) regNumber xmmTmpReg = REG_NA; regNumber intTmpReg = REG_NA; regNumber longTmpReg = REG_NA; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86 we use an XMM register for both 16 and 8-byte chunks, but if it's // less than 16 bytes, we will just be using pushes if (size >= 8) @@ -3486,7 +3486,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode) { intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 // On x64 we use an XMM register only for 16-byte chunks. if (size >= XMM_REGSIZE_BYTES) { @@ -3497,16 +3497,16 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode) intTmpReg = putArgNode->GetSingleTempReg(RBM_ALLINT); longTmpReg = intTmpReg; } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 // If the size of this struct is larger than 16 bytes // let's use SSE2 to be able to do 16 byte at a time // loads and stores. if (size >= XMM_REGSIZE_BYTES) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(!m_pushStkArg); -#endif // _TARGET_X86_ +#endif // TARGET_X86 size_t slots = size / XMM_REGSIZE_BYTES; assert(putArgNode->gtGetOp1()->isContained()); @@ -3530,7 +3530,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode) // Fill the remainder (15 bytes or less) if there's one. 
if ((size & 0xf) != 0) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (m_pushStkArg) { // This case is currently supported only for the case where the total size is @@ -3546,7 +3546,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode) pushedBytes += genMove8IfNeeded(size, longTmpReg, src->AsOp()->gtOp1, 0); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { offset += genMove8IfNeeded(size, longTmpReg, src->AsOp()->gtOp1, offset); offset += genMove4IfNeeded(size, intTmpReg, src->AsOp()->gtOp1, offset); @@ -3783,7 +3783,7 @@ void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) gcInfo.gcMarkRegSetNpt(RBM_RDI); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //---------------------------------------------------------------------------------- // genCodeForCpBlkHelper - Generate code for a CpBlk node by the means of the VM memcpy helper call // @@ -3802,7 +3802,7 @@ void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode) genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // generate code do a switch statement based on a table of ip-relative offsets void CodeGen::genTableBasedSwitch(GenTree* treeNode) @@ -4307,7 +4307,7 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type) case GT_XOR: ins = INS_xor; break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_ADD_LO: ins = INS_add; break; @@ -4326,7 +4326,7 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type) case GT_RSH_LO: ins = INS_shrd; break; -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) default: unreached(); break; @@ -4397,7 +4397,7 @@ void CodeGen::genCodeForShift(GenTree* tree) genProduceReg(tree); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //------------------------------------------------------------------------ // genCodeForShiftLong: Generates the code sequence for a GenTree node that // represents a three operand bit shift or rotate operation (<>Lo). @@ -4595,14 +4595,14 @@ void CodeGen::genCodeForLclVar(GenTreeLclVar* tree) if (!isRegCandidate && !(tree->gtFlags & GTF_SPILLED)) { -#if defined(FEATURE_SIMD) && defined(_TARGET_X86_) +#if defined(FEATURE_SIMD) && defined(TARGET_X86) // Loading of TYP_SIMD12 (i.e. Vector3) variable if (tree->TypeGet() == TYP_SIMD12) { genLoadLclTypeSIMD12(tree); return; } -#endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_) +#endif // defined(FEATURE_SIMD) && defined(TARGET_X86) GetEmitter()->emitIns_R_S(ins_Load(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(tree->GetLclNum())), emitTypeSize(tree), tree->GetRegNum(), tree->GetLclNum(), 0); @@ -4674,13 +4674,13 @@ void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* tree) // Ensure that lclVar nodes are typed correctly. assert(!varDsc->lvNormalizeOnStore() || (targetType == genActualType(varDsc->TypeGet()))); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (targetType == TYP_LONG) { genStoreLongLclVar(tree); return; } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) #ifdef FEATURE_SIMD // storing of TYP_SIMD12 (i.e. Vector3) field @@ -4775,14 +4775,14 @@ void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node) assert(varTypeIsIntegral(index->TypeGet())); regNumber tmpReg = REG_NA; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT tmpReg = node->GetSingleTempReg(); #endif // Generate the bounds check if necessary. 
if ((node->gtFlags & GTF_INX_RNGCHK) != 0) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case that the index // is a native int on a 64-bit platform, we will need to widen the array length and then compare. if (index->TypeGet() == TYP_I_IMPL) @@ -4791,7 +4791,7 @@ void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node) GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, indexReg, tmpReg); } else -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { GetEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, indexReg, baseReg, static_cast(node->gtLenOffset)); } @@ -4799,14 +4799,14 @@ void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node) genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, node->gtIndRngFailBB); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (index->TypeGet() != TYP_I_IMPL) { // LEA needs 64-bit operands so we need to widen the index if it's TYP_INT. GetEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, tmpReg, indexReg); indexReg = tmpReg; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT // Compute the address of the array element. unsigned scale = node->gtElemSize; @@ -4821,13 +4821,13 @@ void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node) break; default: -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // IMUL treats its immediate operand as signed so scale can't be larger than INT32_MAX. // The VM doesn't allow such large array elements but let's be sure. noway_assert(scale <= INT32_MAX); -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT tmpReg = node->GetSingleTempReg(); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT GetEmitter()->emitIns_R_I(emitter::inst3opImulForReg(tmpReg), EA_PTRSIZE, indexReg, static_cast(scale)); @@ -5234,7 +5234,7 @@ bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarri { assert(writeBarrierForm != GCInfo::WBF_NoBarrier); -#if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS +#if defined(TARGET_X86) && NOGC_WRITE_BARRIERS if (!genUseOptimizedWriteBarriers(writeBarrierForm)) { return false; @@ -5307,9 +5307,9 @@ bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarri EA_PTRSIZE); // retSize return true; -#else // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS +#else // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS return false; -#endif // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS +#endif // !defined(TARGET_X86) || !NOGC_WRITE_BARRIERS } // Produce code for a GT_CALL node @@ -5401,7 +5401,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) #endif // FEATURE_VARARG } -#if defined(_TARGET_X86_) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_X86) || defined(UNIX_AMD64_ABI) // The call will pop its arguments. // for each putarg_stk: ssize_t stackArgBytes = 0; @@ -5422,19 +5422,19 @@ void CodeGen::genCallInstruction(GenTreeCall* call) { GenTreeObj* obj = source->AsObj(); unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // If we have an OBJ, we must have created a copy if the original arg was not a // local and was not a multiple of TARGET_POINTER_SIZE. // Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll. 
assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr()); -#endif // _TARGET_X86_ +#endif // TARGET_X86 assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes); } #endif // FEATURE_PUT_STRUCT_ARG_STK #endif // DEBUG } } -#endif // defined(_TARGET_X86_) || defined(UNIX_AMD64_ABI) +#endif // defined(TARGET_X86) || defined(UNIX_AMD64_ABI) // Insert a null check on "this" pointer if asked. if (call->NeedsNullCheck()) @@ -5520,7 +5520,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) } } -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) // Store the stack pointer so we can check it after the call. if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC) { @@ -5529,7 +5529,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) compiler->lvaTable[compiler->lvaCallSpCheck].lvOnFrame); GetEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallSpCheck, 0); } -#endif // defined(DEBUG) && defined(_TARGET_X86_) +#endif // defined(DEBUG) && defined(TARGET_X86) bool fPossibleSyncHelperCall = false; CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF; @@ -5543,7 +5543,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) (void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) bool fCallerPop = call->CallerPop(); #ifdef UNIX_X86_ABI @@ -5569,7 +5569,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) { argSizeForEmitter = -stackArgBytes; } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) // When it's a PInvoke call and the call type is USER function, we issue VZEROUPPER here // if the function contains 256bit AVX instructions, this is to avoid AVX-256 to Legacy SSE @@ -5592,7 +5592,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) if (target != nullptr) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT)) { // On x86, we need to generate a very specific pattern for indirect VSD calls: @@ -5759,7 +5759,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) var_types returnType = call->TypeGet(); if (returnType != TYP_VOID) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsFloating(returnType)) { // Spill the value from the fp stack. 
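
The TARGET_X86-only bookkeeping above (fCallerPop, stackArgBytes, argSizeForEmitter) tracks how many argument bytes the call pushes and whether the caller or the callee pops them. A minimal standalone sketch of that post-call decision, using a hypothetical helper name rather than the JIT's actual code:

// Caller-pop conventions (cdecl-style) make the caller reclaim the outgoing argument
// area after the call; callee-pop conventions (stdcall-style) leave nothing to do.
unsigned PostCallStackAdjust(bool callerPop, unsigned stackArgBytes)
{
    return callerPop ? stackArgBytes : 0;
}
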
@@ -5770,7 +5770,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) call->gtFlags &= ~GTF_SPILL; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { regNumber returnReg; @@ -5810,7 +5810,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) { // The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with @@ -5819,7 +5819,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) returnReg = REG_PINVOKE_TCB; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (varTypeIsFloating(returnType)) { returnReg = REG_FLOATRET; @@ -5846,7 +5846,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) gcInfo.gcMarkRegSetNpt(RBM_INTRET); } -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) if (compiler->opts.compStackCheckOnCall && call->gtCallType == CT_USER_FUNC) { noway_assert(compiler->lvaCallSpCheck != 0xCCCCCCCC && @@ -5871,7 +5871,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) instGen(INS_BREAKPOINT); genDefineTempLabel(sp_check); } -#endif // defined(DEBUG) && defined(_TARGET_X86_) +#endif // defined(DEBUG) && defined(TARGET_X86) #if !defined(FEATURE_EH_FUNCLETS) //------------------------------------------------------------------------- @@ -5905,7 +5905,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) unsigned stackAdjustBias = 0; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Is the caller supposed to pop the arguments? if (fCallerPop && (stackArgBytes != 0)) { @@ -5913,7 +5913,7 @@ void CodeGen::genCallInstruction(GenTreeCall* call) } SubtractStackLevel(stackArgBytes); -#endif // _TARGET_X86_ +#endif // TARGET_X86 genRemoveAlignmentAfterCall(call, stackAdjustBias); } @@ -6123,7 +6123,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) } } -#if FEATURE_VARARG && defined(_TARGET_AMD64_) +#if FEATURE_VARARG && defined(TARGET_AMD64) // In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg // register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to // be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point @@ -6156,7 +6156,7 @@ void CodeGen::genJmpMethod(GenTree* jmp) #endif // FEATURE_VARARG } -#if FEATURE_VARARG && defined(_TARGET_AMD64_) +#if FEATURE_VARARG && defined(TARGET_AMD64) // Jmp call to a vararg method - if the method has fewer than 4 fixed arguments, // load the remaining arg registers (both int and float) from the corresponding // shadow stack slots. This is for the reason that we don't know the number and type @@ -6322,7 +6322,7 @@ void CodeGen::genCompareInt(GenTree* treeNode) // contained so it doesn't handle other kind of operands. It could do more but on x86 that results // in additional register constrains and that may be worse than wasting 3 bytes on an immediate. if ( -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 (!op1->isUsedFromReg() || isByteReg(op1->GetRegNum())) && #endif (op2->IsCnsIntOrI() && genSmallTypeCanRepresentValue(TYP_UBYTE, op2->AsIntCon()->IconValue()))) @@ -6392,7 +6392,7 @@ void CodeGen::genCompareInt(GenTree* treeNode) } } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) //------------------------------------------------------------------------ // genLongToIntCast: Generate code for long to int casts on x86. 
// @@ -6496,7 +6496,7 @@ void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& d genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW); break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case GenIntCastDesc::CHECK_UINT_RANGE: { // We need to check if the value is not greater than 0xFFFFFFFF but this value @@ -6589,7 +6589,7 @@ void CodeGen::genIntToIntCast(GenTreeCast* cast) ins = INS_movsx; insSize = desc.ExtendSrcSize(); break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case GenIntCastDesc::ZERO_EXTEND_INT: // We can skip emitting this zero extending move if the previous instruction zero extended implicitly if ((srcReg == dstReg) && compiler->opts.OptimizationEnabled()) @@ -6713,10 +6713,10 @@ void CodeGen::genIntToFloatCast(GenTree* treeNode) var_types srcType = op1->TypeGet(); assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType)); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) // We expect morph to replace long to float/double casts with helper calls noway_assert(!varTypeIsLong(srcType)); -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) // Since xarch emitter doesn't handle reporting gc-info correctly while casting away gc-ness we // ensure srcType of a cast is non gc-type. Codegen should never see BYREF as source type except @@ -6904,7 +6904,7 @@ void CodeGen::genCkfinite(GenTree* treeNode) genConsumeReg(op1); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Copy the floating-point value to an integer register. If we copied a float to a long, then // right-shift the value so the high 32 bits of the floating-point value sit in the low 32 @@ -6930,7 +6930,7 @@ void CodeGen::genCkfinite(GenTree* treeNode) inst_RV_RV(ins_Copy(targetType), targetReg, op1->GetRegNum(), targetType); } -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT // If the target type is TYP_DOUBLE, we want to extract the high 32 bits into the register. // There is no easy way to do this. 
To not require an extra register, we'll use shuffles @@ -6954,7 +6954,7 @@ void CodeGen::genCkfinite(GenTree* treeNode) // je // shufps targetReg, targetReg, 0xB1 // ZWXY => WZYX // - // For TYP_FLOAT, it's the same as _TARGET_64BIT_: + // For TYP_FLOAT, it's the same as TARGET_64BIT: // mov_xmm2i tmpReg, targetReg // tmpReg <= low 32 bits // and tmpReg, // cmp tmpReg, @@ -7001,12 +7001,12 @@ void CodeGen::genCkfinite(GenTree* treeNode) inst_RV_RV_IV(INS_shufps, EA_16BYTE, targetReg, targetReg, (int8_t)0xb1); } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT genProduceReg(treeNode); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 int CodeGenInterface::genSPtoFPdelta() const { int delta; @@ -7113,7 +7113,7 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta() const assert(callerSPtoSPdelta <= 0); return callerSPtoSPdelta; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 //----------------------------------------------------------------------------------------- // genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask" @@ -7565,7 +7565,7 @@ void CodeGen::genAlignStackBeforeCall(GenTreeCall* call) // void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #if defined(UNIX_X86_ABI) // Put back the stack pointer if there was any padding for stack alignment unsigned padStkAlign = call->fgArgInfo->GetStkAlign(); @@ -7583,12 +7583,12 @@ void CodeGen::genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias) genAdjustSP(bias); } #endif // !UNIX_X86_ABI_ -#else // _TARGET_X86_ +#else // TARGET_X86 assert(bias == 0); #endif // !_TARGET_X86 } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //--------------------------------------------------------------------- // genAdjustStackForPutArgStk: @@ -7885,7 +7885,7 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk) AddStackLevel(currentOffset); } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 //--------------------------------------------------------------------- // genPutArgStk - generate code for passing an arg on the stack. 
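
genAlignStackBeforeCall/genRemoveAlignmentAfterCall above and the genPutArgStk hunks that follow both deal in whole pointer-sized stack slots: argument sizes are rounded up to the slot size and any alignment padding is popped off after the call. A minimal sketch of the rounding, assuming only standard C++ and a power-of-two slot size:

#include <cstddef>

constexpr std::size_t RoundUpToSlot(std::size_t size, std::size_t slot)
{
    return (size + slot - 1) & ~(slot - 1);
}

static_assert(RoundUpToSlot(12, 8) == 16, "a 12-byte struct takes two 8-byte slots on x64");
static_assert(RoundUpToSlot(12, 4) == 12, "and three 4-byte slots on x86");
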
@@ -7902,7 +7902,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk) GenTree* data = putArgStk->gtOp1; var_types targetType = genActualType(data->TypeGet()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 genAlignStackBeforeCall(putArgStk); @@ -7942,7 +7942,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk) genConsumeReg(data); genPushReg(targetType, data->GetRegNum()); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 { unsigned baseVarNum = getBaseVarForPutArgStk(putArgStk); @@ -7989,7 +7989,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk) argOffset); } } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } //--------------------------------------------------------------------- @@ -8024,7 +8024,7 @@ void CodeGen::genPutArgReg(GenTreeOp* tree) genProduceReg(tree); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // genPushReg: Push a register value onto the stack and adjust the stack level // // Arguments: @@ -8064,7 +8064,7 @@ void CodeGen::genPushReg(var_types type, regNumber srcReg) } AddStackLevel(size); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if defined(FEATURE_PUT_STRUCT_ARG_STK) // genStoreRegToStackArg: Store a register value into the stack argument area @@ -8112,14 +8112,14 @@ void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset } else #endif // FEATURE_SIMD -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (type == TYP_LONG) { assert(genIsValidFloatReg(srcReg)); ins = INS_movq; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { assert((varTypeIsFloating(type) && genIsValidFloatReg(srcReg)) || (varTypeIsIntegralOrI(type) && genIsValidIntReg(srcReg))); @@ -8129,7 +8129,7 @@ void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset size = genTypeSize(type); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (m_pushStkArg) { genPushReg(type, srcReg); @@ -8138,10 +8138,10 @@ void CodeGen::genStoreRegToStackArg(var_types type, regNumber srcReg, int offset { GetEmitter()->emitIns_AR_R(ins, attr, srcReg, REG_SPBASE, offset); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 assert(m_stkArgVarNum != BAD_VAR_NUM); GetEmitter()->emitIns_S_R(ins, attr, srcReg, m_stkArgVarNum, m_stkArgOffset + offset); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } //--------------------------------------------------------------------- @@ -8162,13 +8162,13 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) GenTree* source = putArgStk->gtGetOp1(); var_types targetType = source->TypeGet(); -#if defined(_TARGET_X86_) && defined(FEATURE_SIMD) +#if defined(TARGET_X86) && defined(FEATURE_SIMD) if (putArgStk->isSIMD12()) { genPutArgStkSIMD12(putArgStk); return; } -#endif // defined(_TARGET_X86_) && defined(FEATURE_SIMD) +#endif // defined(TARGET_X86) && defined(FEATURE_SIMD) if (varTypeIsSIMD(targetType)) { @@ -8204,7 +8204,7 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) // No need to disable GC the way COPYOBJ does. Here the refs are copied in atomic operations always. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86, any struct that has contains GC references must be stored to the stack using `push` instructions so // that the emitter properly detects the need to update the method's GC information. // @@ -8250,7 +8250,7 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) } AddStackLevel(TARGET_POINTER_SIZE); } -#else // !defined(_TARGET_X86_) +#else // !defined(TARGET_X86) // Consume these registers. 
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing"). @@ -8323,7 +8323,7 @@ void CodeGen::genPutStructArgStk(GenTreePutArgStk* putArgStk) } assert(numGCSlotsCopied == layout->GetGCPtrCount()); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } } #endif // defined(FEATURE_PUT_STRUCT_ARG_STK) @@ -8588,7 +8588,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, } else { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero, // load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to // make the call. @@ -8648,7 +8648,7 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, // After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time. //#define ALL_XARCH_EMITTER_UNIT_TESTS -#if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_) +#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64) void CodeGen::genAmd64EmitterUnitTests() { if (!verbose) @@ -8748,11 +8748,11 @@ void CodeGen::genAmd64EmitterUnitTests() printf("*************** End of genAmd64EmitterUnitTests()\n"); } -#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_) +#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64) #ifdef PROFILING_SUPPORTED -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //----------------------------------------------------------------------------------- // genProfilingEnterCallback: Generate the profiling function enter callback. @@ -8916,9 +8916,9 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper) SetStackLevel(saveStackLvl2); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //----------------------------------------------------------------------------------- // genProfilingEnterCallback: Generate the profiling function enter callback. @@ -9259,8 +9259,8 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper) #endif // !defined(UNIX_AMD64_ABI) } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // PROFILING_SUPPORTED -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH diff --git a/src/coreclr/src/jit/compiler.cpp b/src/coreclr/src/jit/compiler.cpp index fbfde069e289d..35d0c0dbefac8 100644 --- a/src/coreclr/src/jit/compiler.cpp +++ b/src/coreclr/src/jit/compiler.cpp @@ -65,7 +65,7 @@ MethodSet* Compiler::s_pJitMethodSet = nullptr; */ #ifdef FEATURE_JIT_METHOD_PERF -#if defined(_HOST_X86_) || defined(_HOST_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) #if defined(_MSC_VER) @@ -93,7 +93,7 @@ inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) #endif -#elif defined(_HOST_ARM_) || defined(_HOST_ARM64_) +#elif defined(HOST_ARM) || defined(HOST_ARM64) // If this doesn't work please see ../gc/gc.cpp for additional ARM // info (and possible solutions). @@ -548,19 +548,19 @@ var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS // Start by determining if we have an HFA/HVA with a single element. #ifdef FEATURE_HFA -#if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated // as if they are not HFA types. 
if (!isVarArg) -#endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) { switch (structSize) { case 4: case 8: -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case 16: -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 { var_types hfaType; #ifdef ARM_SOFTFP @@ -605,28 +605,28 @@ var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS useType = TYP_SHORT; break; -#if !defined(_TARGET_XARCH_) || defined(UNIX_AMD64_ABI) +#if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 3: useType = TYP_INT; break; -#endif // !_TARGET_XARCH_ || UNIX_AMD64_ABI +#endif // !TARGET_XARCH || UNIX_AMD64_ABI -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case 4: // We dealt with the one-float HFA above. All other 4-byte structs are handled as INT. useType = TYP_INT; break; -#if !defined(_TARGET_XARCH_) || defined(UNIX_AMD64_ABI) +#if !defined(TARGET_XARCH) || defined(UNIX_AMD64_ABI) case 5: case 6: case 7: useType = TYP_I_IMPL; break; -#endif // !_TARGET_XARCH_ || UNIX_AMD64_ABI -#endif // _TARGET_64BIT_ +#endif // !TARGET_XARCH || UNIX_AMD64_ABI +#endif // TARGET_64BIT case TARGET_POINTER_SIZE: { @@ -694,7 +694,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, // Determine if we can pass the struct as a primitive type. // Note that on x86 we never pass structs as primitive types (unless the VM unwraps them for us). -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 #ifdef UNIX_AMD64_ABI // An 8-byte struct may need to be passed in a floating point register @@ -729,7 +729,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 // Did we change this struct type into a simple "primitive" type? // @@ -749,13 +749,13 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated // as if they are not HFA/HVA types. 
var_types hfaType; -#if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) if (isVarArg) { hfaType = TYP_UNDEF; } else -#endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) { hfaType = GetHfaType(clsHnd); } @@ -792,7 +792,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, useType = TYP_UNKNOWN; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); @@ -815,7 +815,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, useType = TYP_UNKNOWN; } -#elif defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#elif defined(TARGET_X86) || defined(TARGET_ARM) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI @@ -835,14 +835,14 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, // and can't be passed in multiple registers CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_X86) || defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) // Otherwise we pass this struct by value on the stack // setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI howToPassStruct = SPK_ByValue; useType = TYP_STRUCT; -#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) // Otherwise we pass this struct by reference to a copy // setup wbPassType and useType indicate that this is passed using one register (by reference to a copy) @@ -985,7 +985,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, } } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled // // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats @@ -1050,7 +1050,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, assert(structDesc.passedInRegisters == false); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct assert(structSize > TARGET_POINTER_SIZE); @@ -1072,7 +1072,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, useType = TYP_UNKNOWN; } -#elif defined(_TARGET_ARM_) || defined(_TARGET_X86_) +#elif defined(TARGET_ARM) || defined(TARGET_X86) // Otherwise we return this struct using a return buffer // setup wbPassType and useType indicate that this is returned using a return buffer register @@ -2041,7 +2041,7 @@ VarName Compiler::compVarName(regNumber reg, bool isFloatReg) const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM isFloatReg = genIsValidFloatReg(reg); #endif @@ -2085,7 +2085,7 @@ const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { "cl", "cx" }, { "dl", "dx" }, { "bl", "bx" }, -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 { "spl", "sp" }, // ESP { "bpl", "bp" }, // EBP { "sil", "si" }, // ESI @@ -2098,7 +2098,7 @@ const char* Compiler::compRegNameForSize(regNumber reg, size_t size) { "r13b", "r13w" }, { "r14b", "r14w" }, { "r15b", "r15w" }, -#endif // 
_TARGET_AMD64_ +#endif // TARGET_AMD64 }; // clang-format on @@ -2158,13 +2158,13 @@ void Compiler::compSetProcessor() const JitFlags& jitFlags = *opts.jitFlags; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) info.genCPU = CPU_ARM; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) info.genCPU = CPU_ARM64; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) info.genCPU = CPU_X64; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) info.genCPU = CPU_X86_PENTIUM_4; else @@ -2176,19 +2176,19 @@ void Compiler::compSetProcessor() // CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.compUseCMOV = true; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); #ifdef DEBUG if (opts.compUseCMOV) opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); #endif // DEBUG -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Instruction set flags for Intel hardware intrinsics -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH opts.compSupportsISA = 0; #ifdef FEATURE_CORECLR @@ -2202,16 +2202,16 @@ void Compiler::compSetProcessor() if (JitConfig.EnableSSE()) { opts.setSupportedISA(InstructionSet_SSE); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_SSE_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (JitConfig.EnableSSE2()) { opts.setSupportedISA(InstructionSet_SSE2); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_SSE2_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AES) && JitConfig.EnableAES()) { @@ -2236,23 +2236,23 @@ void Compiler::compSetProcessor() if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE41) && JitConfig.EnableSSE41()) { opts.setSupportedISA(InstructionSet_SSE41); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_SSE41_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE42) && JitConfig.EnableSSE42()) { opts.setSupportedISA(InstructionSet_SSE42); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_SSE42_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_POPCNT) && JitConfig.EnablePOPCNT()) { opts.setSupportedISA(InstructionSet_POPCNT); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_POPCNT_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX) && JitConfig.EnableAVX()) @@ -2279,9 +2279,9 @@ void Compiler::compSetProcessor() if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_LZCNT) && JitConfig.EnableLZCNT()) { opts.setSupportedISA(InstructionSet_LZCNT); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_LZCNT_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } // We currently need to also check that AVX is supported as that controls the support for the VEX encoding @@ -2289,9 +2289,9 @@ void Compiler::compSetProcessor() if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI1) && JitConfig.EnableBMI1() && compSupports(InstructionSet_AVX)) { opts.setSupportedISA(InstructionSet_BMI1); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_BMI1_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } // We currently need to also check that AVX is supported as that controls the support for the VEX encoding @@ -2299,9 +2299,9 @@ void 
Compiler::compSetProcessor() if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI2) && JitConfig.EnableBMI2() && compSupports(InstructionSet_AVX)) { opts.setSupportedISA(InstructionSet_BMI2); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 opts.setSupportedISA(InstructionSet_BMI2_X64); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } #else // !FEATURE_CORECLR if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT)) @@ -2340,9 +2340,9 @@ void Compiler::compSetProcessor() codeGen->GetEmitter()->SetContains256bitAVX(false); } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) if (JitConfig.EnableHWIntrinsic()) { // Dummy ISAs for simplifying the JIT code @@ -2463,17 +2463,17 @@ void Compiler::compSetProcessor() #ifdef PROFILING_SUPPORTED // A Dummy routine to receive Enter/Leave/Tailcall profiler callbacks. // These are used when complus_JitEltHookEnabled=1 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) { return; } -#else //! _TARGET_AMD64_ +#else //! TARGET_AMD64 void DummyProfilerELTStub(UINT_PTR ProfilerHandle) { return; } -#endif //!_TARGET_AMD64_ +#endif //! TARGET_AMD64 #endif // PROFILING_SUPPORTED @@ -2810,10 +2810,10 @@ void Compiler::compInitOptions(JitFlags* jitFlags) opts.compJitELTHookEnabled = false; #endif // PROFILING_SUPPORTED -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // 0 is default: use the appropriate frame type based on the function. opts.compJitSaveFpLrWithCalleeSavedRegisters = 0; -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) #ifdef DEBUG opts.dspInstrs = false; @@ -3047,7 +3047,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags) opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); #endif -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) enum { STACK_CHECK_ON_RETURN = 0x1, @@ -3061,10 +3061,10 @@ void Compiler::compInitOptions(JitFlags* jitFlags) dwJitStackChecks = STACK_CHECK_ALL; } opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; -#endif // defined(_TARGET_X86_) -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(TARGET_X86) +#endif // defined(DEBUG) && defined(TARGET_XARCH) #if MEASURE_MEM_ALLOC s_dspMemStats = (JitConfig.DisplayMemStats() != 0); @@ -3138,7 +3138,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags) opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); #ifdef DEBUG -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Whether encoding of absolute addr as PC-rel offset is enabled opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); #endif @@ -3146,10 +3146,10 @@ void Compiler::compInitOptions(JitFlags* jitFlags) opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // TODO-ARM64-NYI: enable hot/cold splitting opts.compProcedureSplitting = false; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #ifdef DEBUG opts.compProcedureSplittingEH = opts.compProcedureSplitting; @@ -3309,12 +3309,12 @@ void Compiler::compInitOptions(JitFlags* jitFlags) #endif // UNIX_AMD64_ABI #endif -#if defined(DEBUG) && defined(_TARGET_ARM64_) +#if defined(DEBUG) && defined(TARGET_ARM64) if ((s_pJitMethodSet == nullptr) || s_pJitMethodSet->IsActiveMethod(info.compFullName, 
info.compMethodHash())) { opts.compJitSaveFpLrWithCalleeSavedRegisters = JitConfig.JitSaveFpLrWithCalleeSavedRegisters(); } -#endif // defined(DEBUG) && defined(_TARGET_ARM64_) +#endif // defined(DEBUG) && defined(TARGET_ARM64) } #ifdef DEBUG @@ -3854,7 +3854,7 @@ void Compiler::compSetOptimizationLevel() codeGen->setFrameRequired(true); } -#if !defined(_TARGET_AMD64_) +#if !defined(TARGET_AMD64) // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or // (2) the function is marked "noinline". The reason for #2 is that people mark functions // noinline to ensure the show up on in a stack walk. But for AMD64, we don't need a frame @@ -3884,7 +3884,7 @@ void Compiler::compSetOptimizationLevel() fgCanRelocateEHRegions = true; } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Function compRsvdRegCheck: // given a curState to use for calculating the total frame size // it will return true if the REG_OPT_RSVD should be reserved so @@ -3923,13 +3923,13 @@ bool Compiler::compRsvdRegCheck(FrameLayoutState curState) noway_assert(frameSize >= calleeSavedRegMaxSz); -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // TODO-ARM64-CQ: update this! JITDUMP(" Returning true (ARM64)\n\n"); return true; // just always assume we'll need it, for now -#else // _TARGET_ARM_ +#else // TARGET_ARM // frame layout: // @@ -4050,9 +4050,9 @@ bool Compiler::compRsvdRegCheck(FrameLayoutState curState) // JITDUMP(" Returning false\n\n"); return false; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH //------------------------------------------------------------------------ // compGetTieringName: get a string describing tiered compilation settings @@ -4415,21 +4415,21 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags } #endif // DEBUG -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) if (opts.compStackCheckOnRet) { lvaReturnSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnSpCheck")); lvaTable[lvaReturnSpCheck].lvType = TYP_I_IMPL; } -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) if (opts.compStackCheckOnCall) { lvaCallSpCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallSpCheck")); lvaTable[lvaCallSpCheck].lvType = TYP_I_IMPL; } -#endif // defined(DEBUG) && defined(_TARGET_X86_) +#endif // defined(DEBUG) && defined(TARGET_X86) // Filter out unimported BBs fgRemoveEmptyBlocks(); @@ -4545,14 +4545,14 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags DBEXEC(VERBOSE, fgDispBasicBlocks(true)); #endif -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (fgNeedToAddFinallyTargetBits) { // We previously wiped out the BBF_FINALLY_TARGET bits due to some morphing; add them back. 
fgAddFinallyTargetFlags(); fgNeedToAddFinallyTargetBits = false; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Decide the kind of code we want to generate fgSetOptions(); @@ -4798,7 +4798,7 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Check if we need to add the Quirk for the PPP backward compat issue compQuirkForPPPflag = compQuirkForPPP(); #endif @@ -4845,13 +4845,13 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags // call and register argument info, flowgraph and loop info, etc. compJitStats(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (compLocallocUsed) { // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // Assign registers to variables, etc. @@ -4991,7 +4991,7 @@ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) { } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Check if we need to add the Quirk for the PPP backward compat issue. // This Quirk addresses a compatibility issue between the new RyuJit and the previous JIT64. // A backward compatibity issue called 'PPP' exists where a PInvoke call passes a 32-byte struct @@ -5072,7 +5072,7 @@ bool Compiler::compQuirkForPPP() } return false; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 /*****************************************************************************/ @@ -5169,9 +5169,9 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd, compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a")); if (compJitFuncInfoFile == nullptr) { -#if defined(DEBUG) && !defined(FEATURE_PAL) // no 'perror' in the PAL +#if defined(DEBUG) && !defined(TARGET_UNIX) // no 'perror' in the PAL perror("Failed to open JitFuncInfoLogFile"); -#endif // defined(DEBUG) && !defined(FEATURE_PAL) +#endif // defined(DEBUG) && !defined(TARGET_UNIX) } } } @@ -5194,7 +5194,7 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd, // Match OS for compMatchedVM CORINFO_EE_INFO* eeInfo = eeGetEEInfo(); -#ifdef _TARGET_UNIX_ +#ifdef TARGET_UNIX info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX); #else info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT); @@ -5211,13 +5211,13 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd, if (!info.compMatchedVM) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture // target default. 
Currently this is disabling all ARM64 architecture features except FP and SIMD, but this @@ -5245,7 +5245,7 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd, compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SM4); compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SVE); -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) } compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; @@ -5675,7 +5675,7 @@ void Compiler::compCompileFinish() { if (compJitHaltMethod()) { -#if !defined(_HOST_UNIX_) +#if !defined(HOST_UNIX) // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog // Don't do an assert, but just put up the dialog box so we get just-in-time debugger @@ -8769,7 +8769,7 @@ void cTreeFlags(Compiler* comp, GenTree* tree) break; case GT_MUL: -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h index 103b8360eb4be..098a170705e2d 100644 --- a/src/coreclr/src/jit/compiler.h +++ b/src/coreclr/src/jit/compiler.h @@ -442,9 +442,9 @@ class LclVarDsc unsigned char lvIsTemp : 1; // Short-lifetime compiler temp -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #if OPT_BOOL_OPS unsigned char lvIsBoolean : 1; // set if variable is boolean @@ -458,10 +458,10 @@ class LclVarDsc unsigned char lvVolatileHint : 1; // hint for AssertionProp #endif -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? -#endif // !_TARGET_64BIT_ -#ifdef _TARGET_64BIT_ +#endif // !TARGET_64BIT +#ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG @@ -579,10 +579,10 @@ class LclVarDsc assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) switch (_lvHfaElemKind) { case HFA_ELEM_NONE: @@ -604,7 +604,7 @@ class LclVarDsc unreached(); } assert(slots <= 4); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 return slots; } @@ -622,9 +622,9 @@ class LclVarDsc // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. 
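
The !TARGET_64BIT-only _lvOtherReg field above exists because 32-bit targets decompose TYP_LONG locals into two 32-bit halves, each needing its own register. A minimal sketch of that split, with hypothetical names and no JIT types:

#include <cstdint>

struct LongHalves
{
    uint32_t lo; // would live in the variable's primary register
    uint32_t hi; // would live in the second register tracked by _lvOtherReg
};

LongHalves DecomposeLong(uint64_t value)
{
    return { static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 32) };
}
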
@@ -654,7 +654,7 @@ class LclVarDsc ///////////////////// -#if defined(_TARGET_64BIT_) +#if defined(TARGET_64BIT) regNumber GetOtherReg() const { @@ -668,7 +668,7 @@ class LclVarDsc assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT regNumber GetOtherReg() const { @@ -680,7 +680,7 @@ class LclVarDsc _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT ///////////////////// @@ -811,11 +811,11 @@ class LclVarDsc // Otherwise lvPromoted is valid. bool lvPromotedStruct() { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); -#else // defined(_TARGET_64BIT_) +#else // defined(TARGET_64BIT) return lvPromoted; -#endif // defined(_TARGET_64BIT_) +#endif // defined(TARGET_64BIT) } unsigned lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK. @@ -834,7 +834,7 @@ class LclVarDsc assert(varTypeIsStruct(lvType) || (lvType == TYP_BLK) || (lvPromoted && lvUnusedStruct)); -#if defined(FEATURE_SIMD) && !defined(_TARGET_64BIT_) +#if defined(FEATURE_SIMD) && !defined(TARGET_64BIT) // For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. We can't do // this for arguments, which must be passed according the defined ABI. We don't want to do this for // dependently promoted struct fields, but we don't know that here. See lvaMapSimd12ToSimd16(). @@ -844,7 +844,7 @@ class LclVarDsc assert(lvExactSize == 12); return 16; } -#endif // defined(FEATURE_SIMD) && !defined(_TARGET_64BIT_) +#endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT) return roundUp(lvExactSize, TARGET_POINTER_SIZE); } @@ -1290,7 +1290,7 @@ struct FuncInfoDsc // funclet. It is only valid if funKind field indicates this is a // EH-related funclet: FUNC_HANDLER or FUNC_FILTER -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array. emitLocation* startLoc; @@ -1303,16 +1303,16 @@ struct FuncInfoDsc BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))]; unsigned unwindCodeSlot; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section @@ -1322,18 +1322,18 @@ struct FuncInfoDsc // Note 2: we currently don't support hot/cold splitting in functions // with EH, so uwiCold will be NULL for all funclets. -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) emitLocation* startLoc; emitLocation* endLoc; emitLocation* coldStartLoc; // locations for the cold section, if there is one. 
emitLocation* coldEndLoc; -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) jitstd::vector* cfiCodes; -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else // that isn't shared between the main function body and funclets. @@ -1549,14 +1549,14 @@ struct fgArgTabEntry // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". unsigned numHfaRegs = hfaSlots; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (!IsHfaArg()) { @@ -1580,7 +1580,7 @@ struct fgArgTabEntry #endif // FEATURE_HFA } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; @@ -1590,7 +1590,7 @@ struct fgArgTabEntry { return isBackFilled; } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } @@ -1599,7 +1599,7 @@ struct fgArgTabEntry { return false; } -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM bool isPassedInRegisters() { @@ -1649,14 +1649,14 @@ struct fgArgTabEntry #ifdef FEATURE_HFA if (IsHfaRegArg()) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) @@ -1670,7 +1670,7 @@ struct fgArgTabEntry size <<= 1; } #endif // FEATURE_SIMD -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } #endif // FEATURE_HFA return size; @@ -1688,7 +1688,7 @@ struct fgArgTabEntry } regNumber argReg = GetRegNum(0); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; @@ -1719,7 +1719,7 @@ struct fgArgTabEntry // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (numSlots == 1))); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) @@ -1727,7 +1727,7 @@ struct fgArgTabEntry isPassedAsPrimitiveType = true; } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } @@ -2024,9 +2024,9 @@ class Compiler friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT friend class DecomposeLongs; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -2314,11 +2314,11 @@ class Compiler // a PSPSym for functions with any EH. bool ehNeedsPSPSym() const { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return false; -#else // _TARGET_X86_ +#else // TARGET_X86 return compHndBBtabCount > 0; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } bool ehAnyFunclets(); // Are there any funclets in this function? 
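
The TARGET_ARM block in the fgArgTabEntry hunk above counts HFA slots in float-sized (4-byte) units, so an HFA whose elements are doubles uses the registers in pairs: the double-register count is half the slot count, and conversely the reported size doubles. A minimal sketch of that adjustment as a standalone helper (hypothetical name):

// hfaSlots is the number of 4-byte slots the HFA occupies; for double elements the
// hardware registers are consumed in pairs, so half as many double registers are needed.
unsigned HfaDoubleRegCount(unsigned hfaSlots)
{
    return hfaSlots / 2; // e.g. struct { double x; double y; } -> 4 slots -> 2 double regs
}
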
@@ -2967,7 +2967,7 @@ class Compiler #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables @@ -3012,7 +3012,7 @@ class Compiler DNER_DepField, // It is a field of a dependently promoted struct DNER_NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set DNER_MinOptsGC, // It is a GC Ref and we are compiling MinOpts -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) DNER_LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER @@ -3023,10 +3023,10 @@ class Compiler void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)); unsigned lvaVarargsHandleArg; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack // arguments -#endif // _TARGET_X86_ +#endif // TARGET_X86 unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame @@ -3049,26 +3049,26 @@ class Compiler PhasedVar lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space #endif // FEATURE_FIXED_OUT_ARGS -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On architectures whose ABIs allow structs to be passed in registers, struct promotion will sometimes // require us to "rematerialize" a struct from it's separate constituent field variables. Packing several sub-word // field variables into an argument register is a hard problem. It's easier to reserve a word of memory into which // such field can be copied, after which the assembled memory word can be read into the register. We will allocate // this variable to be this scratch word whenever struct promotion occurs. unsigned lvaPromotedStructAssemblyScratchVar; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return. -#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call. -#endif // defined(DEBUG) && defined(_TARGET_X86_) +#endif // defined(DEBUG) && defined(TARGET_X86) unsigned lvaGenericsContextUseCount; @@ -3108,9 +3108,9 @@ class Compiler //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgsWithInitialReg(); @@ -3122,7 +3122,7 @@ class Compiler #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. 
bool lvaIsCalleeSavedIntRegCountEven(); #endif @@ -3243,7 +3243,7 @@ class Compiler #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); @@ -3259,7 +3259,7 @@ class Compiler // For ARM64, this is structs larger than 16 bytes that are passed by reference. bool lvaIsImplicitByRefLocal(unsigned varNum) { -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { @@ -3268,7 +3268,7 @@ class Compiler assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } @@ -3343,9 +3343,9 @@ class Compiler void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool GetRequiresScratchVar(); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM private: bool CanPromoteStructVar(unsigned lclNum); @@ -3360,9 +3360,9 @@ class Compiler Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool requiresScratchVar; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG typedef JitHashTable, var_types> @@ -3373,9 +3373,9 @@ class Compiler StructPromotionHelper* structPromotionHelper; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) void lvaPromoteLongVars(); -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); @@ -3390,9 +3390,9 @@ class Compiler assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); -#if defined(_TARGET_64BIT_) +#if defined(TARGET_64BIT) assert(varDsc->lvSize() == 16); -#endif // defined(_TARGET_64BIT_) +#endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. lvSize() // already does this calculation. However, we also need to prevent mapping types if the var is a @@ -3634,7 +3634,7 @@ class Compiler GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types baseType); GenTree* addRangeCheckIfNeeded(NamedIntrinsic intrinsic, GenTree* lastOp, bool mustExpand); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, @@ -3680,7 +3680,7 @@ class Compiler CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, @@ -4023,7 +4023,7 @@ class Compiler void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif @@ -4379,11 +4379,11 @@ class Compiler BasicBlock* canonicalBlock, flowList* predEdge); -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. 
fgNeedToAddFinallyTargetBits signals // when this is necessary. bool fgNeedToAddFinallyTargetBits; -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, @@ -5003,9 +5003,9 @@ class Compiler BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); @@ -6896,7 +6896,7 @@ class Compiler bool raIsVarargsStackArg(unsigned lclNum) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 LclVarDsc* varDsc = &lvaTable[lclNum]; @@ -6904,11 +6904,11 @@ class Compiler return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg)); -#else // _TARGET_X86_ +#else // TARGET_X86 return false; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } /* @@ -7023,7 +7023,7 @@ class Compiler // Returns the frame size at which we will generate a loop to probe the stack. target_size_t getVeryLargeFrameSize() { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // The looping probe code is 40 bytes, whereas the straight-line probing for // the (0x2000..0x3000) case is 44, so use looping for anything 0x2000 bytes // or greater, to generate smaller code. @@ -7043,10 +7043,10 @@ class Compiler public: VirtualStubParamInfo(bool isCoreRTABI) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; @@ -7057,7 +7057,7 @@ class Compiler reg = REG_R11; regMask = RBM_R11; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; @@ -7068,7 +7068,7 @@ class Compiler reg = REG_R4; regMask = RBM_R4; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else @@ -7100,7 +7100,7 @@ class Compiler bool generateCFIUnwindCodes() { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) return IsTargetAbi(CORINFO_CORERT_ABI); #else return false; @@ -7289,7 +7289,7 @@ class Compiler codeGen->SetInterruptible(value); } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH bool GetHasTailCalls() { @@ -7299,7 +7299,7 @@ class Compiler { codeGen->SetHasTailCalls(value); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH #if DOUBLE_ALIGN const bool genDoubleAlign() @@ -7379,14 +7379,14 @@ class Compiler // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR // node, else NULL. @@ -7470,7 +7470,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void unwindSetFrameReg(regNumber reg, unsigned offset); void unwindSaveReg(regNumber reg, unsigned offset); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void unwindPushMaskInt(regMaskTP mask); void unwindPushMaskFloat(regMaskTP mask); void unwindPopMaskInt(regMaskTP mask); @@ -7480,9 +7480,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // called via unwindPadding(). void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) void unwindNop(); void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last // instruction and the current location. @@ -7492,7 +7492,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. @@ -7509,16 +7509,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); -#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && defined(FEATURE_EH_FUNCLETS)) +#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); -#endif // _TARGET_AMD64_ || (_TARGET_X86_ && FEATURE_EH_FUNCLETS) +#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); @@ -7529,14 +7529,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); @@ -7553,7 +7553,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX const CFI_CODE* const pCfiCode); #endif -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) @@ -7575,7 +7575,7 @@ 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (compSupports(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; @@ -7604,7 +7604,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return true; #else return featureSIMD; @@ -7649,7 +7649,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; @@ -7660,7 +7660,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; @@ -7671,7 +7671,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; @@ -7682,7 +7682,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() @@ -7921,7 +7921,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Creates a GT_SIMD tree for Abs intrinsic. GenTree* impSIMDAbs(CORINFO_CLASS_HANDLE typeHnd, var_types baseType, unsigned simdVectorSize, GenTree* op1); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. @@ -7950,7 +7950,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX SIMDIntrinsicID impSIMDIntegralRelOpGreaterThanOrEqual( CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, var_types baseType, GenTree** op1, GenTree** op2); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); @@ -7989,7 +7989,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // This is the maximum SIMD type supported for this target. 
var_types getSIMDVectorType() { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return TYP_SIMD32; @@ -7999,7 +7999,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return TYP_SIMD16; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return TYP_SIMD16; #else assert(!"getSIMDVectorType() unimplemented on target arch"); @@ -8028,7 +8028,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Note - cannot be used for System.Runtime.Intrinsic unsigned getSIMDVectorRegisterByteLength() { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (getSIMDSupportLevel() == SIMD_AVX2_Supported) { return YMM_REGSIZE_BYTES; @@ -8038,7 +8038,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported); return XMM_REGSIZE_BYTES; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return FP_REGSIZE_BYTES; #else assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch"); @@ -8055,7 +8055,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // AVX2: 32-byte Vector and Vector256 unsigned int maxSIMDStructBytes() { -#if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_XARCH_) +#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) if (compSupports(InstructionSet_AVX)) { return YMM_REGSIZE_BYTES; @@ -8208,7 +8208,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compSupports(InstructionSet isa) const { -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) return (opts.compSupportsISA & (1ULL << isa)) != 0; #else return false; @@ -8217,7 +8217,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool canUseVexEncoding() const { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH return compSupports(InstructionSet_AVX); #else return false; @@ -8313,7 +8313,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX { JitFlags* jitFlags; // all flags passed from the EE -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) uint64_t compSupportsISA; void setSupportedISA(InstructionSet isa) { @@ -8418,7 +8418,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // true if we must generate code compatible with JIT32 quirks bool IsJit32Compat() { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return jitFlags->IsSet(JitFlags::JIT_FLAG_DESKTOP_QUIRKS); #else return false; @@ -8428,7 +8428,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // true if we must generate code compatible with Jit64 quirks bool IsJit64Compat() { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) return jitFlags->IsSet(JitFlags::JIT_FLAG_DESKTOP_QUIRKS); #elif !defined(FEATURE_CORECLR) return true; @@ -8452,17 +8452,17 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compGcChecks; // Check arguments and return values to ensure they are sane #endif -#if defined(DEBUG) && defined(_TARGET_XARCH_) +#if defined(DEBUG) && defined(TARGET_XARCH) bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct. 
-#endif // defined(DEBUG) && defined(_TARGET_XARCH_) +#endif // defined(DEBUG) && defined(TARGET_XARCH) -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86. -#endif // defined(DEBUG) && defined(_TARGET_X86_) +#endif // defined(DEBUG) && defined(TARGET_X86) bool compNeedSecurityCheck; // This flag really means where or not a security object needs // to be allocated on the stack. @@ -8483,7 +8483,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen #ifdef DEBUG -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible #endif #endif // DEBUG @@ -8553,11 +8553,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compTailCallLoopOpt; #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // Decision about whether to save FP/LR registers with callee-saved registers (see // COMPlus_JitSaveFpLrWithCalleSavedRegisters). int compJitSaveFpLrWithCalleeSavedRegisters; -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) #ifdef ARM_SOFTFP static const bool compUseSoftFP = true; @@ -8880,11 +8880,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // to be returned in RAX. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return (info.compRetBuffArg != BAD_VAR_NUM); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 return (compIsProfilerHookNeeded()) && (info.compRetBuffArg != BAD_VAR_NUM); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } // Returns true if the method returns a value in more than one return register @@ -8893,7 +8893,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // On x86 only 64-bit longs are returned in multiple registers return varTypeIsLong(info.compRetNativeType); #else // targets: X64-UNIX, ARM64 or ARM32 @@ -8970,14 +8970,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 Phases previousCompletedPhase; // the most recently completed phase @@ -8994,11 +8994,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. 
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) @@ -9199,7 +9199,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags* compileFlags); @@ -9218,7 +9218,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 bool compQuirkForPPP(); // Check if this method should be Quirked for the PPP issue #endif public: @@ -9426,7 +9426,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX static bool mayNeedShadowCopy(LclVarDsc* varDsc) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines @@ -9452,7 +9452,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; -#else // !defined(_TARGET_AMD64_) +#else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } @@ -10560,7 +10560,7 @@ extern unsigned fatal_NYI; * Codegen */ -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; @@ -10579,9 +10579,9 @@ const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; @@ -10604,12 +10604,12 @@ const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) const instruction INS_BREAKPOINT = INS_brk; #else const instruction INS_BREAKPOINT = INS_bkpt; @@ -10618,7 +10618,7 @@ const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 /*****************************************************************************/ diff --git a/src/coreclr/src/jit/compiler.hpp b/src/coreclr/src/jit/compiler.hpp index 25d8d4defbd46..429a583ee5c70 100644 --- a/src/coreclr/src/jit/compiler.hpp +++ b/src/coreclr/src/jit/compiler.hpp @@ -671,14 +671,14 @@ inline var_types genSignedType(var_types type) inline bool isRegParamType(var_types type) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return (type <= TYP_INT || type == TYP_REF || type == TYP_BYREF); -#else // !_TARGET_X86_ +#else // !TARGET_X86 return true; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || 
defined(TARGET_ARM64) /*****************************************************************************/ // Returns true if 'type' is a struct that can be enregistered for call args // or can be returned by value in multiple registers. @@ -730,7 +730,7 @@ inline bool Compiler::VarTypeIsMultiByteAndCanEnreg( return result; } -#endif //_TARGET_AMD64_ || _TARGET_ARM64_ +#endif // TARGET_AMD64 || TARGET_ARM64 /*****************************************************************************/ @@ -1188,14 +1188,14 @@ inline GenTree* Compiler::gtNewFieldRef(var_types typ, CORINFO_FIELD_HANDLE fldH { unsigned lclNum = obj->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvFieldAccessed = 1; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // These structs are passed by reference; we should probably be able to treat these // as non-global refs, but downstream logic expects these to be marked this way. if (lvaTable[lclNum].lvIsParam) { tree->gtFlags |= GTF_GLOB_REF; } -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) } else { @@ -1339,14 +1339,14 @@ inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate) assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE); assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE)); -#if defined(_HOST_64BIT_) && !defined(_TARGET_64BIT_) +#if defined(HOST_64BIT) && !defined(TARGET_64BIT) if (gtOper == GT_CNS_LNG && oper == GT_CNS_INT) { // When casting from LONG to INT, we need to force cast of the value, // if the host architecture represents INT and LONG with the same data size. AsLngCon()->gtLconVal = (INT64)(INT32)AsLngCon()->gtLconVal; } -#endif // defined(_HOST_64BIT_) && !defined(_TARGET_64BIT_) +#endif // defined(HOST_64BIT) && !defined(TARGET_64BIT) SetOperRaw(oper); @@ -1372,7 +1372,7 @@ inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate) AsIntCon()->gtFieldSeq = nullptr; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (oper == GT_MUL_LONG) { // We sometimes bash GT_MUL to GT_MUL_LONG, which converts it from GenTreeOp to GenTreeMultiRegOp. @@ -1428,7 +1428,7 @@ inline void GenTree::SetOperResetFlags(genTreeOps oper) inline void GenTree::ChangeOperConst(genTreeOps oper) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT assert(oper != GT_CNS_LNG); // We should never see a GT_CNS_LNG for a 64-bit target! #endif assert(OperIsConst(oper)); // use ChangeOper() instead @@ -1779,10 +1779,10 @@ inline void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler* comp, R bool doubleWeight = lvIsTemp; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // and, for the time being, implict byref params doubleWeight |= lvIsImplicitByRef; -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) if (doubleWeight && (weight * 2 > weight)) { @@ -1986,19 +1986,19 @@ inline int Compiler::lvaCachedGenericContextArgOffset() // Arguments: // varNum - The variable to inquire about. Positive for user variables // or arguments, negative for spill-temporaries. -// mustBeFPBased - [_TARGET_ARM_ only] True if the base register must be FP. +// mustBeFPBased - [TARGET_ARM only] True if the base register must be FP. // After FINAL_FRAME_LAYOUT, if false, it also requires SP base register. 
-// pBaseReg - [_TARGET_ARM_ only] Out arg. *pBaseReg is set to the base +// pBaseReg - [TARGET_ARM only] Out arg. *pBaseReg is set to the base // register to use. -// addrModeOffset - [_TARGET_ARM_ only] The mode offset within the variable that we need to address. +// addrModeOffset - [TARGET_ARM only] The mode offset within the variable that we need to address. // For example, for a large struct local, and a struct field reference, this will be the offset // of the field. Thus, for V02 + 0x28, if V02 itself is at offset SP + 0x10 // then addrModeOffset is what gets added beyond that, here 0x28. -// isFloatUsage - [_TARGET_ARM_ only] True if the instruction being generated is a floating +// isFloatUsage - [TARGET_ARM only] True if the instruction being generated is a floating // point instruction. This requires using floating-point offset restrictions. // Note that a variable can be non-float, e.g., struct, but accessed as a // float local field. -// pFPbased - [non-_TARGET_ARM_] Out arg. Set *FPbased to true if the +// pFPbased - [non-TARGET_ARM] Out arg. Set *FPbased to true if the // variable is addressed off of FP, false if it's addressed // off of SP. // @@ -2006,7 +2006,7 @@ inline int Compiler::lvaCachedGenericContextArgOffset() // Returns the variable offset from the given base register. // inline -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM int Compiler::lvaFrameAddress( int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage) @@ -2027,7 +2027,7 @@ inline assert((unsigned)varNum < lvaCount); varDsc = lvaTable + varNum; bool isPrespilledArg = false; -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) isPrespilledArg = varDsc->lvIsParam && compIsProfilerHookNeeded() && lvaIsPreSpilled(varNum, codeGen->regSet.rsMaskPreSpillRegs(false)); #endif @@ -2036,16 +2036,16 @@ inline // check that this has a valid stack location. if (lvaDoneFrameLayout > REGALLOC_FRAME_LAYOUT && !varDsc->lvOnFrame) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI // On amd64, every param has a stack location, except on Unix-like systems. assert(varDsc->lvIsParam); #endif // UNIX_AMD64_ABI -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // For other targets, a stack parameter that is enregistered or prespilled // for profiling on ARM will have a stack location. assert((varDsc->lvIsParam && !varDsc->lvIsRegArg) || isPrespilledArg); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } FPbased = varDsc->lvFramePointerBased; @@ -2062,7 +2062,7 @@ inline #if DOUBLE_ALIGN assert(FPbased == (isFramePointerUsed() || (genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg))); #else -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(FPbased == isFramePointerUsed()); #endif #endif @@ -2127,7 +2127,7 @@ inline // Worst case FP based offset. 
CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); #else varOffset = -(codeGen->genTotalFrameSize()); @@ -2136,7 +2136,7 @@ inline } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (FPbased) { if (mustBeFPBased) @@ -2333,11 +2333,11 @@ inline unsigned Compiler::compMapILargNum(unsigned ILargNum) // inline var_types Compiler::mangleVarArgsType(var_types type) { -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) if (opts.compUseSoftFP -#if defined(_TARGET_WINDOWS_) +#if defined(TARGET_WINDOWS) || info.compIsVarArgs -#endif // defined(_TARGET_WINDOWS_) +#endif // defined(TARGET_WINDOWS) ) { switch (type) @@ -2350,7 +2350,7 @@ inline var_types Compiler::mangleVarArgsType(var_types type) break; } } -#endif // defined(_TARGET_ARMARCH_) +#endif // defined(TARGET_ARMARCH) return type; } @@ -2358,7 +2358,7 @@ inline var_types Compiler::mangleVarArgsType(var_types type) #if FEATURE_VARARG inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 switch (floatReg) { case REG_XMM0: @@ -2372,16 +2372,16 @@ inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg) default: unreached(); } -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // How will float args be passed for RyuJIT/x86? NYI("getCallArgIntRegister for RyuJIT/x86"); return REG_NA; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 switch (intReg) { case REG_RCX: @@ -2395,11 +2395,11 @@ inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg) default: unreached(); } -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // How will float args be passed for RyuJIT/x86? NYI("getCallArgFloatRegister for RyuJIT/x86"); return REG_NA; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } #endif // FEATURE_VARARG @@ -2764,7 +2764,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) leaveBlk->bbRefs = 0; leaveBlk->bbPreds = nullptr; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // This function (fgConvertBBToThrowBB) can be called before the predecessor lists are created (e.g., in // fgMorph). The fgClearFinallyTargetBit() function to update the BBF_FINALLY_TARGET bit depends on these // predecessor lists. 
If there are no predecessor lists, we immediately clear all BBF_FINALLY_TARGET bits @@ -2779,7 +2779,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgClearAllFinallyTargetBits(); fgNeedToAddFinallyTargetBits = true; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } } @@ -2933,7 +2933,7 @@ inline regNumber genMapIntRegArgNumToRegNum(unsigned argNum) inline regNumber genMapFloatRegArgNumToRegNum(unsigned argNum) { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 assert(argNum < ArrLen(fltArgRegs)); return fltArgRegs[argNum]; @@ -2970,7 +2970,7 @@ inline regMaskTP genMapIntRegArgNumToRegMask(unsigned argNum) inline regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum) { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 assert(argNum < ArrLen(fltArgMasks)); return fltArgMasks[argNum]; @@ -2986,7 +2986,7 @@ __forceinline regMaskTP genMapArgNumToRegMask(unsigned argNum, var_types type) if (varTypeUsesFloatArgReg(type)) { result = genMapFloatRegArgNumToRegMask(argNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (type == TYP_DOUBLE) { assert((result & RBM_DBL_REGS) != 0); @@ -3060,9 +3060,9 @@ inline unsigned genMapFloatRegNumToRegArgNum(regNumber regNum) { assert(genRegMask(regNum) & RBM_FLTARG_REGS); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return regNum - REG_F0; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return regNum - REG_V0; #elif defined(UNIX_AMD64_ABI) return regNum - REG_FLTARG_0; @@ -3965,7 +3965,7 @@ inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(const LclVarDsc* // We have a parameter that could be enregistered CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // The struct parameter is a register candidate return PROMOTION_TYPE_INDEPENDENT; @@ -4063,11 +4063,11 @@ inline bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc) { // Stack parameters are always untracked w.r.t. 
GC reportings const bool isStackParam = varDsc->lvIsParam && !varDsc->lvIsRegArg; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return !isStackParam && !lvaIsFieldOfDependentlyPromotedStruct(varDsc); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 return !isStackParam; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } else { diff --git a/src/coreclr/src/jit/copyprop.cpp b/src/coreclr/src/jit/copyprop.cpp index 3bacee1c9092a..ece3f28b99d0e 100644 --- a/src/coreclr/src/jit/copyprop.cpp +++ b/src/coreclr/src/jit/copyprop.cpp @@ -97,7 +97,7 @@ int Compiler::optCopyProp_LclVarScore(LclVarDsc* lclVarDsc, LclVarDsc* copyVarDs score -= 4; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // For doubles we also prefer to change parameters into non-parameter local variables if (lclVarDsc->lvType == TYP_DOUBLE) { diff --git a/src/coreclr/src/jit/decomposelongs.cpp b/src/coreclr/src/jit/decomposelongs.cpp index 8178bd20194f3..77112049271ea 100644 --- a/src/coreclr/src/jit/decomposelongs.cpp +++ b/src/coreclr/src/jit/decomposelongs.cpp @@ -26,7 +26,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX*/ #pragma hdrstop #endif -#ifndef _TARGET_64BIT_ // DecomposeLongs is only used on 32-bit platforms +#ifndef TARGET_64BIT // DecomposeLongs is only used on 32-bit platforms #include "decomposelongs.h" @@ -924,7 +924,7 @@ GenTree* DecomposeLongs::DecomposeNeg(LIR::Use& use) GenTree* zero = m_compiler->gtNewZeroConNode(TYP_INT); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) GenTree* hiAdjust = m_compiler->gtNewOperNode(GT_ADD_HI, TYP_INT, hiOp1, zero); GenTree* hiResult = m_compiler->gtNewOperNode(GT_NEG, TYP_INT, hiAdjust); @@ -933,7 +933,7 @@ GenTree* DecomposeLongs::DecomposeNeg(LIR::Use& use) loResult->gtFlags |= GTF_SET_FLAGS; hiAdjust->gtFlags |= GTF_USE_FLAGS; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // We tend to use "movs" to load zero to a register, and that sets the flags, so put the // zero before the loResult, which is setting the flags needed by GT_SUB_HI. 
@@ -1960,4 +1960,4 @@ genTreeOps DecomposeLongs::GetLoOper(genTreeOps oper) } } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT diff --git a/src/coreclr/src/jit/disasm.cpp b/src/coreclr/src/jit/disasm.cpp index 43ef4430ddf45..9190820ff0168 100644 --- a/src/coreclr/src/jit/disasm.cpp +++ b/src/coreclr/src/jit/disasm.cpp @@ -40,7 +40,7 @@ #define MAX_CLASSNAME_LENGTH 1024 -#if defined(_AMD64_) +#if defined(HOST_AMD64) #pragma comment(linker, \ "/ALTERNATENAME:__imp_?CchFormatAddr@DIS@@QEBA_K_KPEAG0@Z=__imp_?CchFormatAddr@DIS@@QEBA_K_KPEA_W0@Z") @@ -59,7 +59,7 @@ linker, \ "/ALTERNATENAME:__imp_?PfncchfixupSet@DIS@@QEAAP6A_KPEBV1@_K1PEAG1PEA_K@ZP6A_K011213@Z@Z=__imp_?PfncchfixupSet@DIS@@QEAAP6A_KPEBV1@_K1PEA_W1PEA_K@ZP6A_K011213@Z@Z") -#elif defined(_X86_) +#elif defined(HOST_X86) #pragma comment(linker, "/ALTERNATENAME:__imp_?CchFormatAddr@DIS@@QBEI_KPAGI@Z=__imp_?CchFormatAddr@DIS@@QBEI_KPA_WI@Z") #pragma comment(linker, "/ALTERNATENAME:__imp_?CchFormatInstr@DIS@@QBEIPAGI@Z=__imp_?CchFormatInstr@DIS@@QBEIPA_WI@Z") @@ -125,7 +125,7 @@ size_t DisAssembler::disCchAddrMember( size_t retval = 0; // assume we don't know -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) DISX86::TRMTA terminationType = DISX86::TRMTA(pdis->Trmta()); @@ -193,7 +193,7 @@ size_t DisAssembler::disCchAddrMember( break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case DISX86::trmtaFallThrough: @@ -205,7 +205,7 @@ size_t DisAssembler::disCchAddrMember( break; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: @@ -214,7 +214,7 @@ size_t DisAssembler::disCchAddrMember( break; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DISARM64::TRMTA terminationType = DISARM64::TRMTA(pdis->Trmta()); @@ -360,7 +360,7 @@ size_t __stdcall DisAssembler::disCchFixup( size_t DisAssembler::disCchFixupMember( const DIS* pdis, DIS::ADDR addr, size_t size, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) DISX86::TRMTA terminationType = DISX86::TRMTA(pdis->Trmta()); // DIS::ADDR disIndAddr; @@ -462,7 +462,7 @@ size_t DisAssembler::disCchFixupMember( break; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DISARM64::TRMTA terminationType = DISARM64::TRMTA(pdis->Trmta()); // DIS::ADDR disIndAddr; @@ -600,7 +600,7 @@ size_t __stdcall DisAssembler::disCchRegRel( size_t DisAssembler::disCchRegRelMember( const DIS* pdis, DIS::REGA reg, DWORD disp, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORD* pdwDisp) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) DISX86::TRMTA terminationType = DISX86::TRMTA(pdis->Trmta()); // DIS::ADDR disIndAddr; @@ -696,7 +696,7 @@ size_t DisAssembler::disCchRegRelMember( break; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DISARM64::TRMTA terminationType = DISARM64::TRMTA(pdis->Trmta()); @@ -917,7 +917,7 @@ size_t DisAssembler::CbDisassemble(DIS* pdis, DISASM_DUMP("CbDisassemble offs %Iu addr %I64u\n", offs, addr); // assert(!"can't disassemble instruction!!!"); fprintf(pfile, "MSVCDIS can't disassemble instruction @ offset %Iu (0x%02x)!!!\n", offs, offs); -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) fprintf(pfile, "%08Xh\n", *(unsigned int*)pb); return 4; #else @@ -926,9 +926,9 @@ size_t DisAssembler::CbDisassemble(DIS* pdis, #endif } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) assert(cb == 4); // all instructions are 4 bytes! 
-#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 /* remember current offset and instruction size */ @@ -941,7 +941,7 @@ size_t DisAssembler::CbDisassemble(DIS* pdis, if (findLabels) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) DISX86::TRMTA terminationType = DISX86::TRMTA(pdis->Trmta()); /* check the termination type of the instruction */ @@ -1000,7 +1000,7 @@ size_t DisAssembler::CbDisassemble(DIS* pdis, break; } // end switch -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DISARM64::TRMTA terminationType = DISARM64::TRMTA(pdis->Trmta()); /* check the termination type of the instruction */ @@ -1112,9 +1112,9 @@ size_t DisAssembler::CbDisassemble(DIS* pdis, fprintf(pfile, "%03X", offs); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 #define CCH_INDENT 8 // fixed sized instructions, always 8 characters -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #define CCH_INDENT 30 // large constants sometimes #else #define CCH_INDENT 24 @@ -1246,11 +1246,11 @@ void DisAssembler::DisasmBuffer(FILE* pfile, bool printit) { DIS* pdis = NULL; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pdis = DIS::PdisNew(DIS::distX86); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) pdis = DIS::PdisNew(DIS::distX8664); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) pdis = DIS::PdisNew(DIS::distArm64); #else // _TARGET_* #error Unsupported or unset target architecture @@ -1262,7 +1262,7 @@ void DisAssembler::DisasmBuffer(FILE* pfile, bool printit) return; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT pdis->SetAddr64(true); #endif diff --git a/src/coreclr/src/jit/disasm.h b/src/coreclr/src/jit/disasm.h index 7ec292cb96ab2..c4d09a38376f7 100644 --- a/src/coreclr/src/jit/disasm.h +++ b/src/coreclr/src/jit/disasm.h @@ -41,9 +41,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "msvcdis.h" #pragma warning(default : 4640) -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH #include "disx86.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #include "disarm64.h" #else // _TARGET_* #error Unsupported or unset target architecture @@ -56,17 +56,17 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX /*****************************************************************************/ -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT template struct SizeTKeyFuncs : JitLargePrimitiveKeyFuncs { }; -#else // !_HOST_64BIT_ +#else // !HOST_64BIT template struct SizeTKeyFuncs : JitSmallPrimitiveKeyFuncs { }; -#endif // _HOST_64BIT_ +#endif // HOST_64BIT typedef JitHashTable, CORINFO_METHOD_HANDLE> AddrToMethodHandleMap; typedef JitHashTable, size_t> AddrToAddrMap; diff --git a/src/coreclr/src/jit/ee_il_dll.cpp b/src/coreclr/src/jit/ee_il_dll.cpp index 42f6a170a087d..d5d3bbdefd46b 100644 --- a/src/coreclr/src/jit/ee_il_dll.cpp +++ b/src/coreclr/src/jit/ee_il_dll.cpp @@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "emit.h" #include "corexcep.h" -#if !defined(_HOST_UNIX_) +#if !defined(HOST_UNIX) #include // For _dup, _setmode #include // For _O_TEXT #include // For EINVAL @@ -64,7 +64,7 @@ extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* jitHost) return; } -#ifdef FEATURE_PAL +#ifdef HOST_UNIX int err = PAL_InitializeDLL(); if (err != 0) { @@ -86,7 +86,7 @@ extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* jitHost) } #endif // DEBUG -#if !defined(_HOST_UNIX_) +#if !defined(HOST_UNIX) if (jitstdout == nullptr) { int stdoutFd = 
_fileno(procstdout()); @@ -110,7 +110,7 @@ extern "C" DLLEXPORT void __stdcall jitStartup(ICorJitHost* jitHost) } } } -#endif // !_HOST_UNIX_ +#endif // !HOST_UNIX // If jitstdout is still null, fallback to whatever procstdout() was // initially set to. @@ -402,7 +402,7 @@ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) jitFlags.SetFromFlags(cpuCompileFlags); #ifdef FEATURE_SIMD -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT) && jitFlags.IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD) && jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2)) { @@ -420,7 +420,7 @@ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) return 32; } } -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) if (GetJitTls() != nullptr && JitTls::GetCompiler() != nullptr) { JITDUMP("getMaxIntrinsicSIMDVectorLength: returning 16\n"); @@ -446,7 +446,7 @@ void CILJit::setRealJit(ICorJitCompiler* realJitCompiler) unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Everything fits into a single 'slot' size // to accommodate irregular sized structs, they are passed byref @@ -464,7 +464,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* #endif // UNIX_AMD64_ABI return TARGET_POINTER_SIZE; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 CORINFO_CLASS_HANDLE argClass; CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass)); @@ -482,7 +482,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_MULTIREG_ARGS -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // Any structs that are larger than MAX_PASS_MULTIREG_BYTES are always passed by reference if (structSize > MAX_PASS_MULTIREG_BYTES) { @@ -496,7 +496,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* { var_types hfaType = GetHfaType(argClass); // set to float or double if it is an HFA, otherwise TYP_UNDEF bool isHfa = (hfaType != TYP_UNDEF); -#ifndef _TARGET_UNIX_ +#ifndef TARGET_UNIX if (info.compIsVarArgs) { // Arm64 Varargs ABI requires passing in general purpose @@ -504,7 +504,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* // to false to correctly pass as if it was not an HFA. 
isHfa = false; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX if (!isHfa) { // This struct is passed by reference using a single 'slot' @@ -513,7 +513,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* } // otherwise will we pass this struct by value in multiple registers } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // otherwise will we pass this struct by value in multiple registers #else NYI("unknown target"); @@ -853,7 +853,7 @@ void Compiler::eeDispVar(ICorDebugInfo::NativeVarInfo* var) } break; -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 case CodeGenInterface::VLT_REG_REG: printf("%s-%s", getRegName(var->loc.vlRegReg.vlrrReg1), getRegName(var->loc.vlRegReg.vlrrReg2)); break; @@ -892,7 +892,7 @@ void Compiler::eeDispVar(ICorDebugInfo::NativeVarInfo* var) case CodeGenInterface::VLT_FIXED_VA: printf("fxd_va[%d]", var->loc.vlFixedVarArg.vlfvOffset); break; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 default: unreached(); // unexpected @@ -1416,7 +1416,7 @@ const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd) const WCHAR* Compiler::eeGetCPString(size_t strHandle) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX return nullptr; #else char buff[512 + sizeof(CORINFO_String)]; @@ -1440,7 +1440,7 @@ const WCHAR* Compiler::eeGetCPString(size_t strHandle) } return (asString->chars); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } #endif // DEBUG diff --git a/src/coreclr/src/jit/emit.cpp b/src/coreclr/src/jit/emit.cpp index 52a9961e1b694..d621d5a060481 100644 --- a/src/coreclr/src/jit/emit.cpp +++ b/src/coreclr/src/jit/emit.cpp @@ -139,20 +139,20 @@ unsigned emitter::emitTotalIGextend; unsigned emitter::emitTotalIDescSmallCnt; unsigned emitter::emitTotalIDescCnt; unsigned emitter::emitTotalIDescJmpCnt; -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) unsigned emitter::emitTotalIDescLblCnt; -#endif // !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_ARM64) unsigned emitter::emitTotalIDescCnsCnt; unsigned emitter::emitTotalIDescDspCnt; unsigned emitter::emitTotalIDescCnsDspCnt; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH unsigned emitter::emitTotalIDescAmdCnt; unsigned emitter::emitTotalIDescCnsAmdCnt; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH unsigned emitter::emitTotalIDescCGCACnt; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM unsigned emitter::emitTotalIDescRelocCnt; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM unsigned emitter::emitSmallDspCnt; unsigned emitter::emitLargeDspCnt; @@ -227,40 +227,40 @@ void emitterStaticStats(FILE* fout) // fprintf(fout, "Size of _idAddrUnion= %2u\n", sizeof(((emitter::instrDesc*)0)->_idAddrUnion)); fprintf(fout, "Size of instrDescJmp = %2u\n", sizeof(emitter::instrDescJmp)); -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) fprintf(fout, "Size of instrDescLbl = %2u\n", sizeof(emitter::instrDescLbl)); -#endif // !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_ARM64) fprintf(fout, "Size of instrDescCns = %2u\n", sizeof(emitter::instrDescCns)); fprintf(fout, "Size of instrDescDsp = %2u\n", sizeof(emitter::instrDescDsp)); fprintf(fout, "Size of instrDescCnsDsp = %2u\n", sizeof(emitter::instrDescCnsDsp)); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH fprintf(fout, "Size of instrDescAmd = %2u\n", sizeof(emitter::instrDescAmd)); fprintf(fout, "Size of instrDescCnsAmd = %2u\n", sizeof(emitter::instrDescCnsAmd)); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH fprintf(fout, "Size of instrDescCGCA = %2u\n", sizeof(emitter::instrDescCGCA)); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM 
fprintf(fout, "Size of instrDescReloc = %2u\n", sizeof(emitter::instrDescReloc)); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM fprintf(fout, "\n"); fprintf(fout, "SC_IG_BUFFER_SIZE = %2u\n", SC_IG_BUFFER_SIZE); fprintf(fout, "SMALL_IDSC_SIZE per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / SMALL_IDSC_SIZE); fprintf(fout, "instrDesc per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDesc)); fprintf(fout, "instrDescJmp per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescJmp)); -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) fprintf(fout, "instrDescLbl per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescLbl)); -#endif // !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_ARM64) fprintf(fout, "instrDescCns per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescCns)); fprintf(fout, "instrDescDsp per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescDsp)); fprintf(fout, "instrDescCnsDsp per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescCnsDsp)); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH fprintf(fout, "instrDescAmd per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescAmd)); fprintf(fout, "instrDescCnsAmd per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescCnsAmd)); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH fprintf(fout, "instrDescCGCA per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescCGCA)); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM fprintf(fout, "instrDescReloc per IG buffer = %2u\n", SC_IG_BUFFER_SIZE / sizeof(emitter::instrDescReloc)); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM fprintf(fout, "\n"); fprintf(fout, "GCInfo::regPtrDsc:\n"); @@ -366,28 +366,28 @@ void emitterStats(FILE* fout) 100.0 * emitter::emitTotalIDescCnt / emitter::emitTotalInsCnt); fprintf(fout, "Total instrDescJmp: %8u (%5.2f%%)\n", emitter::emitTotalIDescJmpCnt, 100.0 * emitter::emitTotalIDescJmpCnt / emitter::emitTotalInsCnt); -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) fprintf(fout, "Total instrDescLbl: %8u (%5.2f%%)\n", emitter::emitTotalIDescLblCnt, 100.0 * emitter::emitTotalIDescLblCnt / emitter::emitTotalInsCnt); -#endif // !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_ARM64) fprintf(fout, "Total instrDescCns: %8u (%5.2f%%)\n", emitter::emitTotalIDescCnsCnt, 100.0 * emitter::emitTotalIDescCnsCnt / emitter::emitTotalInsCnt); fprintf(fout, "Total instrDescDsp: %8u (%5.2f%%)\n", emitter::emitTotalIDescDspCnt, 100.0 * emitter::emitTotalIDescDspCnt / emitter::emitTotalInsCnt); fprintf(fout, "Total instrDescCnsDsp: %8u (%5.2f%%)\n", emitter::emitTotalIDescCnsDspCnt, 100.0 * emitter::emitTotalIDescCnsDspCnt / emitter::emitTotalInsCnt); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH fprintf(fout, "Total instrDescAmd: %8u (%5.2f%%)\n", emitter::emitTotalIDescAmdCnt, 100.0 * emitter::emitTotalIDescAmdCnt / emitter::emitTotalInsCnt); fprintf(fout, "Total instrDescCnsAmd: %8u (%5.2f%%)\n", emitter::emitTotalIDescCnsAmdCnt, 100.0 * emitter::emitTotalIDescCnsAmdCnt / emitter::emitTotalInsCnt); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH fprintf(fout, "Total instrDescCGCA: %8u (%5.2f%%)\n", emitter::emitTotalIDescCGCACnt, 100.0 * emitter::emitTotalIDescCGCACnt / emitter::emitTotalInsCnt); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM fprintf(fout, "Total instrDescReloc: %8u (%5.2f%%)\n", emitter::emitTotalIDescRelocCnt, 100.0 * emitter::emitTotalIDescRelocCnt / emitter::emitTotalInsCnt); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM fprintf(fout, 
"\n"); } @@ -949,10 +949,10 @@ void emitter::emitBegFN(bool hasFramePtr emitEpilogSize = 0; emitEpilogCnt = 0; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH emitExitSeqBegLoc.Init(); emitExitSeqSize = INT_MAX; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH emitPlaceholderList = emitPlaceholderLast = nullptr; @@ -1284,8 +1284,8 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) #endif #ifdef PSEUDORANDOM_NOP_INSERTION - // TODO-ARM-Bug?: PSEUDORANDOM_NOP_INSERTION is not defined for _TARGET_ARM_ - // ARM - This is currently broken on _TARGET_ARM_ + // TODO-ARM-Bug?: PSEUDORANDOM_NOP_INSERTION is not defined for TARGET_ARM + // ARM - This is currently broken on TARGET_ARM // When nopSize is odd we misalign emitCurIGsize // if (!emitComp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && !emitInInstrumentation && @@ -1302,7 +1302,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) emitInInstrumentation = false; idnop->idInsFmt(IF_NONE); idnop->idIns(INS_nop); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) idnop->idCodeSize(nopSize); #else #error "Undefined target for pseudorandom NOP insertion" @@ -1336,7 +1336,7 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) // These fields should have been zero-ed by the above assert(id->idReg1() == regNumber(0)); assert(id->idReg2() == regNumber(0)); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(id->idCodeSize() == 0); #endif @@ -1391,9 +1391,9 @@ void* emitter::emitAllocAnyInstr(size_t sz, emitAttr opsz) // Amd64: ip-relative addressing is supported even when not generating relocatable ngen code if (EA_IS_DSP_RELOC(opsz) -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 && emitComp->opts.compReloc -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 ) { /* Mark idInfo()->idDspReloc to remember that the */ @@ -1573,9 +1573,9 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType, #endif // FEATURE_EH_FUNCLETS ) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 emitOutputPreEpilogNOP(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 extend = true; } @@ -2090,7 +2090,7 @@ bool emitter::emitHasEpilogEnd() #endif // JIT32_GCENCODER -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /***************************************************************************** * @@ -2104,7 +2104,7 @@ void emitter::emitStartExitSeq() emitExitSeqBegLoc.CaptureLocation(this); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH /***************************************************************************** * @@ -2150,11 +2150,11 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi) assert(offsHi >= 0); } else -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) if (!emitComp->compIsProfilerHookNeeded()) #endif { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // doesn't have to be all negative on amd printf("-%04X ... 
%04X\n", -offsLo, offsHi); #else @@ -2162,7 +2162,7 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi) assert(offsHi <= 0); #endif } -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) else { // Under profiler due to prespilling of arguments, offHi need not be < 0 @@ -2336,7 +2336,7 @@ bool emitter::emitNoGChelper(CorInfoHelpFunc helpFunc) // case CORINFO_HELP_ULDIV: // case CORINFO_HELP_ULMOD: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case CORINFO_HELP_ASSIGN_REF_EAX: case CORINFO_HELP_ASSIGN_REF_ECX: case CORINFO_HELP_ASSIGN_REF_EBX: @@ -2414,12 +2414,12 @@ void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMas emitThisGCrefRegs = emitInitGCrefRegs = gcrefRegs; emitThisByrefRegs = emitInitByrefRegs = byrefRegs; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (isFinallyTarget) { emitCurIG->igFlags |= IGF_FINALLY_TARGET; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG if (EMIT_GC_VERBOSE) @@ -2448,7 +2448,7 @@ void* emitter::emitAddInlineLabel() return emitCurIG; } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Does the argument location point to an IG at the end of a function or funclet? // We can ignore the codePos part of the location, since it doesn't affect the @@ -2791,11 +2791,11 @@ void emitter::emitWalkIDs(emitLocation* locFrom, emitProcessInstrFunc_t processF void emitter::emitGenerateUnwindNop(instrDesc* id, void* context) { Compiler* comp = (Compiler*)context; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) comp->unwindNop(id->idCodeSize()); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) comp->unwindNop(); -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) } /***************************************************************************** @@ -2809,9 +2809,9 @@ void emitter::emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp) emitWalkIDs(locFrom, emitGenerateUnwindNop, comp); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) /***************************************************************************** * @@ -2834,7 +2834,7 @@ unsigned emitter::emitGetInstructionSize(emitLocation* emitLoc) return id->idCodeSize(); } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) /*****************************************************************************/ #ifdef DEBUG @@ -3191,12 +3191,12 @@ void emitter::emitDispIGflags(unsigned flags) { printf(", byref"); } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (flags & IGF_FINALLY_TARGET) { printf(", ftarget"); } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (flags & IGF_FUNCLET_PROLOG) { printf(", funclet prolog"); @@ -3487,9 +3487,9 @@ size_t emitter::emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp) /* The instruction size estimate wasn't accurate; remember this */ ig->igFlags |= IGF_UPD_ISZ; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) id->idCodeSize(csz); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // This is done as part of emitSetShortJump(); // insSize isz = emitInsSize(id->idInsFmt()); // id->idInsSize(isz); @@ -3570,9 +3570,9 @@ void emitter::emitJumpDistBind() // to a small jump. 
If it is small enough, we will iterate in hopes of // converting those jumps we missed converting the first (or second...) time. -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) UNATIVE_OFFSET minMediumExtra; // Same as 'minShortExtra', but for medium-sized jumps. -#endif // _TARGET_ARM_ +#endif // TARGET_ARM UNATIVE_OFFSET adjIG; UNATIVE_OFFSET adjLJ; @@ -3608,9 +3608,9 @@ void emitter::emitJumpDistBind() adjIG = 0; minShortExtra = (UNATIVE_OFFSET)-1; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) minMediumExtra = (UNATIVE_OFFSET)-1; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM for (jmp = emitJumpList; jmp; jmp = jmp->idjNext) { @@ -3623,12 +3623,12 @@ void emitter::emitJumpDistBind() NATIVE_OFFSET nsd = 0; // small jump max. neg distance NATIVE_OFFSET psd = 0; // small jump max. pos distance -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) UNATIVE_OFFSET msz = 0; // medium jump size NATIVE_OFFSET nmd = 0; // medium jump max. neg distance NATIVE_OFFSET pmd = 0; // medium jump max. pos distance NATIVE_OFFSET mextra; // How far beyond the medium jump range is this jump offset? -#endif // _TARGET_ARM_ +#endif // TARGET_ARM NATIVE_OFFSET extra; // How far beyond the short jump range is this jump offset? UNATIVE_OFFSET srcInstrOffs; // offset of the source instruction of the jump @@ -3639,7 +3639,7 @@ void emitter::emitJumpDistBind() UNATIVE_OFFSET oldSize; UNATIVE_OFFSET sizeDif; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(jmp->idInsFmt() == IF_LABEL || jmp->idInsFmt() == IF_RWR_LABEL || jmp->idInsFmt() == IF_SWR_LABEL); /* Figure out the smallest size we can end up with */ @@ -3659,9 +3659,9 @@ void emitter::emitJumpDistBind() psd = JMP_DIST_SMALL_MAX_POS; } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM assert((jmp->idInsFmt() == IF_T2_J1) || (jmp->idInsFmt() == IF_T2_J2) || (jmp->idInsFmt() == IF_T1_I) || (jmp->idInsFmt() == IF_T1_K) || (jmp->idInsFmt() == IF_T1_M) || (jmp->idInsFmt() == IF_T2_M1) || (jmp->idInsFmt() == IF_T2_N1) || (jmp->idInsFmt() == IF_T1_J3) || (jmp->idInsFmt() == IF_LARGEJMP)); @@ -3700,9 +3700,9 @@ void emitter::emitJumpDistBind() { assert(!"Unknown jump instruction"); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 /* Figure out the smallest size we can end up with */ if (emitIsCondJump(jmp)) @@ -3735,7 +3735,7 @@ void emitter::emitJumpDistBind() { assert(!"Unknown jump instruction"); } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 /* Make sure the jumps are properly ordered */ @@ -3795,7 +3795,7 @@ void emitter::emitJumpDistBind() // If this is a jump via register, the instruction size does not change, so we are done. CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // JIT code and data will be allocated together for arm64 so the relative offset to JIT data is known. // In case such offset can be encodeable for `ldr` (+-1MB), shorten it. 
if (jmp->idAddr()->iiaIsJitDataOffset()) @@ -3890,7 +3890,7 @@ void emitter::emitJumpDistBind() // We should not be jumping/branching across funclets/functions emitCheckFuncletBranch(jmp, jmpIG); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /* Done if this is not a variable-sized jump */ if ((jmp->idIns() == INS_push) || (jmp->idIns() == INS_mov) || (jmp->idIns() == INS_call) || @@ -3899,14 +3899,14 @@ void emitter::emitJumpDistBind() continue; } #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if ((jmp->idIns() == INS_push) || (jmp->idIns() == INS_mov) || (jmp->idIns() == INS_movt) || (jmp->idIns() == INS_movw)) { continue; } #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // There is only one size of unconditional branch; we don't support functions larger than 2^28 bytes (our branch // range). if (emitIsUncondJump(jmp)) @@ -3932,10 +3932,10 @@ void emitter::emitJumpDistBind() /* Note that the destination is always the beginning of an IG, so no need for an offset inside it */ dstOffs = tgtIG->igOffs; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) srcEncodingOffs = srcInstrOffs + 4; // For relative branches, ARM PC is always considered to be the instruction address + 4 -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) srcEncodingOffs = srcInstrOffs; // For relative branches, ARM64 PC is always considered to be the instruction address #else @@ -4051,7 +4051,7 @@ void emitter::emitJumpDistBind() minShortExtra = (unsigned)extra; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // If we're here, we couldn't convert to a small jump. // Handle conversion to medium-sized conditional jumps. @@ -4123,7 +4123,7 @@ void emitter::emitJumpDistBind() minMediumExtra = (unsigned)mextra; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM /***************************************************************************** * We arrive here if the jump must stay long, at least for now. @@ -4154,15 +4154,15 @@ void emitter::emitJumpDistBind() assert(oldSize >= jsz); sizeDif = oldSize - jsz; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) jmp->idCodeSize(jsz); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #if 0 // This is done as part of emitSetShortJump(): insSize isz = emitInsSize(jmp->idInsFmt()); jmp->idInsSize(isz); #endif -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // The size of IF_LARGEJMP/IF_LARGEADR/IF_LARGELDC are 8 or 12. // All other code size is 4. assert((sizeDif == 4) || (sizeDif == 8)); @@ -4172,7 +4172,7 @@ void emitter::emitJumpDistBind() goto NEXT_JMP; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) /*****************************************************************************/ /* Handle conversion to medium jump */ @@ -4199,7 +4199,7 @@ void emitter::emitJumpDistBind() goto NEXT_JMP; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM /*****************************************************************************/ @@ -4261,7 +4261,7 @@ void emitter::emitJumpDistBind() /* Is there a chance of other jumps becoming short? 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (EMITVERBOSE) printf("Total shrinkage = %3u, min extra short jump size = %3u, min extra medium jump size = %u\n", adjIG, minShortExtra, minMediumExtra); @@ -4274,9 +4274,9 @@ void emitter::emitJumpDistBind() #endif if ((minShortExtra <= adjIG) -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) || (minMediumExtra <= adjIG) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM ) { jmp_iteration++; @@ -4312,7 +4312,7 @@ void emitter::emitCheckFuncletBranch(instrDesc* jmp, insGroup* jmpIG) // meets one of those criteria... assert(jmp->idIsBound()); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // An lea of a code address (for constant data stored with the code) // is treated like a jump for emission purposes but is not really a jump so // we don't have to check anything here. @@ -4322,22 +4322,22 @@ void emitter::emitCheckFuncletBranch(instrDesc* jmp, insGroup* jmpIG) } #endif -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH if (jmp->idAddr()->iiaHasInstrCount()) { // Too hard to figure out funclets from just an instruction count // You're on your own! return; } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // No interest if it's not jmp. if (emitIsLoadLabel(jmp) || emitIsLoadConstant(jmp)) { return; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 insGroup* tgtIG = jmp->idAddr()->iiaIGlabel; assert(tgtIG); @@ -4553,18 +4553,18 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, emitEpilogSize = 0; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH emitExitSeqSize = 0; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } /* Return the size of the epilog to the caller */ *epilogSize = emitEpilogSize; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH *epilogSize += emitExitSeqSize; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH #ifdef DEBUG if (EMIT_INSTLIST_VERBOSE) @@ -4586,7 +4586,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, CorJitAllocMemFlag allocMemFlag = CORJIT_ALLOCMEM_DEFAULT_CODE_ALIGN; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // // These are the heuristics we use to decide whether or not to force the // code to be 16-byte aligned. @@ -4620,7 +4620,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, allocMemFlag = static_cast(allocMemFlag | CORJIT_ALLOCMEM_FLG_RODATA_16BYTE_ALIGN); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // For arm64, we want to allocate JIT data always adjacent to code similar to what native compiler does. // This way allows us to use a single `ldr` to access such data like float constant/jmp table. if (emitTotalColdCodeSize > 0) @@ -5075,7 +5075,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, { for (instrDescJmp* jmp = emitJumpList; jmp != nullptr; jmp = jmp->idjNext) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(jmp->idInsFmt() == IF_LABEL || jmp->idInsFmt() == IF_RWR_LABEL || jmp->idInsFmt() == IF_SWR_LABEL); #endif insGroup* tgt = jmp->idAddr()->iiaIGlabel; @@ -5089,7 +5089,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, { BYTE* adr = jmp->idjTemp.idjAddr; int adj = jmp->idjOffs - tgt->igOffs; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On Arm, the offset is encoded in unit of 2 bytes. 
adj >>= 1; #endif @@ -5097,7 +5097,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, #if DEBUG_EMIT if ((jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM) || (INTERESTING_JUMP_NUM == 0)) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM printf("[5] This output is broken for ARM, since it doesn't properly decode the jump offsets of " "the instruction at adr\n"); #endif @@ -5124,13 +5124,13 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, { // Patch Forward Short Jump CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) *(BYTE*)adr -= (BYTE)adj; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // The following works because the jump offset is in the low order bits of the instruction. // Presumably we could also just call "emitOutputLJ(NULL, adr, jmp)", like for long jumps? *(short int*)adr -= (short)adj; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) assert(!jmp->idAddr()->iiaHasInstrCount()); emitOutputLJ(NULL, adr, jmp); #else @@ -5141,9 +5141,9 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, { // Patch Forward non-Short Jump CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) *(int*)adr -= adj; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) assert(!jmp->idAddr()->iiaHasInstrCount()); emitOutputLJ(NULL, adr, jmp); #else @@ -5219,7 +5219,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, // See specification comment at the declaration. void emitter::emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp) { -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // We only emit this GC information on targets where finally's are implemented via funclets, // and the finally is invoked, during non-exceptional execution, via a branch with a predefined // link register, rather than a "true call" for which we would already generate GC info. 
Currently, @@ -5236,7 +5236,7 @@ void emitter::emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp) emitRecordGCcall(cp, /*callInstrSize*/ 1); } } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } /***************************************************************************** @@ -5638,7 +5638,7 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst) // Append the appropriate address to the destination BYTE* target = emitOffsetToPtr(lab->igOffs); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM target = (BYTE*)((size_t)target | 1); // Or in thumb bit #endif bDst[i] = (target_size_t)target; @@ -5765,7 +5765,7 @@ void emitter::emitDispDataSec(dataSecDsc* section) } else { -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT // We have a 32-BIT target if (emitComp->opts.disDiffable) { @@ -5775,7 +5775,7 @@ void emitter::emitDispDataSec(dataSecDsc* section) { printf("dd\t%08Xh", reinterpret_cast(emitOffsetToPtr(ig->igOffs))); } -#else // _TARGET_64BIT_ +#else // TARGET_64BIT // We have a 64-BIT target if (emitComp->opts.disDiffable) { @@ -5785,7 +5785,7 @@ void emitter::emitDispDataSec(dataSecDsc* section) { printf("dq\t%016llXh", reinterpret_cast(emitOffsetToPtr(ig->igOffs))); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } if (!emitComp->opts.disDiffable) @@ -6354,10 +6354,10 @@ unsigned char emitter::emitOutputByte(BYTE* dst, ssize_t val) { printf("; emit_byte 0%02XH\n", val & 0xFF); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // if we're emitting code bytes, ensure that we've already emitted the rex prefix! assert(((val & 0xFF00000000LL) == 0) || ((val & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL)); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif return sizeof(unsigned char); @@ -6377,10 +6377,10 @@ unsigned char emitter::emitOutputWord(BYTE* dst, ssize_t val) { printf("; emit_word 0%02XH,0%02XH\n", (val & 0xFF), (val >> 8) & 0xFF); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // if we're emitting code bytes, ensure that we've already emitted the rex prefix! assert(((val & 0xFF00000000LL) == 0) || ((val & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL)); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif return sizeof(short); @@ -6400,10 +6400,10 @@ unsigned char emitter::emitOutputLong(BYTE* dst, ssize_t val) { printf("; emit_long 0%08XH\n", (int)val); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // if we're emitting code bytes, ensure that we've already emitted the rex prefix! assert(((val & 0xFF00000000LL) == 0) || ((val & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL)); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif return sizeof(int); @@ -6421,11 +6421,11 @@ unsigned char emitter::emitOutputSizeT(BYTE* dst, ssize_t val) #ifdef DEBUG if (emitComp->opts.dspEmit) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 printf("; emit_size_t 0%016llXH\n", val); -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 printf("; emit_size_t 0%08XH\n", val); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } #endif // DEBUG @@ -6444,7 +6444,7 @@ unsigned char emitter::emitOutputSizeT(BYTE* dst, ssize_t val) // Same as wrapped function. 
// -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) unsigned char emitter::emitOutputByte(BYTE* dst, size_t val) { return emitOutputByte(dst, (ssize_t)val); @@ -6484,7 +6484,7 @@ unsigned char emitter::emitOutputSizeT(BYTE* dst, unsigned __int64 val) { return emitOutputSizeT(dst, (ssize_t)val); } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) /***************************************************************************** * @@ -7018,21 +7018,21 @@ void emitter::emitNxtIG(bool extend) target_ssize_t emitter::emitGetInsSC(instrDesc* id) { -#ifdef _TARGET_ARM_ // should it be _TARGET_ARMARCH_? Why do we need this? Note that on ARM64 we store scaled immediates - // for some formats +#ifdef TARGET_ARM // should it be TARGET_ARMARCH? Why do we need this? Note that on ARM64 we store scaled immediates + // for some formats if (id->idIsLclVar()) { int varNum = id->idAddr()->iiaLclVar.lvaVarNum(); regNumber baseReg; int offs = id->idAddr()->iiaLclVar.lvaOffset(); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) int adr = emitComp->lvaFrameAddress(varNum, id->idIsLclFPBase(), &baseReg, offs, CodeGen::instIsFP(id->idIns())); int dsp = adr + offs; if ((id->idIns() == INS_sub) || (id->idIns() == INS_subw)) dsp = -dsp; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // TODO-ARM64-Cleanup: this is currently unreachable. Do we need it? bool FPbased; int adr = emitComp->lvaFrameAddress(varNum, &FPbased); @@ -7043,7 +7043,7 @@ target_ssize_t emitter::emitGetInsSC(instrDesc* id) return dsp; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (id->idIsLargeCns()) { return ((instrDescCns*)id)->idcCnsVal; @@ -7054,14 +7054,14 @@ target_ssize_t emitter::emitGetInsSC(instrDesc* id) } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM BYTE* emitter::emitGetInsRelocValue(instrDesc* id) { return ((instrDescReloc*)id)->idrRelocVal; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM /*****************************************************************************/ #if EMIT_TRACK_STACK_DEPTH @@ -7476,7 +7476,7 @@ void emitter::emitRecordRelocation(void* location, /* IN */ #endif // defined(LATE_DISASM) } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM /***************************************************************************** * A helper for handling a Thumb-Mov32 of position-independent (PC-relative) value * @@ -7497,7 +7497,7 @@ void emitter::emitHandlePCRelativeMov32(void* location, /* IN */ emitRecordRelocation(location, target, IMAGE_REL_BASED_THUMB_MOV32); } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM /***************************************************************************** * A helper for recording a call site with the EE. @@ -7669,15 +7669,15 @@ regMaskTP emitter::emitGetGCRegsKilledByNoGCCall(CorInfoHelpFunc helper) switch (helper) { case CORINFO_HELP_ASSIGN_BYREF: -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // This helper only trashes ECX. result = RBM_ECX; break; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // This uses and defs RDI and RSI. 
result = RBM_CALLEE_TRASH_NOGC & ~(RBM_RDI | RBM_RSI); break; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) result = RBM_CALLEE_GCTRASH_WRITEBARRIER_BYREF; break; #else @@ -7696,18 +7696,18 @@ regMaskTP emitter::emitGetGCRegsKilledByNoGCCall(CorInfoHelpFunc helper) result = RBM_PROFILER_TAILCALL_TRASH; break; -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) case CORINFO_HELP_ASSIGN_REF: case CORINFO_HELP_CHECKED_ASSIGN_REF: result = RBM_CALLEE_GCTRASH_WRITEBARRIER; break; -#endif // defined(_TARGET_ARMARCH_) +#endif // defined(TARGET_ARMARCH) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) case CORINFO_HELP_INIT_PINVOKE_FRAME: result = RBM_INIT_PINVOKE_FRAME_TRASH; break; -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) default: result = RBM_CALLEE_TRASH_NOGC; diff --git a/src/coreclr/src/jit/emit.h b/src/coreclr/src/jit/emit.h index 78e34087e1fea..cec64ecb81188 100644 --- a/src/coreclr/src/jit/emit.h +++ b/src/coreclr/src/jit/emit.h @@ -221,12 +221,12 @@ enum insGroupPlaceholderType : unsigned char #endif // FEATURE_EH_FUNCLETS }; -#if defined(_MSC_VER) && defined(_TARGET_ARM_) +#if defined(_MSC_VER) && defined(TARGET_ARM) // ARM aligns structures that contain 64-bit ints or doubles on 64-bit boundaries. This causes unwanted // padding to be added to the end, so sizeof() is unnecessarily big. #pragma pack(push) #pragma pack(4) -#endif // defined(_MSC_VER) && defined(_TARGET_ARM_) +#endif // defined(_MSC_VER) && defined(TARGET_ARM) struct insPlaceholderGroupData { @@ -261,9 +261,9 @@ struct insGroup #define IGF_GC_VARS 0x0001 // new set of live GC ref variables #define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #define IGF_FINALLY_TARGET 0x0004 // this group is the start of a basic block that is returned to after a finally. -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #define IGF_FUNCLET_PROLOG 0x0008 // this group belongs to a funclet prolog #define IGF_FUNCLET_EPILOG 0x0010 // this group belongs to a funclet epilog. 
#define IGF_EPILOG 0x0020 // this group belongs to a main function epilog @@ -352,9 +352,9 @@ struct insGroup // #define MAX_PLACEHOLDER_IG_SIZE 256 -#if defined(_MSC_VER) && defined(_TARGET_ARM_) +#if defined(_MSC_VER) && defined(TARGET_ARM) #pragma pack(pop) -#endif // defined(_MSC_VER) && defined(_TARGET_ARM_) +#endif // defined(_MSC_VER) && defined(TARGET_ARM) /*****************************************************************************/ @@ -420,9 +420,9 @@ class emitter emitVarRefOffs = 0; #endif // DEBUG -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH SetUseVEXEncoding(false); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH emitDataSecCur = nullptr; } @@ -453,7 +453,7 @@ class emitter OPSZ16 = 4, OPSZ32 = 5, OPSZ_COUNT = 6, -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 OPSZP = OPSZ8, #else OPSZP = OPSZ4, @@ -542,7 +542,7 @@ class emitter #endif // DEBUG -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM unsigned insEncodeSetFlags(insFlags sf); enum insSize : unsigned @@ -557,7 +557,7 @@ class emitter unsigned insEncodePUW_G0(insOpts opt, int imm); unsigned insEncodePUW_H0(insOpts opt, int imm); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM struct instrDescCns; @@ -565,19 +565,19 @@ class emitter { private: // The assembly instruction -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) static_assert_no_msg(INS_count <= 1024); instruction _idIns : 10; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) static_assert_no_msg(INS_count <= 512); instruction _idIns : 9; -#else // !(defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)) +#else // !(defined(TARGET_XARCH) || defined(TARGET_ARM64)) static_assert_no_msg(INS_count <= 256); instruction _idIns : 8; -#endif // !(defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)) +#endif // !(defined(TARGET_XARCH) || defined(TARGET_ARM64)) // The format for the instruction -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) static_assert_no_msg(IF_COUNT <= 128); insFormat _idInsFmt : 7; #else @@ -602,7 +602,7 @@ class emitter } void idInsFmt(insFormat insFmt) { -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) noway_assert(insFmt != IF_NONE); // Only the x86 emitter uses IF_NONE, it is invalid for ARM64 (and ARM32) #endif assert(insFmt < IF_COUNT); @@ -623,12 +623,12 @@ class emitter // arm64: 17 bits private: -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) unsigned _idCodeSize : 4; // size of instruction in bytes. Max size of an Intel instruction is 15 bytes. opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32 // At this point we have fully consumed first DWORD so that next field // doesn't cross a byte boundary. 
-#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Moved the definition of '_idOpSize' later so that we don't cross a 32-bit boundary when laying out bitfields #else // ARM opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8 @@ -674,13 +674,13 @@ class emitter unsigned _idCallAddr : 1; // IL indirect calls: can make a direct call to iiaAddr unsigned _idNoGC : 1; // Some helpers don't get recorded in GC tables -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16 insOpts _idInsOpt : 6; // options for instructions unsigned _idLclVar : 1; // access a local on stack #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM insSize _idInsSize : 2; // size of instruction: 16, 32 or 48 bits insFlags _idInsFlags : 1; // will this instruction set the flags unsigned _idLclVar : 1; // access a local on stack @@ -690,10 +690,10 @@ class emitter // For arm we have used 16 bits #define ID_EXTRA_BITFIELD_BITS (16) -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // For Arm64, we have used 17 bits from the second DWORD. #define ID_EXTRA_BITFIELD_BITS (17) -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) // For xarch, we have used 14 bits from the second DWORD. #define ID_EXTRA_BITFIELD_BITS (14) #else @@ -795,7 +795,7 @@ class emitter // TODO-Cleanup: We should really add a DEBUG-only tag to this union so we can add asserts // about reading what we think is here, to avoid unexpected corruption issues. -#ifndef _TARGET_ARM64_ +#ifndef TARGET_ARM64 emitLclVarAddr iiaLclVar; #endif BasicBlock* iiaBBlabel; @@ -808,7 +808,7 @@ class emitter bool iiaIsJitDataOffset() const; int iiaGetJitDataOffset() const; -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // iiaEncodedInstrCount and its accessor functions are used to specify an instruction // count for jumps, instead of using a label and multiple blocks. 
This is used in the @@ -832,7 +832,7 @@ class emitter struct { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // For 64-bit architecture this 32-bit structure can pack with these unsigned bit fields emitLclVarAddr iiaLclVar; unsigned _idReg3Scaled : 1; // Reg3 is scaled by idOpSize bits @@ -841,13 +841,13 @@ class emitter regNumber _idReg3 : REGNUM_BITS; regNumber _idReg4 : REGNUM_BITS; }; -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) struct { regNumber _idReg3 : REGNUM_BITS; regNumber _idReg4 : REGNUM_BITS; }; -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) } _idAddrUnion; @@ -862,7 +862,7 @@ class emitter _idSmallDsc = 1; } -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) unsigned idCodeSize() const { @@ -885,7 +885,7 @@ class emitter assert(sz == _idCodeSize); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) unsigned idCodeSize() const { int size = 4; @@ -916,7 +916,7 @@ class emitter return size; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) bool idInstrIsT1() const { @@ -945,7 +945,7 @@ class emitter _idInsFlags = sf; assert(sf == _idInsFlags); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM emitAttr idOpSize() { @@ -975,7 +975,7 @@ class emitter assert(reg == _idReg1); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 GCtype idGCrefReg2() const { assert(!idIsSmallDsc()); @@ -986,7 +986,7 @@ class emitter assert(!idIsSmallDsc()); idAddr()->_idGCref2 = gctype; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 regNumber idReg2() const { @@ -998,7 +998,7 @@ class emitter assert(reg == _idReg2); } -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) regNumber idReg3() const { assert(!idIsSmallDsc()); @@ -1021,8 +1021,8 @@ class emitter idAddr()->_idReg4 = reg; assert(reg == idAddr()->_idReg4); } -#endif // defined(_TARGET_XARCH_) -#ifdef _TARGET_ARMARCH_ +#endif // defined(TARGET_XARCH) +#ifdef TARGET_ARMARCH insOpts idInsOpt() const { return (insOpts)_idInsOpt; @@ -1055,7 +1055,7 @@ class emitter idAddr()->_idReg4 = reg; assert(reg == idAddr()->_idReg4); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 bool idReg3Scaled() const { assert(!idIsSmallDsc()); @@ -1066,9 +1066,9 @@ class emitter assert(!idIsSmallDsc()); idAddr()->_idReg3Scaled = val ? 
1 : 0; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH inline static bool fitsInSmallCns(ssize_t val) { @@ -1145,7 +1145,7 @@ class emitter _idNoGC = val; } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH bool idIsLclVar() const { return _idLclVar != 0; @@ -1154,9 +1154,9 @@ class emitter { _idLclVar = 1; } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) bool idIsLclFPBase() const { return _idLclFPBase != 0; @@ -1165,7 +1165,7 @@ class emitter { _idLclFPBase = 1; } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) bool idIsCnsReloc() const { @@ -1212,9 +1212,9 @@ class emitter } }; // End of struct instrDesc -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) insFormat getMemoryOperation(instrDesc* id); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) void getMemoryOperation(instrDesc* id, unsigned* pMemAccessKind, bool* pIsLocalAccess); #endif @@ -1273,7 +1273,7 @@ class emitter #define PERFSCORE_LATENCY_BRANCH_COND 2.0f // includes cost of a possible misprediction #define PERFSCORE_LATENCY_BRANCH_INDIRECT 2.0f // includes cost of a possible misprediction -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // a read,write or modify from stack location, possible def to use latency from L0 cache #define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_2C @@ -1291,7 +1291,7 @@ class emitter #define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_3C #define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_6C -#elif defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#elif defined(TARGET_ARM64) || defined(TARGET_ARM) // a read,write or modify from stack location, possible def to use latency from L0 cache #define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C @@ -1364,12 +1364,12 @@ class emitter // hot to cold and cold to hot jumps) }; -#if !defined(_TARGET_ARM64_) // This shouldn't be needed for ARM32, either, but I don't want to touch the ARM32 JIT. +#if !defined(TARGET_ARM64) // This shouldn't be needed for ARM32, either, but I don't want to touch the ARM32 JIT. struct instrDescLbl : instrDescJmp { emitLclVarAddr dstLclVar; }; -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 struct instrDescCns : instrDesc // large const { @@ -1387,7 +1387,7 @@ class emitter int iddcDspVal; }; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH struct instrDescAmd : instrDesc // large addrmode disp { @@ -1400,7 +1400,7 @@ class emitter ssize_t idacAmdVal; }; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH struct instrDescCGCA : instrDesc // call with ... { @@ -1433,7 +1433,7 @@ class emitter #endif // MULTIREG_HAS_SECOND_GC_RET }; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM struct instrDescReloc : instrDesc { @@ -1442,7 +1442,7 @@ class emitter BYTE* emitGetInsRelocValue(instrDesc* id); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM insUpdateModes emitInsUpdateMode(instruction ins); insFormat emitInsModeFormat(instruction ins, insFormat base); @@ -1455,7 +1455,7 @@ class emitter size_t emitGetInstrDescSize(const instrDesc* id); size_t emitGetInstrDescSizeSC(const instrDesc* id); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH ssize_t emitGetInsCns(instrDesc* id); ssize_t emitGetInsDsp(instrDesc* id); @@ -1467,7 +1467,7 @@ class emitter // Return the argument count for a direct call "id". 
int emitGetInsCDinfo(instrDesc* id); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH target_ssize_t emitGetInsSC(instrDesc* id); unsigned emitInsCount; @@ -1514,13 +1514,13 @@ class emitter unsigned emitEpilogCnt; UNATIVE_OFFSET emitEpilogSize; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH void emitStartExitSeq(); // Mark the start of the "return" sequence emitLocation emitExitSeqBegLoc; UNATIVE_OFFSET emitExitSeqSize; // minimum size of any return sequence - the 'ret' after the epilog -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH insGroup* emitPlaceholderList; // per method placeholder list - head insGroup* emitPlaceholderLast; // per method placeholder list - tail @@ -1648,7 +1648,7 @@ class emitter unsigned char emitOutputLong(BYTE* dst, ssize_t val); unsigned char emitOutputSizeT(BYTE* dst, ssize_t val); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) unsigned char emitOutputByte(BYTE* dst, size_t val); unsigned char emitOutputWord(BYTE* dst, size_t val); unsigned char emitOutputLong(BYTE* dst, size_t val); @@ -1658,7 +1658,7 @@ class emitter unsigned char emitOutputWord(BYTE* dst, unsigned __int64 val); unsigned char emitOutputLong(BYTE* dst, unsigned __int64 val); unsigned char emitOutputSizeT(BYTE* dst, unsigned __int64 val); -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) size_t emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp); size_t emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp); @@ -1699,7 +1699,7 @@ class emitter /* The logic that creates and keeps track of instruction groups */ /************************************************************************/ -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // The only place where this limited instruction group size is a problem is // in the prolog, where we only support a single instruction group. We should really fix that. // ARM32 and ARM64 both can require a bigger prolog instruction group. One scenario is where @@ -1708,9 +1708,9 @@ class emitter // ugly code like "movw r10, 0x488; add r10, sp; vstr s0, [r10]" for each store, which // eats up our insGroup buffer. #define SC_IG_BUFFER_SIZE (100 * sizeof(emitter::instrDesc) + 14 * SMALL_IDSC_SIZE) -#else // !_TARGET_ARMARCH_ +#else // !TARGET_ARMARCH #define SC_IG_BUFFER_SIZE (50 * sizeof(emitter::instrDesc) + 14 * SMALL_IDSC_SIZE) -#endif // !_TARGET_ARMARCH_ +#endif // !TARGET_ARMARCH size_t emitIGbuffSize; @@ -1847,7 +1847,7 @@ class emitter // continues to track GC info as if there was no label. 
void* emitAddInlineLabel(); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH void emitGetInstrDescs(insGroup* ig, instrDesc** id, int* insCnt); @@ -1861,9 +1861,9 @@ class emitter static void emitGenerateUnwindNop(instrDesc* id, void* context); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void emitMarkStackLvl(unsigned stackLevel); #endif @@ -1894,7 +1894,7 @@ class emitter return (instrDescJmp*)emitAllocAnyInstr(sizeof(instrDescJmp), EA_1BYTE); } -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) instrDescLbl* emitAllocInstrLbl() { #if EMITTER_STATS @@ -1902,7 +1902,7 @@ class emitter #endif // EMITTER_STATS return (instrDescLbl*)emitAllocAnyInstr(sizeof(instrDescLbl), EA_4BYTE); } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 instrDescCns* emitAllocInstrCns(emitAttr attr) { @@ -1936,7 +1936,7 @@ class emitter return (instrDescCnsDsp*)emitAllocAnyInstr(sizeof(instrDescCnsDsp), attr); } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH instrDescAmd* emitAllocInstrAmd(emitAttr attr) { @@ -1954,7 +1954,7 @@ class emitter return (instrDescCnsAmd*)emitAllocAnyInstr(sizeof(instrDescCnsAmd), attr); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH instrDescCGCA* emitAllocInstrCGCA(emitAttr attr) { @@ -1970,14 +1970,14 @@ class emitter instrDesc* emitNewInstrCns(emitAttr attr, target_ssize_t cns); instrDesc* emitNewInstrDsp(emitAttr attr, target_ssize_t dsp); instrDesc* emitNewInstrCnsDsp(emitAttr attr, target_ssize_t cns, int dsp); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM instrDesc* emitNewInstrReloc(emitAttr attr, BYTE* addr); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM instrDescJmp* emitNewInstrJmp(); -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) instrDescLbl* emitNewInstrLbl(); -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 static const BYTE emitFmtToOps[]; @@ -2023,7 +2023,7 @@ class emitter void emitInsSanityCheck(instrDesc* id); #endif -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Returns true if instruction "id->idIns()" writes to a register that might be used to contain a GC // pointer. This exempts the SP and PC registers, and floating point registers. Memory access // instructions that pre- or post-increment their memory address registers are *not* considered to write @@ -2041,7 +2041,7 @@ class emitter // Returns "true" if instruction "id->idIns()" writes to a LclVar stack slot pair. 
bool emitInsWritesToLclVarStackLocPair(instrDesc* id); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH /************************************************************************/ /* The following is used to distinguish helper vs non-helper calls */ @@ -2208,7 +2208,7 @@ class emitter WORD slotNum = 0, /* IN */ INT32 addlDelta = 0); /* IN */ -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void emitHandlePCRelativeMov32(void* location, /* IN */ void* target); /* IN */ #endif @@ -2253,20 +2253,20 @@ class emitter static unsigned emitTotalIDescSmallCnt; static unsigned emitTotalIDescCnt; static unsigned emitTotalIDescJmpCnt; -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) static unsigned emitTotalIDescLblCnt; -#endif // !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_ARM64) static unsigned emitTotalIDescCnsCnt; static unsigned emitTotalIDescDspCnt; static unsigned emitTotalIDescCnsDspCnt; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH static unsigned emitTotalIDescAmdCnt; static unsigned emitTotalIDescCnsAmdCnt; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH static unsigned emitTotalIDescCGCACnt; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM static unsigned emitTotalIDescRelocCnt; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM static size_t emitTotMemAlloc; @@ -2472,12 +2472,12 @@ inline emitter::instrDescJmp* emitter::emitNewInstrJmp() return emitAllocInstrJmp(); } -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) inline emitter::instrDescLbl* emitter::emitNewInstrLbl() { return emitAllocInstrLbl(); } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 inline emitter::instrDesc* emitter::emitNewInstrDsp(emitAttr attr, target_ssize_t dsp) { @@ -2623,7 +2623,7 @@ inline size_t emitter::emitGetInstrDescSizeSC(const instrDesc* id) } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM inline emitter::instrDesc* emitter::emitNewInstrReloc(emitAttr attr, BYTE* addr) { @@ -2641,9 +2641,9 @@ inline emitter::instrDesc* emitter::emitNewInstrReloc(emitAttr attr, BYTE* addr) return id; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /***************************************************************************** * @@ -2691,7 +2691,7 @@ inline unsigned emitter::emitGetInsCIargs(instrDesc* id) } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH /***************************************************************************** * diff --git a/src/coreclr/src/jit/emitarm.cpp b/src/coreclr/src/jit/emitarm.cpp index 05964694b02d6..8c60e0fe7c395 100644 --- a/src/coreclr/src/jit/emitarm.cpp +++ b/src/coreclr/src/jit/emitarm.cpp @@ -15,7 +15,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) /*****************************************************************************/ /*****************************************************************************/ @@ -7999,4 +7999,4 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins #endif // defined(DEBUG) || defined(LATE_DISASM) -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) diff --git a/src/coreclr/src/jit/emitarm.h b/src/coreclr/src/jit/emitarm.h index 29385593cdba6..fe6a7a0c0790a 100644 --- a/src/coreclr/src/jit/emitarm.h +++ b/src/coreclr/src/jit/emitarm.h @@ -2,7 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
-#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // This typedef defines the type that we use to hold encoded instructions. // @@ -381,4 +381,4 @@ inline bool emitIsLoadLabel(instrDesc* jmp) return (jmp->idInsFmt() == IF_T2_M1) || (jmp->idInsFmt() == IF_T1_J3) || (jmp->idInsFmt() == IF_T2_N1); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM diff --git a/src/coreclr/src/jit/emitarm64.cpp b/src/coreclr/src/jit/emitarm64.cpp index e9909b28f089a..83889446a5bce 100644 --- a/src/coreclr/src/jit/emitarm64.cpp +++ b/src/coreclr/src/jit/emitarm64.cpp @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) /*****************************************************************************/ /*****************************************************************************/ @@ -13147,4 +13147,4 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins #endif // defined(DEBUG) || defined(LATE_DISASM) -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) diff --git a/src/coreclr/src/jit/emitarm64.h b/src/coreclr/src/jit/emitarm64.h index 7af9e486e41de..4bdd715b4b23b 100644 --- a/src/coreclr/src/jit/emitarm64.h +++ b/src/coreclr/src/jit/emitarm64.h @@ -2,7 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // The ARM64 instructions are all 32 bits in size. // we use an unsigned int to hold the encoded instructions. @@ -901,4 +901,4 @@ inline bool emitIsLoadConstant(instrDesc* jmp) (jmp->idInsFmt() == IF_LARGELDC)); } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 diff --git a/src/coreclr/src/jit/emitdef.h b/src/coreclr/src/jit/emitdef.h index f7f9325b79c03..d148705dd16c4 100644 --- a/src/coreclr/src/jit/emitdef.h +++ b/src/coreclr/src/jit/emitdef.h @@ -7,11 +7,11 @@ #define _EMITDEF_H_ /*****************************************************************************/ -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include "emitxarch.h" -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #include "emitarm.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #include "emitarm64.h" #else #error Unsupported or unset target architecture diff --git a/src/coreclr/src/jit/emitfmts.h b/src/coreclr/src/jit/emitfmts.h index 587033f2e9191..db42ef16993a1 100644 --- a/src/coreclr/src/jit/emitfmts.h +++ b/src/coreclr/src/jit/emitfmts.h @@ -3,11 +3,11 @@ // See the LICENSE file in the project root for more information. 
////////////////////////////////////////////////////////////////////////////// -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include "emitfmtsxarch.h" -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #include "emitfmtsarm.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #include "emitfmtsarm64.h" #else #error Unsupported or unset target architecture diff --git a/src/coreclr/src/jit/emitfmtsarm.h b/src/coreclr/src/jit/emitfmtsarm.h index ad99fb33ae78c..36c480571d23c 100644 --- a/src/coreclr/src/jit/emitfmtsarm.h +++ b/src/coreclr/src/jit/emitfmtsarm.h @@ -4,7 +4,7 @@ ////////////////////////////////////////////////////////////////////////////// // clang-format off -#if !defined(_TARGET_ARM_) +#if !defined(TARGET_ARM) #error Unexpected target type #endif diff --git a/src/coreclr/src/jit/emitfmtsarm64.h b/src/coreclr/src/jit/emitfmtsarm64.h index 1b8734763f009..78d8da7d30082 100644 --- a/src/coreclr/src/jit/emitfmtsarm64.h +++ b/src/coreclr/src/jit/emitfmtsarm64.h @@ -4,7 +4,7 @@ ////////////////////////////////////////////////////////////////////////////// // clang-format off -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) #error Unexpected target type #endif diff --git a/src/coreclr/src/jit/emitfmtsxarch.h b/src/coreclr/src/jit/emitfmtsxarch.h index 5f4abc120edd4..02a5f190822a2 100644 --- a/src/coreclr/src/jit/emitfmtsxarch.h +++ b/src/coreclr/src/jit/emitfmtsxarch.h @@ -8,7 +8,7 @@ // // clang-format off -#if !defined(_TARGET_XARCH_) +#if !defined(TARGET_XARCH) #error Unexpected target type #endif diff --git a/src/coreclr/src/jit/emitinl.h b/src/coreclr/src/jit/emitinl.h index b20983fbad933..6647c61bbdcf3 100644 --- a/src/coreclr/src/jit/emitinl.h +++ b/src/coreclr/src/jit/emitinl.h @@ -6,12 +6,12 @@ #ifndef _EMITINL_H_ #define _EMITINL_H_ -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /* static */ inline bool emitter::instrIs3opImul(instruction ins) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return ((ins >= INS_imul_AX) && (ins <= INS_imul_DI)); #else // _TARGET_AMD64 return ((ins >= INS_imul_AX) && (ins <= INS_imul_15)); @@ -21,7 +21,7 @@ inline bool emitter::instrIs3opImul(instruction ins) /* static */ inline bool emitter::instrIsExtendedReg3opImul(instruction ins) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return false; #else // _TARGET_AMD64 return ((ins >= INS_imul_08) && (ins <= INS_imul_15)); @@ -47,7 +47,7 @@ inline void emitter::check3opImulValues() assert(INS_imul_BP - INS_imul_AX == REG_EBP); assert(INS_imul_SI - INS_imul_AX == REG_ESI); assert(INS_imul_DI - INS_imul_AX == REG_EDI); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert(INS_imul_08 - INS_imul_AX == REG_R8); assert(INS_imul_09 - INS_imul_AX == REG_R9); assert(INS_imul_10 - INS_imul_AX == REG_R10); @@ -102,7 +102,7 @@ inline regNumber emitter::inst3opImulReg(instruction ins) * get stored in different places within the instruction descriptor. 
*/ -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH inline ssize_t emitter::emitGetInsAmd(instrDesc* id) { @@ -206,7 +206,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) return id->idAddr()->iiaAddrMode.amDisp; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH /***************************************************************************** * @@ -219,7 +219,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) unsigned encodeMask; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(REGNUM_BITS >= 3); encodeMask = 0; @@ -232,7 +232,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) id->idReg1((regNumber)encodeMask); // Save in idReg1 -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) assert(REGNUM_BITS >= 4); encodeMask = 0; @@ -276,7 +276,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) id->idReg2((regNumber)encodeMask); // Save in idReg2 -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) assert(REGNUM_BITS >= 4); encodeMask = 0; @@ -304,7 +304,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) id->idReg2((regNumber)encodeMask); // Save in idReg2 -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) assert(REGNUM_BITS >= 5); encodeMask = 0; @@ -346,7 +346,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) unsigned regmask = 0; unsigned encodeMask; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(REGNUM_BITS >= 3); encodeMask = id->idReg1(); @@ -356,7 +356,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) regmask |= RBM_EDI; if ((encodeMask & 0x04) != 0) regmask |= RBM_EBX; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) assert(REGNUM_BITS >= 4); encodeMask = id->idReg1(); @@ -396,7 +396,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) regmask |= RBM_R15; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) assert(REGNUM_BITS >= 4); encodeMask = id->idReg1(); @@ -420,7 +420,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) if ((encodeMask & 0x08) != 0) regmask |= RBM_R11; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) assert(REGNUM_BITS >= 5); encodeMask = id->idReg1(); @@ -455,7 +455,7 @@ inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id) return regmask; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH inline bool insIsCMOV(instruction ins) { return ((ins >= INS_cmovo) && (ins <= INS_cmovg)); diff --git a/src/coreclr/src/jit/emitjmps.h b/src/coreclr/src/jit/emitjmps.h index af2d36bc6240e..363a5f336f3ce 100644 --- a/src/coreclr/src/jit/emitjmps.h +++ b/src/coreclr/src/jit/emitjmps.h @@ -7,7 +7,7 @@ #error Must define JMP_SMALL macro before including this file #endif -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // jump reverse instruction JMP_SMALL(jmp , jmp , jmp ) @@ -28,7 +28,7 @@ JMP_SMALL(jge , jl , jge ) JMP_SMALL(jle , jg , jle ) JMP_SMALL(jg , jle , jg ) -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) // jump reverse instruction condcode JMP_SMALL(jmp , jmp , b ) // AL always diff --git a/src/coreclr/src/jit/emitpub.h b/src/coreclr/src/jit/emitpub.h index 52c5000102f0f..cd28e0355c03b 100644 --- a/src/coreclr/src/jit/emitpub.h +++ b/src/coreclr/src/jit/emitpub.h @@ -101,7 +101,7 @@ UNATIVE_OFFSET emitDataSize(); /* Instruction information */ /************************************************************************/ -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH static bool instrIs3opImul(instruction ins); static bool instrIsExtendedReg3opImul(instruction ins); static bool instrHasImplicitRegPairDest(instruction ins); @@ -136,7 
+136,7 @@ static void InitTranslator(PDBRewriter* pPDB, int* rgSecMap, IMAGE_SECTION_HEADE /* Interface for generating unwind information */ /************************************************************************/ -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH bool emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment = NULL); @@ -148,10 +148,10 @@ void emitSplit(emitLocation* startLoc, void emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) unsigned emitGetInstructionSize(emitLocation* emitLoc); -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) diff --git a/src/coreclr/src/jit/emitxarch.cpp b/src/coreclr/src/jit/emitxarch.cpp index 22599aa209ab8..ca5b162327538 100644 --- a/src/coreclr/src/jit/emitxarch.cpp +++ b/src/coreclr/src/jit/emitxarch.cpp @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) /*****************************************************************************/ /*****************************************************************************/ @@ -396,7 +396,7 @@ bool TakesRexWPrefix(instruction ins, emitAttr attr) break; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // movsx should always sign extend out to 8 bytes just because we don't track // whether the dest should be 4 bytes or 8 bytes (attr indicates the size // of the source, not the dest). @@ -456,9 +456,9 @@ bool TakesRexWPrefix(instruction ins, emitAttr attr) { return false; } -#else //!_TARGET_AMD64 = _TARGET_X86_ +#else //!_TARGET_AMD64 = TARGET_X86 return false; -#endif //!_TARGET_AMD64_ +#endif //! TARGET_AMD64 } // Returns true if using this register will require a REX.* prefix. @@ -467,7 +467,7 @@ bool TakesRexWPrefix(instruction ins, emitAttr attr) // instruction in question is AVX. bool IsExtendedReg(regNumber reg) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return ((reg >= REG_R8) && (reg <= REG_R15)) || ((reg >= REG_XMM8) && (reg <= REG_XMM15)); #else // X86 JIT operates in 32-bit mode and hence extended reg are not available. @@ -478,7 +478,7 @@ bool IsExtendedReg(regNumber reg) // Returns true if using this register, for the given EA_SIZE(attr), will require a REX.* prefix bool IsExtendedReg(regNumber reg, emitAttr attr) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Not a register, so doesn't need a prefix if (reg > REG_XMM15) { @@ -520,11 +520,11 @@ bool IsExtendedReg(regNumber reg, emitAttr attr) // can also used to know whether a YMM register in case of AVX instructions. bool IsXMMReg(regNumber reg) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return (reg >= REG_XMM0) && (reg <= REG_XMM15); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 return (reg >= REG_XMM0) && (reg <= REG_XMM7); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } // Returns bits to be encoded in instruction for the given register. 
@@ -550,7 +550,7 @@ emitter::code_t emitter::AddRexWPrefix(instruction ins, code_t code) return emitter::code_t(code | 0x00008000000000ULL); } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return emitter::code_t(code | 0x4800000000ULL); #else assert(!"UNREACHED"); @@ -558,7 +558,7 @@ emitter::code_t emitter::AddRexWPrefix(instruction ins, code_t code) #endif } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 emitter::code_t emitter::AddRexRPrefix(instruction ins, code_t code) { @@ -618,7 +618,7 @@ emitter::code_t emitter::AddRexPrefix(instruction ins, code_t code) return code | 0x4000000000ULL; } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 bool isPrefix(BYTE b) { @@ -795,7 +795,7 @@ unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, c return 3; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (code > 0x00FFFFFFFFLL) { BYTE prefix = (code >> 32) & 0xFF; @@ -849,12 +849,12 @@ unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, c return emitOutputByte(dst, prefix); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 return 0; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 /***************************************************************************** * Is the last instruction emitted a call instruction? */ @@ -880,7 +880,7 @@ void emitter::emitOutputPreEpilogNOP() } } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 // Size of rex prefix in bytes unsigned emitter::emitGetRexPrefixSize(instruction ins) @@ -1006,7 +1006,7 @@ unsigned emitter::emitGetPrefixSize(code_t code) return 0; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /***************************************************************************** * * Record a non-empty stack @@ -1426,7 +1426,7 @@ inline unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emitAtt { assert(reg < REG_STK); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Either code is not NULL or reg is not an extended reg. // If reg is an extended reg, instruction needs to be prefixed with 'REX' // which would require code != NULL. @@ -1442,7 +1442,7 @@ inline unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emitAtt // not the corresponding AH, CH, DH, or BH *code = AddRexPrefix(ins, *code); // REX } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 unsigned regBits = RegEncoding(reg); @@ -1460,7 +1460,7 @@ inline unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emitAtt { assert(reg < REG_STK); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Either code is not NULL or reg is not an extended reg. // If reg is an extended reg, instruction needs to be prefixed with 'REX' // which would require code != NULL. @@ -1476,7 +1476,7 @@ inline unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emitAtt // not the corresponding AH, CH, DH, or BH *code = AddRexPrefix(ins, *code); // REX } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 unsigned regBits = RegEncoding(reg); @@ -1522,7 +1522,7 @@ inline unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, code_t* { assert(reg < REG_STK); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Either code is not NULL or reg is not an extended reg. // If reg is an extended reg, instruction needs to be prefixed with 'REX' // which would require code != NULL. 
@@ -1533,9 +1533,9 @@ inline unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, code_t* *code = AddRexXPrefix(ins, *code); // REX.X } unsigned regBits = RegEncoding(reg); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 unsigned regBits = reg; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 assert(regBits < 8); return regBits; @@ -1766,7 +1766,7 @@ bool emitter::emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1 inline UNATIVE_OFFSET emitter::emitInsSize(code_t code) { UNATIVE_OFFSET size = (code & 0xFF000000) ? 4 : (code & 0x00FF0000) ? 3 : 2; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 size += emitGetPrefixSize(code); #endif return size; @@ -1813,11 +1813,11 @@ inline UNATIVE_OFFSET emitter::emitInsSizeRR(instrDesc* id, code_t code, int val UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize()); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc()); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (valSize > sizeof(INT32)) { @@ -1938,9 +1938,9 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp) /* Is this a stack parameter reference? */ if (emitComp->lvaIsParameter(var) -#if !defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI) +#if !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) && !emitComp->lvaIsRegArgument(var) -#endif // !_TARGET_AMD64_ || UNIX_AMD64_ABI +#endif // !TARGET_AMD64 || UNIX_AMD64_ABI ) { /* If no EBP frame, arguments are off of ESP, above temps */ @@ -1959,7 +1959,7 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp) if (EBPbased) { -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // If localloc is not used, then ebp chaining is done and hence // offset of locals will be at negative offsets, Otherwise offsets // will be positive. In future, when RBP gets positioned in the @@ -2001,7 +2001,7 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp) // offset is negative return size + ((int(offs) >= SCHAR_MIN) ? 
sizeof(char) : sizeof(int)); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // This case arises for localloc frames else { @@ -2040,7 +2040,7 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp) // printf("lcl = %04X, tmp = %04X, stk = %04X, offs = %04X\n", // emitLclSize, emitMaxTmpSize, emitCurStackLvl, offs); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 bool useSmallEncoding = (SCHAR_MIN <= (int)offs) && ((int)offs <= SCHAR_MAX); #else bool useSmallEncoding = (offs <= size_t(SCHAR_MAX)); @@ -2083,11 +2083,11 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, code_t code, int var UNATIVE_OFFSET prefix = emitGetAdjustedSize(ins, attrSize, code); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(int) || !id->idIsCnsReloc()); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (valSize > sizeof(int)) { @@ -2123,7 +2123,7 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, code_t code, int var static bool baseRegisterRequiresSibByte(regNumber base) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return base == REG_ESP || base == REG_R12; #else return base == REG_ESP; @@ -2132,7 +2132,7 @@ static bool baseRegisterRequiresSibByte(regNumber base) static bool baseRegisterRequiresDisplacement(regNumber base) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return base == REG_EBP || base == REG_R13; #else return base == REG_EBP; @@ -2235,7 +2235,7 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code) size += sizeof(INT32); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // If id is not marked for reloc, add 1 additional byte for SIB that follows disp32 if (!id->idIsDspReloc()) { @@ -2365,11 +2365,11 @@ inline UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, code_t code, int val // Let's not complicate things until this is needed. 
assert(ins != INS_bt); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc()); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (valSize > sizeof(INT32)) { @@ -2423,11 +2423,11 @@ inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, code_t code, int val UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize()); bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test); -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 // occasionally longs get here on x86 if (valSize > sizeof(INT32)) valSize = sizeof(INT32); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 if (id->idIsCnsReloc()) { @@ -3610,18 +3610,18 @@ void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg) { case INS_inc: case INS_dec: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 sz = 2; // x64 has no 1-byte opcode (it is the same encoding as the REX prefix) -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 if (size == EA_1BYTE) sz = 2; // Use the long form as the small one has no 'w' bit else sz = 1; // Use short form -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 break; @@ -3703,7 +3703,7 @@ void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t noway_assert(emitVerifyEncodable(ins, size, reg)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(size < EA_8BYTE || ins == INS_mov || ((int)val == val && !EA_IS_CNS_RELOC(attr))); @@ -3722,7 +3722,7 @@ void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t switch (ins) { case INS_mov: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is equivalent to mov reg, imm32 if the high order bits are all 0 // and this isn't a reloc constant. if (((size > EA_4BYTE) && (0 == (val & 0xFFFFFFFF00000000LL))) && !EA_IS_CNS_RELOC(attr)) @@ -3735,7 +3735,7 @@ void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t sz = 9; // Really it is 10, but we'll add one more later break; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 sz = 5; break; @@ -3789,14 +3789,14 @@ void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t sz = 2; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (size > EA_4BYTE) { // We special-case anything that takes a full 8-byte constant. sz += 4; } else -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 { sz += EA_SIZE_IN_BYTES(attr); } @@ -3841,7 +3841,7 @@ void emitter::emitIns_I(instruction ins, emitAttr attr, int val) instrDesc* id; bool valInByte = ((signed char)val == val); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -3985,13 +3985,13 @@ void emitter::emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNum /* We don't want to generate any useless mov instructions! */ CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Same-reg 4-byte mov can be useful because it performs a // zero-extension to 8 bytes. 
assert(ins != INS_mov || reg1 != reg2 || size == EA_4BYTE); #else assert(ins != INS_mov || reg1 != reg2); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 assert(size <= EA_32BYTE); noway_assert(emitVerifyEncodable(ins, size, reg1, reg2)); @@ -4019,7 +4019,7 @@ void emitter::emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNum void emitter::emitIns_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int ival) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -4829,7 +4829,7 @@ void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO id->idInsFmt(fmt); id->idReg1(reg); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Special case: "mov eax, [addr]" is smaller. // This case is not enabled for amd64 as it always uses RIP relative addressing // and it results in smaller instruction size than encoding 64-bit addr in the @@ -4841,7 +4841,7 @@ void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO sz += 1; } else -#endif //_TARGET_X86_ +#endif // TARGET_X86 { sz = emitInsSizeCV(id, insCodeRM(ins)); } @@ -4876,7 +4876,7 @@ void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE f emitAttr size = EA_SIZE(attr); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // For x86 it is valid to storeind a double sized operand in an xmm reg to memory assert(size <= EA_8BYTE); #else @@ -4894,7 +4894,7 @@ void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE f UNATIVE_OFFSET sz; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Special case: "mov [addr], EAX" is smaller. // This case is not enable for amd64 as it always uses RIP relative addressing // and it will result in smaller instruction size than encoding 64-bit addr in @@ -4913,7 +4913,7 @@ void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE f } } else -#endif //_TARGET_X86_ +#endif // TARGET_X86 { sz = emitInsSizeCV(id, insCodeMR(ins)); } @@ -5016,7 +5016,7 @@ void emitter::emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int v emitTotalIGjmps++; #endif -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 // Storing the address of a basicBlock will need a reloc // as the instruction uses the absolute address, // not a relative address. 
@@ -5101,7 +5101,7 @@ void emitter::emitIns_I_AR(instruction ins, emitAttr attr, int val, regNumber re { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -5157,7 +5157,7 @@ void emitter::emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -5335,7 +5335,7 @@ void emitter::emitIns_I_ARR(instruction ins, emitAttr attr, int val, regNumber r { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -5395,7 +5395,7 @@ void emitter::emitIns_I_ARX( { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -5519,7 +5519,7 @@ void emitter::emitIns_I_AX(instruction ins, emitAttr attr, int val, regNumber re { assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -6498,7 +6498,7 @@ void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int va sz = emitInsSizeSV(id, insCodeMR(ins), varx, offs); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (attr == EA_1BYTE) { assert(isByteReg(ireg)); @@ -6538,7 +6538,7 @@ void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int va void emitter::emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov reg, imm64 is the only opcode which takes a full 8 byte immediate // all other opcodes take a sign-extended 4-byte immediate noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr)); @@ -7016,7 +7016,7 @@ void emitter::emitIns_Call(EmitCallType callType, { id->idSetIsDspReloc(); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else { // An absolute indir address that doesn't need reloc should fit within 32-bits @@ -7025,7 +7025,7 @@ void emitter::emitIns_Call(EmitCallType callType, noway_assert(static_cast(reinterpret_cast(addr)) == (size_t)addr); sz++; } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 } break; @@ -7052,7 +7052,7 @@ void emitter::emitIns_Call(EmitCallType callType, { id->idSetIsDspReloc(); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else { // An absolute indir address that doesn't need reloc should fit within 32-bits @@ -7061,7 +7061,7 @@ void emitter::emitIns_Call(EmitCallType callType, noway_assert(static_cast(reinterpret_cast(addr)) == (size_t)addr); sz++; } -#endif 
//_TARGET_AMD64_ +#endif // TARGET_AMD64 } else { @@ -7265,7 +7265,7 @@ const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName) const char* rn = emitComp->compRegVarName(reg, varName); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 char suffix = '\0'; switch (EA_SIZE(attr)) @@ -7360,9 +7360,9 @@ const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName) default: break; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(strlen(rn) >= 3); switch (EA_SIZE(attr)) @@ -7403,7 +7403,7 @@ const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName) default: break; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if 0 // The following is useful if you want register names to be tagged with * or ^ representing gcref or byref, respectively, @@ -7872,7 +7872,7 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail) unsigned cnt = (jdsc->dsSize - 1) / TARGET_POINTER_SIZE; BasicBlock** bbp = (BasicBlock**)jdsc->dsCont; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define SIZE_LETTER "Q" #else #define SIZE_LETTER "D" @@ -7949,7 +7949,7 @@ void emitter::emitDispInsHex(instrDesc* id, BYTE* code, size_t sz) // We do not display the instruction hex if we want diff-able disassembly if (!emitComp->opts.disDiffable) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // how many bytes per instruction we format for const size_t digits = 10; #else // _TARGET_X86 @@ -8126,11 +8126,11 @@ void emitter::emitDispIns( { printf(" %-9s", sstr); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (strnlen_s(sstr, 10) >= 8) -#else // FEATURE_PAL +#else // TARGET_UNIX if (strnlen(sstr, 10) >= 8) -#endif // FEATURE_PAL +#endif // TARGET_UNIX { printf(" "); } @@ -8158,7 +8158,7 @@ void emitter::emitDispIns( if (ins == INS_lea) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert((attr == EA_4BYTE) || (attr == EA_8BYTE)); #else assert(attr == EA_4BYTE); @@ -8189,7 +8189,7 @@ void emitter::emitDispIns( case IF_CNS: val = emitGetInsSC(id); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // no 8-byte immediates allowed here! assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif @@ -8261,7 +8261,7 @@ void emitter::emitDispIns( case IF_RRD_ARD: case IF_RWR_ARD: case IF_RRW_ARD: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (ins == INS_movsxd) { attr = EA_8BYTE; @@ -8406,7 +8406,7 @@ void emitter::emitDispIns( emitDispAddrMode(id); emitGetInsAmdCns(id, &cnsVal); val = cnsVal.cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // no 8-byte immediates allowed here! assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif @@ -8474,7 +8474,7 @@ void emitter::emitDispIns( emitGetInsCns(id, &cnsVal); val = cnsVal.cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // no 8-byte immediates allowed here! 
assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif @@ -8524,7 +8524,7 @@ void emitter::emitDispIns( case IF_RRD_SRD: case IF_RWR_SRD: case IF_RRW_SRD: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (ins == INS_movsxd) { attr = EA_8BYTE; @@ -8633,12 +8633,12 @@ void emitter::emitDispIns( { printf(" %s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), EA_16BYTE)); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else if (ins == INS_movsxd) { printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), emitRegName(id->idReg2(), EA_4BYTE)); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 else if (ins == INS_movsx || ins == INS_movzx) { printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), emitRegName(id->idReg2(), attr)); @@ -8709,7 +8709,7 @@ void emitter::emitDispIns( printf("%s,", emitRegName(id->idReg1(), attr)); printf(" %s", emitRegName(id->idReg2(), attr)); val = emitGetInsSC(id); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // no 8-byte immediates allowed here! assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif @@ -8744,7 +8744,7 @@ void emitter::emitDispIns( { attr = EA_PTRSIZE; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else if (ins == INS_movsxd) { attr = EA_PTRSIZE; @@ -8877,7 +8877,7 @@ void emitter::emitDispIns( emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC); emitGetInsDcmCns(id, &cnsVal); val = cnsVal.cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // no 8-byte immediates allowed here! assert((val >= (ssize_t)0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL)); #endif @@ -9030,7 +9030,7 @@ static BYTE* emitOutputNOP(BYTE* dst, size_t nBytes) { assert(nBytes <= 15); -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 // TODO-X86-CQ: when VIA C3 CPU's are out of circulation, switch to the // more efficient real NOP: 0x0F 0x1F +modR/M // Also can't use AMD recommended, multiple size prefixes (i.e. 
0x66 0x66 0x90 for 3 byte NOP) @@ -9087,7 +9087,7 @@ static BYTE* emitOutputNOP(BYTE* dst, size_t nBytes) case 0: break; } -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 switch (nBytes) { case 2: @@ -9164,7 +9164,7 @@ static BYTE* emitOutputNOP(BYTE* dst, size_t nBytes) *dst++ = 0x00; break; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 return dst; } @@ -9206,7 +9206,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) // The displacement field is in an unusual place for calls dsp = emitGetInsCIdisp(id); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Compute the REX prefix if it exists if (IsExtendedReg(reg, EA_PTRSIZE)) @@ -9226,7 +9226,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) // And emit the REX prefix dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 goto GOT_DSP; } @@ -9428,7 +9428,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) __fallthrough; case EA_4BYTE: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case EA_8BYTE: #endif @@ -9437,7 +9437,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) code |= 0x1; break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case EA_8BYTE: /* Double operand - set the appropriate bit */ @@ -9445,7 +9445,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) code |= 0x04; break; -#endif // _TARGET_X86_ +#endif // TARGET_X86 default: NO_WAY("unexpected size"); @@ -9501,12 +9501,12 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) ssize_t cval = addc->cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); -#else //_TARGET_X86_ +#else // TARGET_X86 noway_assert(opsz <= 4); -#endif //_TARGET_X86_ +#endif // TARGET_X86 switch (opsz) { @@ -9528,7 +9528,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // We emit zero on Amd64, to avoid the assert in emitOutputLong() dst += emitOutputLong(dst, 0); #else @@ -9539,7 +9539,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) { dst += emitOutputByte(dst, code | 0x05); @@ -9548,7 +9548,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) { dst += emitOutputWord(dst, code | 0x0500); } -#else //_TARGET_AMD64_ +#else // TARGET_AMD64 // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. // This addr mode should never be used while generating relocatable ngen code nor if // the addr can be encoded as pc-relative address. 
@@ -9566,7 +9566,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) dst += emitOutputWord(dst, code | 0x0400); } dst += emitOutputByte(dst, 0x25); -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 dst += emitOutputLong(dst, dsp); } break; @@ -9916,7 +9916,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) { ssize_t cval = addc->cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #endif @@ -10192,9 +10192,9 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) __fallthrough; case EA_4BYTE: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case EA_8BYTE: -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 /* Set the 'w' size bit to indicate 32-bit operation * Note that incrementing "code" for INS_call (0xFF) would @@ -10204,7 +10204,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) code |= 0x01; break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case EA_8BYTE: // Double operand - set the appropriate bit. @@ -10215,7 +10215,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) code |= 0x04; NO_WAY("bad 8 byte op"); break; -#endif // _TARGET_X86_ +#endif // TARGET_X86 default: NO_WAY("unexpected size"); @@ -10333,7 +10333,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) { ssize_t cval = addc->cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #endif @@ -10518,7 +10518,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) opsz = 1; } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else { // Special case: "mov eax, [addr]" and "mov [addr], eax" @@ -10552,7 +10552,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) } } } -#endif //_TARGET_X86_ +#endif // TARGET_X86 // Special case emitting AVX instructions if (EncodedBySSE38orSSE3A(ins) || (ins == INS_crc32)) @@ -10658,19 +10658,19 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) __fallthrough; case EA_4BYTE: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case EA_8BYTE: #endif // Set the 'w' bit to get the large version code |= 0x1; break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case EA_8BYTE: // Double operand - set the appropriate bit code |= 0x04; break; -#endif // _TARGET_X86_ +#endif // TARGET_X86 default: assert(!"unexpected size"); @@ -10745,12 +10745,12 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) ssize_t cval = addc->cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); -#else //_TARGET_X86_ +#else // TARGET_X86 noway_assert(opsz <= 4); -#endif //_TARGET_X86_ +#endif // TARGET_X86 switch (opsz) { @@ -10772,13 +10772,13 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // All static field and data section constant accesses should be marked as relocatable noway_assert(id->idIsDspReloc()); dst += emitOutputLong(dst, 0); -#else //_TARGET_X86_ +#else // TARGET_X86 dst += emitOutputLong(dst, (int)target); -#endif //_TARGET_X86_ +#endif // TARGET_X86 if 
(id->idIsDspReloc()) { @@ -10787,12 +10787,12 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) } else { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // This code path should never be hit on amd64 since it always uses RIP relative addressing. // In future if ever there is a need to enable this special case, also enable the logic // that sets isMoffset to true on amd64. unreached(); -#else //_TARGET_X86_ +#else // TARGET_X86 dst += emitOutputSizeT(dst, (ssize_t)target); @@ -10801,7 +10801,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) emitRecordRelocation((void*)(dst - TARGET_POINTER_SIZE), target, IMAGE_REL_BASED_MOFFSET); } -#endif //_TARGET_X86_ +#endif // TARGET_X86 } // Now generate the constant value, if present @@ -10809,7 +10809,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) { ssize_t cval = addc->cnsVal; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // all these opcodes only take a sign-extended 4-byte immediate noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc)); #endif @@ -10939,7 +10939,7 @@ BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id) case INS_inc: case INS_dec: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (true) #else if (size == EA_1BYTE) @@ -11182,7 +11182,7 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id) else if ((ins == INS_movsx) || (ins == INS_movzx) || (insIsCMOV(ins))) { code = insEncodeRMreg(ins, code) | (int)(size == EA_2BYTE); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert((size < EA_4BYTE) || (insIsCMOV(ins))); if ((size == EA_8BYTE) || (ins == INS_movsx)) @@ -11194,7 +11194,7 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id) { code = insEncodeRMreg(ins, code); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } #ifdef FEATURE_HW_INTRINSICS else if ((ins == INS_crc32) || (ins == INS_lzcnt) || (ins == INS_popcnt) || (ins == INS_tzcnt)) @@ -11242,7 +11242,7 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id) code |= 0x1; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case EA_8BYTE: // TODO-AMD64-CQ: Better way to not emit REX.W when we don't need it // Don't need to zero out the high bits explicitly @@ -11255,7 +11255,7 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id) code |= 0x1; break; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: assert(!"unexpected size"); @@ -11686,7 +11686,7 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id) { dst += emitOutputLong(dst, val); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else { assert(size == EA_PTRSIZE); @@ -11793,7 +11793,7 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id) code |= 0x1; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case EA_8BYTE: /* Set the 'w' bit to get the large version */ /* and the REX.W bit to get the really large version */ @@ -11845,11 +11845,11 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id) case EA_4BYTE: dst += emitOutputLong(dst, val); break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case EA_8BYTE: dst += emitOutputLong(dst, val); break; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: break; } @@ -11962,7 +11962,7 @@ BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id) assert(!IsSSEInstruction(ins)); assert(!IsAVXInstruction(ins)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // all these opcodes take a sign-extended 4-byte immediate, max noway_assert(size < EA_8BYTE || ((int)val == val && !id->idIsCnsReloc())); #endif @@ -12335,7 +12335,7 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, 
instrDesc* i) dst += emitOutputLong(dst, distVal); -#ifndef _TARGET_AMD64_ // all REL32 on AMD have to go through recordRelocation +#ifndef TARGET_AMD64 // all REL32 on AMD have to go through recordRelocation if (emitComp->opts.compReloc) #endif { @@ -12438,7 +12438,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) code = insCodeMR(ins); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Support only scalar AVX instructions and hence size is hard coded to 4-byte. code = AddVexPrefixIfNeeded(ins, code, EA_4BYTE); @@ -12547,7 +12547,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) if (id->idIsDspReloc()) { dst += emitOutputWord(dst, code | 0x0500); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 dst += emitOutputLong(dst, 0); #else dst += emitOutputLong(dst, (int)addr); @@ -12556,9 +12556,9 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 dst += emitOutputWord(dst, code | 0x0500); -#else //_TARGET_AMD64_ +#else // TARGET_AMD64 // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero. // This addr mode should never be used while generating relocatable ngen code nor if // the addr can be encoded as pc-relative address. @@ -12569,7 +12569,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) // This requires, specifying a SIB byte after ModRM byte. dst += emitOutputWord(dst, code | 0x0400); dst += emitOutputByte(dst, 0x25); -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 dst += emitOutputLong(dst, static_cast(reinterpret_cast(addr))); } goto DONE_CALL; @@ -12584,7 +12584,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) dst += (ins == INS_l_jmp) ? emitOutputByte(dst, insCode(ins)) : emitOutputByte(dst, insCodeMI(ins)); ssize_t offset; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // All REL32 on Amd64 go through recordRelocation. Here we will output zero to advance dst. 
offset = 0; assert(id->idIsDspReloc()); @@ -13854,7 +13854,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins } break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case INS_movsxd: #endif case INS_mov: @@ -13950,7 +13950,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins case INS_imul_BP: case INS_imul_SI: case INS_imul_DI: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case INS_imul_08: case INS_imul_09: case INS_imul_10: @@ -13959,7 +13959,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins case INS_imul_13: case INS_imul_14: case INS_imul_15: -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 case INS_mulEAX: case INS_imulEAX: case INS_imul: @@ -14195,9 +14195,9 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins result.insLatency = PERFSCORE_LATENCY_BRANCH_DIRECT; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case INS_rex_jmp: -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 case INS_i_jmp: // branch to register result.insThroughput = PERFSCORE_THROUGHPUT_2C; @@ -14274,7 +14274,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins } break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case INS_fld: case INS_fstp: result.insThroughput = PERFSCORE_THROUGHPUT_2X; @@ -14285,20 +14285,20 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins break; #endif // _TARGET_X86 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case INS_movsq: case INS_stosq: -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 case INS_movsd: case INS_stosd: // uops.info result.insThroughput = PERFSCORE_THROUGHPUT_1C; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case INS_r_movsq: case INS_r_stosq: -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 case INS_r_movsd: case INS_r_movsb: case INS_r_stosd: @@ -15086,4 +15086,4 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins /*****************************************************************************/ /*****************************************************************************/ -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) diff --git a/src/coreclr/src/jit/emitxarch.h b/src/coreclr/src/jit/emitxarch.h index cd16b116cdc4e..100fda3ed7cb5 100644 --- a/src/coreclr/src/jit/emitxarch.h +++ b/src/coreclr/src/jit/emitxarch.h @@ -2,7 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) /************************************************************************/ /* Public inline informational methods */ @@ -99,12 +99,12 @@ bool AreUpper32BitsZero(regNumber reg); bool hasRexPrefix(code_t code) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 const code_t REX_PREFIX_MASK = 0xFF00000000LL; return (code & REX_PREFIX_MASK) != 0; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 return false; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } // 3-byte VEX prefix starts with byte 0xC4 @@ -524,14 +524,14 @@ void emitIns_Call(EmitCallType callType, bool isJump = false); // clang-format on -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Is the last instruction emitted a call instruction? bool emitIsLastInsCall(); // Insert a NOP at the end of the the current instruction group if the last emitted instruction was a 'call', // because the next instruction group will be an epilog. 
void emitOutputPreEpilogNOP(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 /***************************************************************************** * @@ -561,4 +561,4 @@ inline bool emitIsUncondJump(instrDesc* jmp) return (ins == INS_jmp); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH diff --git a/src/coreclr/src/jit/error.h b/src/coreclr/src/jit/error.h index 0175dca5cbf13..053586fecca95 100644 --- a/src/coreclr/src/jit/error.h +++ b/src/coreclr/src/jit/error.h @@ -168,28 +168,28 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line); #define NYI(msg) NYIRAW("NYI: " msg) #define NYI_IF(cond, msg) if (cond) NYIRAW("NYI: " msg) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define NYI_AMD64(msg) NYIRAW("NYI_AMD64: " msg) #define NYI_X86(msg) do { } while (0) #define NYI_ARM(msg) do { } while (0) #define NYI_ARM64(msg) do { } while (0) -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) #define NYI_AMD64(msg) do { } while (0) #define NYI_X86(msg) NYIRAW("NYI_X86: " msg) #define NYI_ARM(msg) do { } while (0) #define NYI_ARM64(msg) do { } while (0) -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define NYI_AMD64(msg) do { } while (0) #define NYI_X86(msg) do { } while (0) #define NYI_ARM(msg) NYIRAW("NYI_ARM: " msg) #define NYI_ARM64(msg) do { } while (0) -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define NYI_AMD64(msg) do { } while (0) #define NYI_X86(msg) do { } while (0) @@ -213,7 +213,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line); // clang-format on -#if defined(_HOST_X86_) && !defined(FEATURE_PAL) +#if defined(HOST_X86) && !defined(TARGET_UNIX) // While debugging in an Debugger, the "int 3" will cause the program to break // Outside, the exception handler will just filter out the "int 3". diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp index 307f469dee6d6..f9dca0a143208 100644 --- a/src/coreclr/src/jit/flowgraph.cpp +++ b/src/coreclr/src/jit/flowgraph.cpp @@ -1976,7 +1976,7 @@ void Compiler::fgComputeEnterBlocksSet() } } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // TODO-ARM-Cleanup: The ARM code here to prevent creating retless calls by adding the BBJ_ALWAYS // to the enter blocks is a bit of a compromise, because sometimes the blocks are already reachable, // and it messes up DFS ordering to have them marked as enter block. We should prevent the @@ -1992,7 +1992,7 @@ void Compiler::fgComputeEnterBlocksSet() BlockSetOps::AddElemD(this, fgEnterBlks, block->bbNext->bbNum); } } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG if (verbose) @@ -2075,7 +2075,7 @@ bool Compiler::fgRemoveUnreachableBlocks() block->bbJumpKind = BBJ_THROW; block->bbSetRunRarely(); -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // If this is a pair, we have to clear BBF_FINALLY_TARGET flag on // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. 
if (bIsBBCallAlwaysPair) @@ -2083,7 +2083,7 @@ bool Compiler::fgRemoveUnreachableBlocks() noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else { @@ -2275,7 +2275,7 @@ void Compiler::fgDfsInvPostOrder() // an incoming edge into the block). assert(fgEnterBlksSetValid); -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // // BlockSetOps::UnionD(this, startNodes, fgEnterBlks); // @@ -4855,7 +4855,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed case CEE_JMP: retBlocks++; -#if !defined(_TARGET_X86_) && !defined(_TARGET_ARM_) +#if !defined(TARGET_X86) && !defined(TARGET_ARM) if (!isInlining) { // We transform this into a set of ldarg's + tail call and @@ -4866,7 +4866,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed info.compMaxStack = max(info.compMaxStack, info.compILargsCount); break; } -#endif // !_TARGET_X86_ && !_TARGET_ARM_ +#endif // !TARGET_X86 && !TARGET_ARM // If we are inlining, we need to fail for a CEE_JMP opcode, just like // the list of other opcodes (for all platforms). @@ -5598,12 +5598,12 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F if (!impIsTailCallILPattern(tailCall, opcode, codeAddr + sz, codeEndp, isRecursive, &isCallPopAndRet)) { -#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) +#if !defined(FEATURE_CORECLR) && defined(TARGET_AMD64) BADCODE3("tail call not followed by ret or pop+ret", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); #else BADCODE3("tail call not followed by ret", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); -#endif // !FEATURE_CORECLR && _TARGET_AMD64_ +#endif // !FEATURE_CORECLR && TARGET_AMD64 } if (fgCanSwitchToOptimized() && fgMayExplicitTailCall()) @@ -5614,7 +5614,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F fgSwitchToOptimized(); } -#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) +#if !defined(FEATURE_CORECLR) && defined(TARGET_AMD64) if (isCallPopAndRet) { // By breaking here, we let pop and ret opcodes to be @@ -5623,7 +5623,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F // in fgMorphCall(). break; } -#endif // !FEATURE_CORECLR && _TARGET_AMD64_ +#endif // !FEATURE_CORECLR && TARGET_AMD64 } else { @@ -9394,9 +9394,9 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // interruptible if we exercised more care here. newBlock->bbFlags &= ~BBF_GC_SAFE_POINT; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) newBlock->bbFlags &= ~(BBF_FINALLY_TARGET); -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // The new block has no code, so we leave bbCodeOffs/bbCodeOffsEnd set to BAD_IL_OFFSET. If a caller // puts code in the block, then it needs to update these. 
@@ -10131,7 +10131,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) return false; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // We can't compact a finally target block, as we need to generate special code for such blocks during code // generation if ((bNext->bbFlags & BBF_FINALLY_TARGET) != 0) @@ -10189,9 +10189,9 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty()); noway_assert(bNext->bbPreds); -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) noway_assert((bNext->bbFlags & BBF_FINALLY_TARGET) == 0); -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Make sure the second block is not the start of a TRY block or an exception handler @@ -10670,9 +10670,9 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) noway_assert(block->bbPrev != nullptr); // Can't use this function to remove the first block -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) assert(!block->isBBCallAlwaysPairTail()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) /* First walk the statement trees in this basic block and delete each stmt */ @@ -10975,10 +10975,10 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // Should never remove a genReturnBB, as we might have special hookups there. noway_assert(block != genReturnBB); -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove a finally target assert(!(block->bbFlags & BBF_FINALLY_TARGET)); -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (unreachable) { @@ -11005,9 +11005,9 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) NO_WAY("No retless call finally blocks; need unwind target instead"); -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && @@ -11047,9 +11047,9 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgRemoveBlock(leaveBlk, true); -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (block->bbJumpKind == BBJ_RETURN) { @@ -12354,7 +12354,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r #if defined(FEATURE_EH_FUNCLETS) -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) /***************************************************************************** * We just removed a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
If this was the only such pair @@ -12389,7 +12389,7 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) block->bbFlags &= ~BBF_FINALLY_TARGET; } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) /***************************************************************************** * Is this an intra-handler control flow edge? @@ -13747,7 +13747,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc optimizeJump = false; } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't optimize a jump to a finally target. For BB1->BB2->BB3, where // BB2 is a finally target, if we changed BB1 to jump directly to BB3, // it would skip the finally target. BB1 might be a BBJ_ALWAYS block part @@ -13757,7 +13757,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc { optimizeJump = false; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Must optimize jump if bDest has been removed // @@ -13959,11 +13959,11 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) /* Don't remove finally targets */ if (block->bbFlags & BBF_FINALLY_TARGET) break; -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if defined(FEATURE_EH_FUNCLETS) /* Don't remove an empty block that is in a different EH region @@ -16802,7 +16802,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication) continue; } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) { @@ -16812,7 +16812,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication) bPrev = block; continue; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) noway_assert(!block->bbCatchTyp); noway_assert(!(block->bbFlags & BBF_TRY_BEG)); @@ -16953,7 +16953,7 @@ void Compiler::fgDebugCheckUpdate() /* no unreachable blocks */ if ((block->countOfInEdges() == 0) && !(block->bbFlags & BBF_DONT_REMOVE) -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // With funclets, we never get rid of the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, // even if we can prove that the finally block never returns. && !block->isBBCallAlwaysPairTail() @@ -25237,7 +25237,7 @@ void Compiler::fgCleanupContinuation(BasicBlock* continuation) void Compiler::fgUpdateFinallyTargetFlags() { -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Any fixup required? 
if (!fgOptimizedFinally) @@ -25251,7 +25251,7 @@ void Compiler::fgUpdateFinallyTargetFlags() fgClearAllFinallyTargetBits(); fgAddFinallyTargetFlags(); -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } //------------------------------------------------------------------------ @@ -25260,7 +25260,7 @@ void Compiler::fgUpdateFinallyTargetFlags() // void Compiler::fgClearAllFinallyTargetBits() { -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) JITDUMP("*************** In fgClearAllFinallyTargetBits()\n"); @@ -25273,7 +25273,7 @@ void Compiler::fgClearAllFinallyTargetBits() block->bbFlags &= ~BBF_FINALLY_TARGET; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } //------------------------------------------------------------------------ @@ -25281,7 +25281,7 @@ void Compiler::fgClearAllFinallyTargetBits() // void Compiler::fgAddFinallyTargetFlags() { -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) JITDUMP("*************** In fgAddFinallyTargetFlags()\n"); @@ -25307,7 +25307,7 @@ void Compiler::fgAddFinallyTargetFlags() } } } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } //------------------------------------------------------------------------ diff --git a/src/coreclr/src/jit/gcencode.cpp b/src/coreclr/src/jit/gcencode.cpp index b8e0cd1fdacdf..f948d95f42175 100644 --- a/src/coreclr/src/jit/gcencode.cpp +++ b/src/coreclr/src/jit/gcencode.cpp @@ -63,9 +63,9 @@ ReturnKind GCInfo::getReturnKind() case TYP_STRUCT: if (compiler->IsHfa(structType)) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(false && "HFAs not expected for X86"); -#endif // _TARGET_X86_ +#endif // TARGET_X86 return RT_Scalar; } @@ -81,21 +81,21 @@ ReturnKind GCInfo::getReturnKind() return GetStructReturnKind(first, second); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case TYP_FLOAT: case TYP_DOUBLE: return RT_Float; -#endif // _TARGET_X86_ +#endif // TARGET_X86 default: return RT_Scalar; } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case TYP_FLOAT: case TYP_DOUBLE: return RT_Float; -#endif // _TARGET_X86_ +#endif // TARGET_X86 default: return RT_Scalar; @@ -2452,7 +2452,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un if (compiler->codeGen->GetInterruptible()) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(compiler->IsFullPtrRegMapRequired()); unsigned ptrRegs = 0; @@ -2776,7 +2776,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un if (!mask) dest = base; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Terminate the table with 0xFF */ @@ -2786,7 +2786,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un } else if (compiler->isFramePointerUsed()) // GetInterruptible() is false { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* Encoding table for methods with an EBP frame and that are not fully interruptible @@ -3063,7 +3063,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un if (!mask) dest = base; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Terminate the table with 0xFF */ @@ -3075,7 +3075,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un { assert(compiler->IsFullPtrRegMapRequired()); 
-#ifdef _TARGET_X86_ +#ifdef TARGET_X86 regPtrDsc* genRegPtrTemp; regNumber thisRegNum = regNumber(0); @@ -3540,7 +3540,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, un assert(pasStk.pasCurDepth() == 0); -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Terminate the table with 0xFF */ @@ -3799,7 +3799,7 @@ class GcInfoEncoderWithLogging } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 void SetWantsReportOnlyLeaf() { m_gcInfoEncoder->SetWantsReportOnlyLeaf(); @@ -3808,7 +3808,7 @@ class GcInfoEncoderWithLogging printf("Set WantsReportOnlyLeaf.\n"); } } -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) void SetHasTailCalls() { m_gcInfoEncoder->SetHasTailCalls(); @@ -3817,7 +3817,7 @@ class GcInfoEncoderWithLogging printf("Set HasTailCalls.\n"); } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 void SetSizeOfStackOutgoingAndScratchArea(UINT32 size) { @@ -3944,30 +3944,30 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz #if defined(FEATURE_EH_FUNCLETS) if (compiler->lvaPSPSym != BAD_VAR_NUM) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // The PSPSym is relative to InitialSP on X64 and CallerSP on other platforms. gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (compiler->ehAnyFunclets()) { // Set this to avoid double-reporting the parent frame (unlike JIT64) gcInfoEncoderWithLog->SetWantsReportOnlyLeaf(); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // FEATURE_EH_FUNCLETS -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH if (compiler->codeGen->GetHasTailCalls()) { gcInfoEncoderWithLog->SetHasTailCalls(); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH #if FEATURE_FIXED_OUT_ARGS // outgoing stack area size diff --git a/src/coreclr/src/jit/gcinfo.cpp b/src/coreclr/src/jit/gcinfo.cpp index 32899988ce38b..450475d42274d 100644 --- a/src/coreclr/src/jit/gcinfo.cpp +++ b/src/coreclr/src/jit/gcinfo.cpp @@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "emit.h" #include "jitgcinfo.h" -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #include "gcinfoencoder.h" //this includes a LOT of other files too #endif diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp index 161528d00185d..1d2ec29e80fdd 100644 --- a/src/coreclr/src/jit/gentree.cpp +++ b/src/coreclr/src/jit/gentree.cpp @@ -707,14 +707,14 @@ int GenTree::GetRegisterDstCount() const return (const_cast(this))->AsPutArgSplit()->gtNumRegs; } #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) else if (OperIsMultiRegOp()) { // A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST. // For the latter two (ARM-only), they only have multiple registers if they produce a long value // (GT_MUL_LONG always produces a long value). CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return (TypeGet() == TYP_LONG) ? 
2 : 1; #else assert(OperIs(GT_MUL_LONG)); @@ -1979,7 +1979,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) break; case GT_CNS_LNG: bits = (UINT64)tree->AsLngCon()->gtLconVal; -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT add = bits; #else // 32-bit host add = genTreeHashAdd(uhi32(bits), ulo32(bits)); @@ -1987,7 +1987,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) break; case GT_CNS_DBL: bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal); -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT add = bits; #else // 32-bit host add = genTreeHashAdd(uhi32(bits), ulo32(bits)); @@ -2009,7 +2009,7 @@ unsigned Compiler::gtHashValue(GenTree* tree) // clang-format off // narrow 'add' into a 32-bit 'val' unsigned val; -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT val = genTreeHashAdd(uhi32(add), ulo32(add)); #else // 32-bit host val = add; @@ -2542,7 +2542,7 @@ unsigned Compiler::gtSetListOrder(GenTree* list, bool isListCallArgs, bool callA if (op1->GetCostSz() != 0) { costSz += op1->GetCostSz(); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (callArgsInRegs) // push is smaller than mov to reg #endif { @@ -2583,7 +2583,7 @@ unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lat if (argNode->GetCostSz() != 0) { costSz += argNode->GetCostSz(); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (lateArgs) // push is smaller than mov to reg #endif { @@ -2769,7 +2769,7 @@ bool Compiler::gtIsLikelyRegVar(GenTree* tree) return false; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsFloating(tree->TypeGet())) return false; if (varTypeIsLong(tree->TypeGet())) @@ -2882,7 +2882,7 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ // nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost. addr->gtFlags |= GTF_ADDRMODE_NO_CSE; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // addrmodeCount is the count of items that we used to form // an addressing mode. The maximum value is 4 when we have // all of these: { base, idx, cns, mul } @@ -2974,7 +2974,7 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ } } } -#elif defined _TARGET_ARM_ +#elif defined TARGET_ARM if (base) { *pCostEx += base->GetCostEx(); @@ -3013,7 +3013,7 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ } } } -#elif defined _TARGET_ARM64_ +#elif defined TARGET_ARM64 if (base) { *pCostEx += base->GetCostEx(); @@ -3076,7 +3076,7 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ // we have already found either a non-ADD op1 or a non-constant op2. gtWalkOp(&op1, &op2, nullptr, true); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call // gtWalkOp on both operands of the original GT_ADD. // This is not done for ARMARCH. Though the stated reason is that we don't try to create a @@ -3086,7 +3086,7 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ // into the addressing mode. // Walk op2 looking for non-overflow GT_ADDs of constants. 
gtWalkOp(&op2, &op1, nullptr, true); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) // OK we are done walking the tree // Now assert that op1 and op2 correspond with base and idx @@ -3237,7 +3237,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) { switch (oper) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM case GT_CNS_LNG: costSz = 9; costEx = 4; @@ -3279,7 +3279,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) goto COMMON_CNS; } -#elif defined _TARGET_XARCH_ +#elif defined TARGET_XARCH case GT_CNS_LNG: costSz = 10; @@ -3305,13 +3305,13 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costSz = 1; costEx = 1; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) else if (iconNeedsReloc || !con->FitsInI32()) { costSz = 10; costEx = 3; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 else { costSz = 4; @@ -3320,7 +3320,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) goto COMMON_CNS; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) case GT_CNS_LNG: case GT_CNS_STR: case GT_CNS_INT: @@ -3390,7 +3390,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costSz += 1; } } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // increase costSz for floating point locals if (isflt) { @@ -3404,7 +3404,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) break; case GT_CLS_VAR: -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // We generate movw/movt/ldr level = 1; costEx = 3 + IND_COST_EX; // 6 @@ -3505,7 +3505,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) break; case GT_CAST: -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) costEx = 1; costSz = 1; if (isflt || varTypeIsFloating(op1->TypeGet())) @@ -3513,7 +3513,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costEx = 3; costSz = 4; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) costEx = 1; costSz = 2; if (isflt || varTypeIsFloating(op1->TypeGet())) @@ -3521,7 +3521,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costEx = 2; costSz = 4; } -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) costEx = 1; costSz = 2; @@ -3651,7 +3651,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costSz = 2 * 2; break; -#if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_XARCH_) +#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) case GT_HWINTRINSIC: { if (tree->AsHWIntrinsic()->OperIsMemoryLoadOrStore()) @@ -3669,7 +3669,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) } } break; -#endif // FEATURE_HW_INTRINSICS && _TARGET_XARCH_ +#endif // FEATURE_HW_INTRINSICS && TARGET_XARCH case GT_BLK: case GT_IND: @@ -3700,9 +3700,9 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) { costEx += 1; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM costSz += 2; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } // Can we form an addressing mode with this indirection? 
@@ -3751,7 +3751,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) /* Indirection of an enregister LCL_VAR, don't increase costEx/costSz */ goto DONE; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH else if (op1->IsCnsIntOrI()) { // Indirection of a CNS_INT, subtract 1 from costEx @@ -3780,13 +3780,13 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costEx = 1; costSz = 1; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (isflt) { costSz += 2; } #endif -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (varTypeIsLong(op1->TypeGet())) { /* Operations on longs are more expensive */ @@ -3854,7 +3854,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costSz += 3; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((tree->gtType == TYP_LONG) || tree->gtOverflow()) { /* We use imulEAX for TYP_LONG and overflow multiplications */ @@ -3864,7 +3864,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) /* The 64-bit imul instruction costs more */ costEx += 4; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } break; @@ -4065,7 +4065,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) if (!op2->IsCnsIntOrI()) { costEx += 3; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT // Variable sized LONG shifts require the use of a helper call // if (tree->gtType == TYP_LONG) @@ -4075,7 +4075,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costEx += 3 * IND_COST_EX; costSz += 4; } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } break; @@ -4324,7 +4324,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (tree->AsCall()->IsVirtualStub()) { // We generate movw/movt/ldr @@ -4343,7 +4343,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) } costSz += 2; #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH costSz += 3; #endif } @@ -5345,7 +5345,7 @@ bool GenTree::OperRequiresCallFlag(Compiler* comp) case GT_INTRINSIC: return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicId); -#if FEATURE_FIXED_OUT_ARGS && !defined(_TARGET_64BIT_) +#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT) case GT_LSH: case GT_RSH: case GT_RSZ: @@ -5359,7 +5359,7 @@ bool GenTree::OperRequiresCallFlag(Compiler* comp) // tree walk of the argument tree, so we just do it when morphing, instead, even though we'll // mark non-argument trees (that will still get converted to calls, anyway). 
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT); -#endif // FEATURE_FIXED_OUT_ARGS && !_TARGET_64BIT_ +#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT default: return false; @@ -5889,7 +5889,7 @@ GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue) GenTree* Compiler::gtNewLconNode(__int64 value) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value); #else GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value); @@ -6111,7 +6111,7 @@ GenTreeCall* Compiler::gtNewCallNode( // Initialize spill flags of gtOtherRegs node->ClearOtherRegFlags(); -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) // Initialize the multi-reg long return info if necessary if (varTypeIsLong(node)) { @@ -6125,7 +6125,7 @@ GenTreeCall* Compiler::gtNewCallNode( // must be a long returned in two registers assert(retTypeDesc->GetReturnRegCount() == 2); } -#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_ARM) return node; } @@ -6603,12 +6603,12 @@ void GenTreeIntCon::FixupInitBlkValue(var_types asgType) if (size >= 4) { cns |= cns << 16; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (size == 8) { cns |= cns << 32; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT // Make the type match for evaluation types. gtType = asgType; @@ -6797,7 +6797,7 @@ GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg assert(arg != nullptr); GenTree* node = nullptr; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers. node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr); if (type == TYP_LONG) @@ -6831,7 +6831,7 @@ GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg) assert(arg != nullptr); GenTree* node = nullptr; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers. node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr); #else @@ -8196,7 +8196,7 @@ GenTree* Compiler::gtGetThisArg(GenTreeCall* call) bool GenTree::gtSetFlags() const { // - // When FEATURE_SET_FLAGS (_TARGET_ARM_) is active the method returns true + // When FEATURE_SET_FLAGS (TARGET_ARM) is active the method returns true // when the gtFlags has the flag GTF_SET_FLAGS set // otherwise the architecture will be have instructions that typically set // the flags and this method will return true. 
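The FixupInitBlkValue hunk above keeps the TARGET_64BIT guard around the final widening step of an init-block fill constant. Here is a self-contained sketch of that arithmetic; the helper name is made up, and the initial byte-to-16-bit step is assumed because it sits outside the visible context lines.

#include <cstdint>
#include <cstdio>

static uint64_t ReplicateFillValue(uint8_t fill, unsigned size)
{
    uint64_t cns = fill;
    if (size >= 2)
    {
        cns |= cns << 8;      // 0xAB       -> 0xABAB (assumed step, not shown in the hunk)
    }
    if (size >= 4)
    {
        cns |= cns << 16;     // 0xABAB     -> 0xABABABAB
#if defined(TARGET_64BIT)
        if (size == 8)
        {
            cns |= cns << 32; // 0xABABABAB -> 0xABABABABABABABAB, 64-bit targets only
        }
#endif // TARGET_64BIT
    }
    return cns;
}

int main()
{
    std::printf("0x%llx\n", (unsigned long long)ReplicateFillValue(0xAB, 4)); // prints 0xabababab
}

On a 64-bit build, ReplicateFillValue(0xAB, 8) yields 0xABABABABABABABAB, which is the widened constant the block store ends up using.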
@@ -9831,7 +9831,7 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, __in __in_z _ goto DASH; case GT_MUL: -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif if (tree->gtFlags & GTF_MUL_64RSLT) @@ -10078,7 +10078,7 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, __in __in_z _ if (tree->gtOper == GT_RUNTIMELOOKUP) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd)); #else printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd)); @@ -10165,7 +10165,7 @@ void Compiler::gtDispRegVal(GenTree* tree) } #endif -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (tree->OperIsMultiRegOp() && (tree->AsMultiRegOp()->gtOtherReg != REG_NA)) { printf(",%s", compRegVarName(tree->AsMultiRegOp()->gtOtherReg)); @@ -10237,12 +10237,12 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, cons ilName = "OutArgs"; } #endif // FEATURE_FIXED_OUT_ARGS -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (lclNum == lvaPromotedStructAssemblyScratchVar) { ilName = "PromotedStructScratch"; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #if !defined(FEATURE_EH_FUNCLETS) else if (lclNum == lvaShadowSPslotsVar) { @@ -10431,7 +10431,7 @@ void Compiler::gtDispConst(GenTree* tree) else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000)) { printf(" %ld", dspIconVal); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT } else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0) { @@ -10665,13 +10665,13 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) { LclVarDsc* fieldVarDsc = &lvaTable[i]; const char* fieldName; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (varTypeIsLong(varDsc)) { fieldName = (i == 0) ? "lo" : "hi"; } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) { fldHnd = info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal); fieldName = eeGetFieldName(fldHnd); @@ -10967,7 +10967,7 @@ void Compiler::gtDispTree(GenTree* tree, { switch (tree->AsBlk()->gtBlkOpKind) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH case GenTreeBlk::BlkOpKindRepInstr: printf(" (RepInstr)"); break; @@ -10975,7 +10975,7 @@ void Compiler::gtDispTree(GenTree* tree, case GenTreeBlk::BlkOpKindUnroll: printf(" (Unroll)"); break; -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 case GenTreeBlk::BlkOpKindHelper: printf(" (Helper)"); break; @@ -11426,7 +11426,7 @@ void Compiler::gtGetArgMsg( } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (curArgTabEntry->IsSplit()) { regNumber firstReg = curArgTabEntry->GetRegNum(); @@ -11488,7 +11488,7 @@ void Compiler::gtGetArgMsg( } return; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #if FEATURE_FIXED_OUT_ARGS if (listCount == -1) { @@ -11556,7 +11556,7 @@ void Compiler::gtGetLateArgMsg( { sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (curArgTabEntry->IsSplit()) { regNumber firstReg = curArgTabEntry->GetRegNum(); @@ -11618,7 +11618,7 @@ void Compiler::gtGetLateArgMsg( } return; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM else { #if FEATURE_MULTIREG_ARGS @@ -14155,12 +14155,12 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) } #endif -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits // need to be discarded. 
Since constant values are stored as ssize_t and the node // has TYP_INT the result needs to be sign extended rather than zero extended. i1 = INT32(i1); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT /* Also all conditional folding jumps here since the node hanging from * GT_JTRUE has to be a GT_CNS_INT - value 0 or 1 */ @@ -16316,7 +16316,7 @@ bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op) return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Returns true if this absolute address fits within the base of an addr mode. // On Amd64 this effectively means, whether an absolute indirect address can // be encoded as 32-bit offset relative to IP or zero. @@ -16374,7 +16374,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp) } } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // Returns true if this absolute address fits within the base of an addr mode. // On x86 all addresses are 4-bytes and can be directly encoded in an addr mode. bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp) @@ -16396,7 +16396,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp) // If generating relocatable code, icons should be reported for recording relocatons. return comp->opts.compReloc && IsIconHandle(); } -#endif //_TARGET_X86_ +#endif // TARGET_X86 bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pObj, GenTree** pStatic, FieldSeqNode** pFldSeq) { @@ -16668,7 +16668,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO { isHWSIMD = false; } -#if defined(_TARGET_ARM64_) && defined(FEATURE_HW_INTRINSICS) +#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS) else { assert(simdHandle == m_simdHandleCache->Vector64FloatHandle); @@ -16692,7 +16692,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO case TYP_UINT: assert(simdHandle == m_simdHandleCache->Vector64UIntHandle); break; -#endif // defined(_TARGET_ARM64_) && defined(FEATURE_HW_INTRINSICS) +#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS) default: break; } @@ -16789,7 +16789,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO break; } break; -#endif // _TARGET_XARCH_ && FEATURE_HW_INTRINSICS +#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS default: break; } @@ -16798,7 +16798,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO unsigned size = genTypeSize(simdType); if (isHWSIMD) { -#if defined(_TARGET_XARCH_) && defined(FEATURE_HW_INTRINSICS) +#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) switch (simdType) { case TYP_SIMD16: @@ -16822,7 +16822,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO default: break; } -#endif // _TARGET_XARCH_ && FEATURE_HW_INTRINSICS +#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS JITDUMP("Coudn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType), varTypeName(baseType)); } @@ -18099,18 +18099,18 @@ bool GenTree::isCommutativeHWIntrinsic() const { assert(gtOper == GT_HWINTRINSIC); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->gtHWIntrinsicId); #else return false; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } bool GenTree::isContainableHWIntrinsic() const { assert(gtOper == GT_HWINTRINSIC); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH switch (AsHWIntrinsic()->gtHWIntrinsicId) { case NI_SSE_LoadAlignedVector128: @@ 
-18134,7 +18134,7 @@ bool GenTree::isContainableHWIntrinsic() const } #else return false; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } bool GenTree::isRMWHWIntrinsic(Compiler* comp) @@ -18142,7 +18142,7 @@ bool GenTree::isRMWHWIntrinsic(Compiler* comp) assert(gtOper == GT_HWINTRINSIC); assert(comp != nullptr); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (!comp->canUseVexEncoding()) { return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->gtHWIntrinsicId); @@ -18175,7 +18175,7 @@ bool GenTree::isRMWHWIntrinsic(Compiler* comp) } #else return false; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type, @@ -18303,7 +18303,7 @@ GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORI // Returns true for the HW Instrinsic instructions that have MemoryLoad semantics, false otherwise bool GenTreeHWIntrinsic::OperIsMemoryLoad() { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // Some xarch instructions have MemoryLoad sematics HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(gtHWIntrinsicId); if (category == HW_Category_MemoryLoad) @@ -18338,14 +18338,14 @@ bool GenTreeHWIntrinsic::OperIsMemoryLoad() } } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return false; } // Returns true for the HW Instrinsic instructions that have MemoryStore semantics, false otherwise bool GenTreeHWIntrinsic::OperIsMemoryStore() { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // Some xarch instructions have MemoryStore sematics HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(gtHWIntrinsicId); if (category == HW_Category_MemoryStore) @@ -18373,14 +18373,14 @@ bool GenTreeHWIntrinsic::OperIsMemoryStore() } } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return false; } // Returns true for the HW Instrinsic instructions that have MemoryLoad semantics, false otherwise bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH return OperIsMemoryLoad() || OperIsMemoryStore(); #else return false; @@ -18469,7 +18469,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA m_regType[i] = comp->GetEightByteType(structDesc, i); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // a non-HFA struct returned using two registers // @@ -18524,7 +18524,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA // void ReturnTypeDesc::InitializeLongReturnType(Compiler* comp) { -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) // Setups up a ReturnTypeDesc for returning a long using two registers // @@ -18532,11 +18532,11 @@ void ReturnTypeDesc::InitializeLongReturnType(Compiler* comp) m_regType[0] = TYP_INT; m_regType[1] = TYP_INT; -#else // not (_TARGET_X86_ or _TARGET_ARM_) +#else // not (TARGET_X86 or TARGET_ARM) m_regType[0] = TYP_LONG; -#endif // _TARGET_X86_ or _TARGET_ARM_ +#endif // TARGET_X86 or TARGET_ARM #ifdef DEBUG m_inited = true; @@ -18609,7 +18609,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) } } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) if (idx == 0) { @@ -18620,7 +18620,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) resultReg = REG_LNGRET_HI; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) var_types regType = GetReturnRegType(idx); if (varTypeIsIntegralOrI(regType)) @@ -18652,7 +18652,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) } } -#elif 
defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) var_types regType = GetReturnRegType(idx); if (varTypeIsIntegralOrI(regType)) diff --git a/src/coreclr/src/jit/gentree.h b/src/coreclr/src/jit/gentree.h index 54695659f1227..8d5578190eea0 100644 --- a/src/coreclr/src/jit/gentree.h +++ b/src/coreclr/src/jit/gentree.h @@ -75,7 +75,7 @@ enum genTreeOps : BYTE GT_COUNT, -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target. // For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes GT_CNS_NATIVELONG = GT_CNS_INT, @@ -337,7 +337,7 @@ struct Statement; /*****************************************************************************/ -#ifndef _HOST_64BIT_ +#ifndef HOST_64BIT #include #endif @@ -1209,18 +1209,18 @@ struct GenTree bool OperIsMultiRegOp() const { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (OperIs(GT_MUL_LONG)) { return true; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (OperIs(GT_PUTARG_REG, GT_BITCAST)) { return true; } -#endif // _TARGET_ARM_ -#endif // _TARGET_64BIT_ +#endif // TARGET_ARM +#endif // TARGET_64BIT return false; } @@ -1286,7 +1286,7 @@ struct GenTree static bool OperIsShiftLong(genTreeOps gtOper) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return false; #else return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO); @@ -1321,7 +1321,7 @@ struct GenTree static bool OperIsMul(genTreeOps gtOper) { return (gtOper == GT_MUL) || (gtOper == GT_MULHI) -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) || (gtOper == GT_MUL_LONG) #endif ; @@ -1344,7 +1344,7 @@ struct GenTree || OperIsShiftOrRotate(op); } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH static bool OperIsRMWMemOp(genTreeOps gtOper) { // Return if binary op is one of the supported operations for RMW of memory. @@ -1356,7 +1356,7 @@ struct GenTree // Return if binary op is one of the supported operations for RMW of memory. return OperIsRMWMemOp(gtOper); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH static bool OperIsUnary(genTreeOps gtOper) { @@ -1437,7 +1437,7 @@ struct GenTree static bool OperMayOverflow(genTreeOps gtOper) { return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST) -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) || (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI) #endif ); @@ -1528,7 +1528,7 @@ struct GenTree // This is here for cleaner GT_LONG #ifdefs. 
static bool OperIsLong(genTreeOps gtOper) { -#if defined(_TARGET_64BIT_) +#if defined(TARGET_64BIT) return false; #else return gtOper == GT_LONG; @@ -1611,9 +1611,9 @@ struct GenTree case GT_HWINTRINSIC: #endif // FEATURE_HW_INTRINSICS -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) case GT_PUTARG_REG: -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) return true; default: @@ -2834,7 +2834,7 @@ struct GenTreeIntConCommon : public GenTree static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return (int32_t)val == val; #else return true; @@ -2844,7 +2844,7 @@ struct GenTreeIntConCommon : public GenTree bool ImmedValNeedsReloc(Compiler* comp); bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH bool FitsInAddrBase(Compiler* comp); bool AddrNeedsReloc(Compiler* comp); #endif @@ -2921,7 +2921,7 @@ struct GenTreeIntCon : public GenTreeIntConCommon void FixupInitBlkValue(var_types asgType); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT void TruncateOrSignExtend32() { if (gtFlags & GTF_UNSIGNED) @@ -2933,7 +2933,7 @@ struct GenTreeIntCon : public GenTreeIntConCommon gtIconVal = INT32(gtIconVal); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT #if DEBUGGABLE_GENTREE GenTreeIntCon() : GenTreeIntConCommon() @@ -2970,7 +2970,7 @@ struct GenTreeLngCon : public GenTreeIntConCommon inline INT64 GenTreeIntConCommon::LngValue() { -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT assert(gtOper == GT_CNS_LNG); return AsLngCon()->gtLconVal; #else @@ -2980,7 +2980,7 @@ inline INT64 GenTreeIntConCommon::LngValue() inline void GenTreeIntConCommon::SetLngValue(INT64 val) { -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT assert(gtOper == GT_CNS_LNG); AsLngCon()->gtLconVal = val; #else @@ -3006,11 +3006,11 @@ inline void GenTreeIntConCommon::SetIconValue(ssize_t val) inline INT64 GenTreeIntConCommon::IntegralValue() { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return LngValue(); #else return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue(); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } /* gtDblCon -- double constant (GT_CNS_DBL) */ @@ -3990,11 +3990,11 @@ struct GenTreeCall final : public GenTree // bool HasMultiRegRetVal() const { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return varTypeIsLong(gtType); -#elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_) +#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) return varTypeIsLong(gtType) || (varTypeIsStruct(gtType) && !HasRetBufArg()); -#elif defined(FEATURE_HFA) && defined(_TARGET_ARM64_) +#elif defined(FEATURE_HFA) && defined(TARGET_ARM64) // SIMD types are returned in vector regs on ARM64. return (gtType == TYP_STRUCT) && !HasRetBufArg(); #elif FEATURE_MULTIREG_RET @@ -4262,7 +4262,7 @@ struct GenTreeCmpXchg : public GenTree #endif }; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) struct GenTreeMultiRegOp : public GenTreeOp { regNumber gtOtherReg; @@ -4416,7 +4416,7 @@ struct GenTreeMultiRegOp : public GenTreeOp } #endif }; -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) struct GenTreeFptrVal : public GenTree { @@ -5117,10 +5117,10 @@ struct GenTreeBlk : public GenTreeIndir enum { BlkOpKindInvalid, -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 BlkOpKindHelper, #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH BlkOpKindRepInstr, #endif BlkOpKindUnroll, @@ -6294,7 +6294,7 @@ struct GenCondition // see GenConditionDesc and its associated mapping table for more details. 
bool PreferSwap() const { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU); #else return false; @@ -6826,7 +6826,7 @@ inline bool GenTree::IsMultiRegNode() const } #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (OperIsMultiRegOp()) { return true; @@ -6869,7 +6869,7 @@ inline unsigned GenTree::GetMultiRegCount() } #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (OperIsMultiRegOp()) { return AsMultiRegOp()->GetRegCount(); @@ -6920,7 +6920,7 @@ inline regNumber GenTree::GetRegByIndex(int regIndex) return AsPutArgSplit()->GetRegNumByIdx(regIndex); } #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (OperIsMultiRegOp()) { return AsMultiRegOp()->GetRegNumByIdx(regIndex); @@ -6969,7 +6969,7 @@ inline var_types GenTree::GetRegTypeByIndex(int regIndex) return AsPutArgSplit()->GetRegType(regIndex); } #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (OperIsMultiRegOp()) { return AsMultiRegOp()->GetRegType(regIndex); @@ -7020,21 +7020,21 @@ inline bool GenTree::IsCnsIntOrI() const inline bool GenTree::IsIntegralConst() const { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return IsCnsIntOrI(); -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG)); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } // Is this node an integer constant that fits in a 32-bit signed integer (INT32) inline bool GenTree::IsIntCnsFitsInI32() { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return IsCnsIntOrI() && AsIntCon()->FitsInI32(); -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT return IsCnsIntOrI(); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } inline bool GenTree::IsCnsFltOrDbl() const @@ -7080,7 +7080,7 @@ inline bool GenTree::isUsedFromSpillTemp() const /*****************************************************************************/ -#ifndef _HOST_64BIT_ +#ifndef HOST_64BIT #include #endif diff --git a/src/coreclr/src/jit/gtlist.h b/src/coreclr/src/jit/gtlist.h index 31a301651ae5d..b4e9b82e46fcf 100644 --- a/src/coreclr/src/jit/gtlist.h +++ b/src/coreclr/src/jit/gtlist.h @@ -62,7 +62,7 @@ GTNODE(MEMORYBARRIER , GenTree ,0,(GTK_LEAF|GTK_NOVALUE)) GTNODE(KEEPALIVE , GenTree ,0,(GTK_UNOP|GTK_NOVALUE)) // keep operand alive, generate no code, produce no result GTNODE(CAST , GenTreeCast ,0,(GTK_UNOP|GTK_EXOP)) // conversion to another type -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) GTNODE(BITCAST , GenTreeMultiRegOp ,0,GTK_UNOP) // reinterpretation of bits as another type #else GTNODE(BITCAST , GenTreeOp ,0,GTK_UNOP) // reinterpretation of bits as another type @@ -167,7 +167,7 @@ GTNODE(MKREFANY , GenTreeOp ,0,GTK_BINOP|GTK_NOTLIR) GTNODE(LEA , GenTreeAddrMode ,0,(GTK_BINOP|GTK_EXOP)) -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) // A GT_LONG node simply represents the long value produced by the concatenation // of its two (lower and upper half) operands. Some GT_LONG nodes are transient, // during the decomposing of longs; others are handled by codegen as operands of @@ -189,7 +189,7 @@ GTNODE(SUB_HI , GenTreeOp ,0,GTK_BINOP) // with long results are morphed into helper calls. It is similar to GT_MULHI, // the difference being that GT_MULHI drops the lo part of the result, whereas // GT_MUL_LONG keeps both parts of the result. 
-#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) GTNODE(MUL_LONG , GenTreeMultiRegOp ,1,GTK_BINOP) #endif @@ -202,7 +202,7 @@ GTNODE(MUL_LONG , GenTreeMultiRegOp ,1,GTK_BINOP) // RSH_LO represents the lo operation of a 64-bit right shift by a constant int. GTNODE(LSH_HI , GenTreeOp ,0,GTK_BINOP) GTNODE(RSH_LO , GenTreeOp ,0,GTK_BINOP) -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) #ifdef FEATURE_SIMD GTNODE(SIMD , GenTreeSIMD ,0,(GTK_BINOP|GTK_EXOP)) // SIMD functions/operators/intrinsics @@ -223,7 +223,7 @@ GTNODE(JCC , GenTreeCC ,0,(GTK_LEAF|GTK_NOVALUE)) // Che // by GenTreeCC::gtCondition is true. GTNODE(SETCC , GenTreeCC ,0,GTK_LEAF) // Checks the condition flags and produces 1 if the condition specified // by GenTreeCC::gtCondition is true and 0 otherwise. -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH GTNODE(BT , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE)) // The XARCH BT instruction. Like CMP, this sets the condition flags (CF // to be precise) and does not produce a value. #endif @@ -288,7 +288,7 @@ GTNODE(PHYSREG , GenTreePhysReg ,0,GTK_LEAF) GTNODE(EMITNOP , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // emitter-placed nop GTNODE(PINVOKE_PROLOG , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // pinvoke prolog seq GTNODE(PINVOKE_EPILOG , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // pinvoke epilog seq -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) GTNODE(PUTARG_REG , GenTreeMultiRegOp ,0,GTK_UNOP) // operator that places outgoing arg in register #else GTNODE(PUTARG_REG , GenTreeOp ,0,GTK_UNOP) // operator that places outgoing arg in register diff --git a/src/coreclr/src/jit/gtstructs.h b/src/coreclr/src/jit/gtstructs.h index e10087ea082e8..70c409b3b2e19 100644 --- a/src/coreclr/src/jit/gtstructs.h +++ b/src/coreclr/src/jit/gtstructs.h @@ -119,9 +119,9 @@ GTSTRUCT_1(HWIntrinsic , GT_HWINTRINSIC) GTSTRUCT_1(AllocObj , GT_ALLOCOBJ) GTSTRUCT_1(RuntimeLookup, GT_RUNTIMELOOKUP) GTSTRUCT_2(CC , GT_JCC, GT_SETCC) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) GTSTRUCT_1(MultiRegOp , GT_MUL_LONG) -#elif defined (_TARGET_ARM_) +#elif defined (TARGET_ARM) GTSTRUCT_3(MultiRegOp , GT_MUL_LONG, GT_PUTARG_REG, GT_BITCAST) #endif /*****************************************************************************/ diff --git a/src/coreclr/src/jit/hashbv.h b/src/coreclr/src/jit/hashbv.h index b07b3d89c8de2..9ebcb247b2435 100644 --- a/src/coreclr/src/jit/hashbv.h +++ b/src/coreclr/src/jit/hashbv.h @@ -24,7 +24,7 @@ #define ELEMENTS_PER_NODE (1 << LOG2_ELEMENTS_PER_NODE) #define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 typedef unsigned __int64 elemType; typedef unsigned __int64 indexType; #else diff --git a/src/coreclr/src/jit/hwintrinsic.cpp b/src/coreclr/src/jit/hwintrinsic.cpp index 984aca2b839bb..1fb7d8d4ef6ba 100644 --- a/src/coreclr/src/jit/hwintrinsic.cpp +++ b/src/coreclr/src/jit/hwintrinsic.cpp @@ -9,11 +9,11 @@ static const HWIntrinsicInfo hwIntrinsicInfoArray[] = { // clang-format off -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #define HARDWARE_INTRINSIC(id, name, isa, ival, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ {NI_##id, name, InstructionSet_##isa, ival, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast(flag)}, #include "hwintrinsiclistxarch.h" -#elif defined (_TARGET_ARM64_) +#elif defined (TARGET_ARM64) #define HARDWARE_INTRINSIC(isa, name, ival, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ {NI_##isa##_##name, #name, InstructionSet_##isa, ival, 
static_cast(size), numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, static_cast(flag)}, #include "hwintrinsiclistarm64.h" @@ -115,7 +115,7 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, va assert(!"Didn't find a class handle for simdType"); } } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH else if (simdType == TYP_SIMD32) { switch (simdBaseType) @@ -144,8 +144,8 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, va assert(!"Didn't find a class handle for simdType"); } } -#endif // _TARGET_XARCH_ -#ifdef _TARGET_ARM64_ +#endif // TARGET_XARCH +#ifdef TARGET_ARM64 else if (simdType == TYP_SIMD8) { switch (simdBaseType) @@ -168,7 +168,7 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, va assert(!"Didn't find a class handle for simdType"); } } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 return NO_CLASS_HANDLE; } @@ -436,7 +436,7 @@ GenTree* Compiler::addRangeCheckIfNeeded(NamedIntrinsic intrinsic, GenTree* immO // AVX2 Gather intrinsics no not need the range-check // because their imm-parameter have discrete valid values that are handle by managed code if (mustExpand && !HWIntrinsicInfo::HasFullRangeImm(intrinsic) && HWIntrinsicInfo::isImmOp(intrinsic, immOp) -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH && !HWIntrinsicInfo::isAVX2GatherIntrinsic(intrinsic) #endif ) @@ -677,7 +677,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic, retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, baseType, simdSize); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (intrinsic == NI_AVX2_GatherVector128 || intrinsic == NI_AVX2_GatherVector256) { assert(varTypeIsSIMD(op2->TypeGet())); diff --git a/src/coreclr/src/jit/hwintrinsic.h b/src/coreclr/src/jit/hwintrinsic.h index 13406b8e25511..2931c51813e1e 100644 --- a/src/coreclr/src/jit/hwintrinsic.h +++ b/src/coreclr/src/jit/hwintrinsic.h @@ -141,7 +141,7 @@ struct HWIntrinsicInfo static bool isFullyImplementedIsa(InstructionSet isa); static bool isScalarIsa(InstructionSet isa); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH static bool isAVX2GatherIntrinsic(NamedIntrinsic id); #endif diff --git a/src/coreclr/src/jit/hwintrinsicxarch.cpp b/src/coreclr/src/jit/hwintrinsicxarch.cpp index c073dd5338440..9056733ff4781 100644 --- a/src/coreclr/src/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/src/jit/hwintrinsicxarch.cpp @@ -669,7 +669,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsLong(baseType)) { // TODO-XARCH-CQ: It may be beneficial to emit the movq @@ -677,7 +677,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, // works on 32-bit x86 systems. break; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (compSupports(InstructionSet_SSE2) || (compSupports(InstructionSet_SSE) && (baseType == TYP_FLOAT))) { @@ -728,7 +728,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsLong(baseType)) { // TODO-XARCH-CQ: It may be beneficial to emit the movq @@ -736,7 +736,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, // works on 32-bit x86 systems. 
break; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (compSupports(InstructionSet_AVX)) { diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp index 0cf34c35e7dfc..2679a816a3f48 100644 --- a/src/coreclr/src/jit/importer.cpp +++ b/src/coreclr/src/jit/importer.cpp @@ -1235,7 +1235,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, dest = lcl; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updadted to include ARM. impMarkLclDstNotPromotable(lcl->AsLclVarCommon()->GetLclNum(), src, structHnd); @@ -2268,9 +2268,9 @@ bool Compiler::impSpillStackEntry(unsigned level, // be catched when importing the destblock. We still allow int/byrefs and float/double differences. if ((genActualType(valTyp) != genActualType(dstTyp)) && !( -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) || -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp)))) { if (verNeedsVerification()) @@ -2964,7 +2964,7 @@ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) tree->gtType = TYP_I_IMPL; } } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF @@ -2975,7 +2975,7 @@ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } return tree; @@ -3487,7 +3487,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, *pIntrinsicID = intrinsicID; -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM genTreeOps interlockedOperator; #endif @@ -3496,7 +3496,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr) { // must be done regardless of DbgCode and MinOpts @@ -3549,7 +3549,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall); break; -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic // Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used. 
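The surrounding Interlocked hunks gate the intrinsic expansion on TARGET_XARCH / TARGET_ARM64, with the 64-bit variants additionally guarded by TARGET_64BIT. The following compilable sketch shows that guard structure in isolation; the enums and the function are hypothetical stand-ins for the CORINFO_INTRINSIC_* ids and the GT_XADD / GT_XCHG node kinds, not JIT code.

enum class InterlockedKind { XAdd32, Xchg32, XAdd64, Xchg64 };
enum class NodeKind { XAdd, Xchg, HelperCall };

static NodeKind ImportInterlocked(InterlockedKind kind)
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
    switch (kind)
    {
        case InterlockedKind::XAdd32:
            return NodeKind::XAdd;
        case InterlockedKind::Xchg32:
            return NodeKind::Xchg;
#ifdef TARGET_64BIT
        case InterlockedKind::XAdd64:
            return NodeKind::XAdd;
        case InterlockedKind::Xchg64:
            return NodeKind::Xchg;
#endif // TARGET_64BIT
        default:
            break;
    }
#else
    (void)kind; // not expanded as an intrinsic on other targets
#endif // TARGET_XARCH || TARGET_ARM64
    return NodeKind::HelperCall; // fall back to an ordinary call
}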
@@ -3563,7 +3563,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, interlockedOperator = GT_XCHG; goto InterlockedBinOpCommon; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case CORINFO_INTRINSIC_InterlockedAdd64: case CORINFO_INTRINSIC_InterlockedXAdd64: interlockedOperator = GT_XADD; @@ -3571,7 +3571,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, case CORINFO_INTRINSIC_InterlockedXchg64: interlockedOperator = GT_XCHG; goto InterlockedBinOpCommon; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 InterlockedBinOpCommon: assert(callType != TYP_STRUCT); @@ -3594,7 +3594,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case CORINFO_INTRINSIC_MemoryBarrier: @@ -3605,10 +3605,10 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, retNode = op1; break; -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case CORINFO_INTRINSIC_InterlockedCmpXchg32: -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case CORINFO_INTRINSIC_InterlockedCmpXchg64: #endif { @@ -3626,7 +3626,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, retNode = node; break; } -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case CORINFO_INTRINSIC_StringLength: op1 = impPopStack().val; @@ -4048,7 +4048,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, case NI_System_Math_FusedMultiplyAdd: case NI_System_MathF_FusedMultiplyAdd: { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (compSupports(InstructionSet_FMA)) { assert(varTypeIsFloating(callType)); @@ -4071,7 +4071,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callType, 16); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH break; } #endif // FEATURE_HW_INTRINSICS @@ -4115,10 +4115,10 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; @@ -4183,7 +4183,7 @@ GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, op1 = nullptr; -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. 
For prefixed tail calls, // don't do this optimization, because @@ -4343,7 +4343,7 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) } } } -#if defined(_TARGET_XARCH_) // We currently only support BSWAP on x86 +#if defined(TARGET_XARCH) // We currently only support BSWAP on x86 else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) @@ -4351,7 +4351,7 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } -#endif // !defined(_TARGET_XARCH_) +#endif // !defined(TARGET_XARCH) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) @@ -4365,9 +4365,9 @@ NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) namespaceName += 25; const char* platformNamespaceName; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) platformNamespaceName = ".X86"; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform @@ -4704,7 +4704,7 @@ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logM // be turned off during importation). CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #ifdef DEBUG bool canSkipVerificationResult = @@ -4717,7 +4717,7 @@ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logM { tiIsVerifiableCode = FALSE; } -#endif //_TARGET_64BIT_ +#endif // TARGET_64BIT verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); @@ -4743,7 +4743,7 @@ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsH } break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) @@ -4756,7 +4756,7 @@ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsH return typeInfo::nativeInt(); } break; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: @@ -4828,12 +4828,12 @@ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructT return typeInfo(); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { @@ -5679,14 +5679,14 @@ void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsig { if (tiOp1.IsNumberType()) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch"); #else // _TARGET_64BIT // [10/17/2013] Consider changing this: to put on my verification lawyer hat, // this is non-conforming to the ECMA Spec: types don't have to be equivalent, // but compatible, since we can coalesce native int with int32 (see section III.1.5). Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { @@ -6438,7 +6438,7 @@ bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) return true; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // On 64-bit platforms, we disable pinvoke inlining inside of try regions. 
// Note that this could be needed on other architectures too, but we // haven't done enough investigation to know for sure at this point. @@ -6474,7 +6474,7 @@ bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) return false; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT return true; } @@ -6686,7 +6686,7 @@ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) argsToReverse--; } -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif @@ -7146,7 +7146,7 @@ bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType, return true; } -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Jit64 compat: if (callerRetType == TYP_VOID) { @@ -7176,7 +7176,7 @@ bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType, { return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize); } -#endif // _TARGET_AMD64_ || _TARGET_ARM64_ +#endif // TARGET_AMD64 || TARGET_ARM64 return false; } @@ -7251,7 +7251,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed, int cntPop = 0; OPCODE nextOpcode; -#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) +#if !defined(FEATURE_CORECLR) && defined(TARGET_AMD64) do { nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); @@ -7262,7 +7262,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed, // one pop seen so far. #else nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); -#endif // !FEATURE_CORECLR && _TARGET_AMD64_ +#endif // !FEATURE_CORECLR && TARGET_AMD64 if (isCallPopAndRet) { @@ -7270,7 +7270,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed, *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1); } -#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) +#if !defined(FEATURE_CORECLR) && defined(TARGET_AMD64) // Jit64 Compat: // Tail call IL pattern could be either of the following // 1) call/callvirt/calli + ret @@ -7278,7 +7278,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed, return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID))); #else return (nextOpcode == CEE_RET) && (cntPop == 0); -#endif // !FEATURE_CORECLR && _TARGET_AMD64_ +#endif // !FEATURE_CORECLR && TARGET_AMD64 } /***************************************************************************** @@ -7712,7 +7712,7 @@ var_types Compiler::impImportCall(OPCODE opcode, call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; @@ -7979,7 +7979,7 @@ var_types Compiler::impImportCall(OPCODE opcode, varargs is not caller-pop, etc), but not worth it. */ CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (canTailCall) { canTailCall = false; @@ -8486,13 +8486,13 @@ var_types Compiler::impImportCall(OPCODE opcode, // Stack empty check for implicit tail calls. if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0)) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. 
Verify(false, "Stack should be empty after tailcall"); -#else // _TARGET_64BIT_ +#else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); -#endif //!_TARGET_64BIT_ +#endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); @@ -9048,7 +9048,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer - Note that this method is only call for !_TARGET_X86_ + Note that this method is only call for !TARGET_X86 */ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd) @@ -9059,7 +9059,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #ifdef UNIX_AMD64_ABI // No VarArgs for CoreCLR on x64 Unix @@ -9095,7 +9095,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re assert(info.compRetNativeType != TYP_STRUCT); #endif // !UNIX_AMD64_ABI -#elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_) +#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { @@ -9129,7 +9129,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re return impAssignMultiRegTypeToVar(op, retClsHnd); } -#elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_) +#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd)) @@ -9622,14 +9622,14 @@ void Compiler::impImportLeave(BasicBlock* block) // exit) returns to this block step->bbJumpDest->bbRefs++; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->setBBWeight(block->bbWeight); @@ -9767,14 +9767,14 @@ void Compiler::impImportLeave(BasicBlock* block) // finally in the chain) step->bbJumpDest->bbRefs++; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->setBBWeight(block->bbWeight); @@ -9872,13 +9872,13 @@ void Compiler::impImportLeave(BasicBlock* block) step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->setBBWeight(block->bbWeight); @@ -9929,14 +9929,14 @@ void Compiler::impImportLeave(BasicBlock* block) { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return 
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) @@ -10167,13 +10167,13 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT type = TYP_I_IMPL; } @@ -10182,13 +10182,13 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT type = TYP_BYREF; } @@ -10204,7 +10204,7 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) @@ -10218,11 +10218,11 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT type = TYP_BYREF; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); @@ -10254,7 +10254,7 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr type = TYP_LONG; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT else { // int + int => gives an int @@ -11190,14 +11190,14 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impImplicitIorI4Cast(op1, lclTyp); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { assert(!tiVerificationNeeded); // We should have thrown the VerificationException before. 
op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( @@ -11745,7 +11745,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { tiRetVal = verGetArrayElemType(tiArray); typeInfo arrayElemTi = typeInfo(lclTyp); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (opcode == CEE_LDELEM_I) { arrayElemTi = typeInfo::nativeInt(); @@ -11756,7 +11756,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array"); } else -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array"); } @@ -11956,12 +11956,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) // As per ECMA 'index' specified can be either int32 or native int. Verify(tiIndex.IsIntOrNativeIntType(), "bad index"); typeInfo arrayElem = typeInfo(lclTyp); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (opcode == CEE_STELEM_I) { arrayElem = typeInfo::nativeInt(); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem), "bad array"); @@ -12156,12 +12156,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) tiRetVal = tiOp1; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (tiOp2.IsNativeIntType()) { tiRetVal = tiOp2; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } op2 = impPopStack().val; @@ -12218,7 +12218,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); @@ -12496,7 +12496,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = impPopStack().val; op1 = impPopStack().val; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); @@ -12505,7 +12505,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) || @@ -12596,7 +12596,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = impPopStack().val; op1 = impPopStack().val; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); @@ -12605,7 +12605,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? 
TYP_U_IMPL : TYP_I_IMPL); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) || @@ -12841,7 +12841,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) const typeInfo& tiVal = impStackTop().seTypeInfo; Verify(tiVal.IsNumberType(), "bad arg"); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT bool isNative = false; switch (opcode) @@ -12862,7 +12862,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) tiRetVal = typeInfo::nativeInt(); } else -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { tiRetVal = typeInfo(lclTyp).NormaliseForStack(); } @@ -12874,7 +12874,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. // TODO-CQ: remove this when we lower casts long/ulong --> float/double @@ -13131,12 +13131,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (tiVerificationNeeded) { typeInfo instrType(lclTyp); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (opcode == CEE_STIND_I) { instrType = typeInfo::nativeInt(); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType); } else @@ -13156,7 +13156,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = impImplicitR4orR8Cast(op2, lclTyp); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { @@ -13179,7 +13179,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { @@ -13275,12 +13275,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (tiVerificationNeeded) { typeInfo lclTiType(lclTyp); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (opcode == CEE_LDIND_I) { lclTiType = typeInfo::nativeInt(); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType); tiRetVal.NormaliseForStack(); } @@ -13294,7 +13294,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) @@ -14230,7 +14230,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) break; case CORINFO_FIELD_STATIC_TLS: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ @@ -14538,7 +14538,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) break; case CORINFO_FIELD_STATIC_TLS: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ @@ -14605,7 +14605,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) */ CLANG_FORMAT_COMMENT_ANCHOR; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the 
following IR will be accepted: // STMTx (IL 0x... ???) @@ -14620,7 +14620,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } #endif -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { @@ -14750,7 +14750,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) @@ -14765,7 +14765,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT #ifdef FEATURE_READYTORUN_COMPILER if (opts.IsReadyToRun()) @@ -16170,7 +16170,7 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the @@ -16208,7 +16208,7 @@ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO } } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //------------------------------------------------------------------------ // impAssignSmallStructTypeToVar: ensure calls that return small structs whose @@ -16585,8 +16585,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) (unsigned)CHECK_SPILL_ALL); } -#if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI) -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. @@ -16612,7 +16612,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { if (!impInlineInfo->retExpr) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. @@ -16627,7 +16627,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } else -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); @@ -16651,7 +16651,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } else -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); @@ -16696,13 +16696,13 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. 
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); -#else // !defined(_TARGET_AMD64_) +#else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. @@ -16715,7 +16715,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } -#endif // !defined(_TARGET_AMD64_) +#endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { @@ -16737,14 +16737,14 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) // We must have imported a tailcall and jumped to RET if (isTailCall) { -#if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_) +#if defined(FEATURE_CORECLR) || !defined(TARGET_AMD64) // Jit64 compat: // This cannot be asserted on Amd64 since we permit the following IL pattern: // tail.call // pop // ret assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); -#endif // FEATURE_CORECLR || !_TARGET_AMD64_ +#endif // FEATURE_CORECLR || !TARGET_AMD64 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES @@ -17202,7 +17202,7 @@ void Compiler::impImportBlock(BasicBlock* block) markImport = true; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr && @@ -17262,7 +17262,7 @@ void Compiler::impImportBlock(BasicBlock* block) verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { @@ -18093,7 +18093,7 @@ void Compiler::impImport(BasicBlock* method) if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // On AMD64, during verification we have to match JIT64 behavior since the VM is very tighly // coupled with the JIT64 IL Verification logic. 
Look inside verHandleVerificationFailure // method for further explanation on why we raise this exception instead of making the jitted @@ -18103,7 +18103,7 @@ void Compiler::impImport(BasicBlock* method) BADCODE("Basic block marked as not verifiable"); } else -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); @@ -19005,7 +19005,7 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) assert(inlArgNode->OperIsConst()); } } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening @@ -19023,7 +19023,7 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) assert(inlArgNode->OperIsConst()); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } } } @@ -19834,7 +19834,7 @@ void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) switch (intrinsicId) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 @@ -19858,7 +19858,7 @@ bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId) default: return false; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) switch (intrinsicId) { case CORINFO_INTRINSIC_Sqrt: @@ -19871,7 +19871,7 @@ bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId) default: return false; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) switch (intrinsicId) { case CORINFO_INTRINSIC_Sqrt: diff --git a/src/coreclr/src/jit/instr.cpp b/src/coreclr/src/jit/instr.cpp index 40005b944268e..0b31b8a18586b 100644 --- a/src/coreclr/src/jit/instr.cpp +++ b/src/coreclr/src/jit/instr.cpp @@ -36,7 +36,7 @@ const char* CodeGen::genInsName(instruction ins) static const char * const insNames[] = { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #define INST0(id, nm, um, mr, flags) nm, #define INST1(id, nm, um, mr, flags) nm, #define INST2(id, nm, um, mr, mi, flags) nm, @@ -45,7 +45,7 @@ const char* CodeGen::genInsName(instruction ins) #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) nm, #include "instrs.h" -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define INST1(id, nm, fp, ldst, fmt, e1 ) nm, #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) nm, #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) nm, @@ -56,7 +56,7 @@ const char* CodeGen::genInsName(instruction ins) #define INST9(id, nm, fp, ldst, fmt, e1, e2, e3, e4, e5, e6, e7, e8, e9 ) nm, #include "instrs.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define INST1(id, nm, fp, ldst, fmt, e1 ) nm, #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) nm, #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) nm, @@ -193,7 +193,7 @@ void CodeGen::instGen(instruction ins) GetEmitter()->emitIns(ins); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // A workaround necessitated by limitations of emitter // if we are scheduled to insert a nop here, we have to delay it // hopefully we have not missed any other prefix instructions or places @@ -215,14 +215,14 @@ bool CodeGenInterface::instIsFP(instruction ins) { assert((unsigned)ins < _countof(instInfo)); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH return (instInfo[ins] & INS_FLAGS_x87Instr) != 0; #else return (instInfo[ins] & INST_FP) != 0; #endif } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH 
/***************************************************************************** * * Generate a multi-byte NOP instruction. @@ -268,7 +268,7 @@ void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock) void CodeGen::inst_SET(emitJumpKind condition, regNumber reg) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH instruction ins; /* Convert the condition to an instruction opcode */ @@ -330,7 +330,7 @@ void CodeGen::inst_SET(emitJumpKind condition, regNumber reg) // These instructions only write the low byte of 'reg' GetEmitter()->emitIns_R(ins, EA_1BYTE, reg); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) insCond cond; /* Convert the condition to an insCond value */ switch (condition) @@ -423,7 +423,7 @@ void CodeGen::inst_RV_RV(instruction ins, size = emitActualTypeSize(type); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM GetEmitter()->emitIns_R_R(ins, size, reg1, reg2, flags); #else GetEmitter()->emitIns_R_R(ins, size, reg1, reg2); @@ -442,9 +442,9 @@ void CodeGen::inst_RV_RV_RV(instruction ins, emitAttr size, insFlags flags /* = INS_FLAGS_DONT_CARE */) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM GetEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags); -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) GetEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3); #else NYI("inst_RV_RV_RV"); @@ -495,11 +495,11 @@ void CodeGen::inst_set_SV_var(GenTree* tree) void CodeGen::inst_RV_IV( instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags /* = INS_FLAGS_DONT_CARE */) { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) assert(size != EA_8BYTE); #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (arm_Valid_Imm_For_Instr(ins, val, flags)) { GetEmitter()->emitIns_R_I(ins, size, reg, val, flags); @@ -513,15 +513,15 @@ void CodeGen::inst_RV_IV( // TODO-Cleanup: Add a comment about why this is unreached() for RyuJIT backend. unreached(); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // TODO-Arm64-Bug: handle large constants! // Probably need something like the ARM case above: if (arm_Valid_Imm_For_Instr(ins, val)) ... assert(ins != INS_cmp); assert(ins != INS_tst); assert(ins != INS_mov); GetEmitter()->emitIns_R_R_I(ins, size, reg, reg, val); -#else // !_TARGET_ARM_ -#ifdef _TARGET_AMD64_ +#else // !TARGET_ARM +#ifdef TARGET_AMD64 // Instead of an 8-byte immediate load, a 4-byte immediate will do fine // as the high 4 bytes will be zero anyway. 
if (size == EA_8BYTE && ins == INS_mov && ((val & 0xFFFFFFFF00000000LL) == 0)) @@ -534,11 +534,11 @@ void CodeGen::inst_RV_IV( assert(!"Invalid immediate for inst_RV_IV"); } else -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 { GetEmitter()->emitIns_R_I(ins, size, reg, val); } -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM } /***************************************************************************** @@ -626,7 +626,7 @@ void CodeGen::inst_TT(instruction ins, GenTree* tree, unsigned offs, int shfv, e } break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case GT_CNS_INT: // We will get here for GT_MKREFANY from CodeGen::genPushArgList assert(offs == 0); @@ -712,7 +712,7 @@ void CodeGen::inst_RV_TT(instruction ins, } } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH #ifdef DEBUG // If it is a GC type and the result is not, then either // 1) it is an LEA @@ -733,7 +733,7 @@ void CodeGen::inst_RV_TT(instruction ins, #if CPU_LOAD_STORE_ARCH if (ins == INS_mov) { -#if defined(_TARGET_ARM64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) || defined(TARGET_ARM64) ins = ins_Move_Extend(tree->TypeGet(), false); #else NYI("CodeGen::inst_RV_TT with INS_mov"); @@ -769,7 +769,7 @@ void CodeGen::inst_RV_TT(instruction ins, varNum = tree->AsLclVarCommon()->GetLclNum(); assert(varNum < compiler->lvaCount); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM switch (ins) { case INS_mov: @@ -797,10 +797,10 @@ void CodeGen::inst_RV_TT(instruction ins, regSet.verifyRegUsed(regTmp); return; } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM GetEmitter()->emitIns_R_S(ins, size, reg, varNum, offs); return; -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM case GT_CLS_VAR: // Make sure FP instruction size matches the operand size @@ -838,9 +838,9 @@ void CodeGen::inst_RV_TT(instruction ins, assert(size == EA_4BYTE || size == EA_8BYTE); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert(offs == 0); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 target_ssize_t constVal; emitAttr size; @@ -875,16 +875,16 @@ void CodeGen::inst_RV_TT(instruction ins, void CodeGen::inst_RV_SH( instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags /* = INS_FLAGS_DONT_CARE */) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (val >= 32) val &= 0x1f; GetEmitter()->emitIns_R_I(ins, size, reg, val, flags); -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // X64 JB BE insures only encodable values make it here. // x86 can encode 8 bits, though it masks down to 5 or 6 // depending on 32-bit or 64-bit registers are used. @@ -915,7 +915,7 @@ void CodeGen::inst_RV_SH( void CodeGen::inst_TT_SH(instruction ins, GenTree* tree, unsigned val, unsigned offs) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (val == 0) { // Shift by 0 - why are you wasting our precious time???? @@ -931,9 +931,9 @@ void CodeGen::inst_TT_SH(instruction ins, GenTree* tree, unsigned val, unsigned { inst_TT(ins, tree, offs, val, emitTypeSize(tree->TypeGet())); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM inst_TT(ins, tree, offs, val, emitTypeSize(tree->TypeGet())); #endif } @@ -953,7 +953,7 @@ void CodeGen::inst_TT_CL(instruction ins, GenTree* tree, unsigned offs) * Generate an instruction of the form "op reg1, reg2, icon". 
*/ -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) void CodeGen::inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival) { assert(ins == INS_shld || ins == INS_shrd || ins == INS_shufps || ins == INS_shufpd || ins == INS_pshufd || @@ -1238,7 +1238,7 @@ void CodeGen::inst_RV_RV_TT( GetEmitter()->emitIns_SIMD_R_R_R(ins, size, targetReg, op1Reg, op2Reg); } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH /***************************************************************************** * @@ -1249,7 +1249,7 @@ void CodeGen::inst_RV_RV_TT( void CodeGen::inst_RV_RR(instruction ins, emitAttr size, regNumber reg1, regNumber reg2) { assert(size == EA_1BYTE || size == EA_2BYTE); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(ins == INS_movsx || ins == INS_movzx); assert(size != EA_1BYTE || (genRegMask(reg2) & RBM_BYTE_REGS)); #endif @@ -1314,7 +1314,7 @@ void CodeGen::inst_RV_ST(instruction ins, regNumber reg, TempDsc* tmp, unsigned size = emitActualTypeSize(type); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM switch (ins) { case INS_mov: @@ -1336,9 +1336,9 @@ void CodeGen::inst_RV_ST(instruction ins, regNumber reg, TempDsc* tmp, unsigned assert(!"Default inst_RV_ST case not supported for Arm"); break; } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM GetEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM } void CodeGen::inst_mov_RV_ST(regNumber reg, GenTree* tree) @@ -1361,14 +1361,14 @@ void CodeGen::inst_mov_RV_ST(regNumber reg, GenTree* tree) inst_RV_TT(loadIns, reg, tree); } } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH void CodeGen::inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs) { GetEmitter()->emitIns_S(ins, size, tmp->tdTempNum(), ofs); } #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool CodeGenInterface::validImmForInstr(instruction ins, target_ssize_t imm, insFlags flags) { if (GetEmitter()->emitInsIsLoadOrStore(ins) && !instIsFP(ins)) @@ -1535,9 +1535,9 @@ bool CodeGen::ins_Writes_Dest(instruction ins) return true; } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) bool CodeGenInterface::validImmForBL(ssize_t addr) { // On arm64, we always assume a call target is in range and generate a 28-bit relative @@ -1546,7 +1546,7 @@ bool CodeGenInterface::validImmForBL(ssize_t addr) // (for JIT) or zapinfo.cpp (for NGEN). If we cannot allocate a jump stub, it is fatal. return true; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 /***************************************************************************** * @@ -1562,7 +1562,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg) if (varTypeIsSIMD(srcType)) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // SSE2/AVX requires destination to be a reg always. // If src is in reg means, it is a reg-reg move. // @@ -1573,14 +1573,14 @@ instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg) // TODO-CQ: based on whether src type is aligned use movaps instead return (srcInReg) ? INS_movaps : INS_movups; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return (srcInReg) ? 
INS_mov : ins_Load(srcType); -#else // !defined(_TARGET_ARM64_) && !defined(_TARGET_XARCH_) +#else // !defined(TARGET_ARM64) && !defined(TARGET_XARCH) assert(!"unhandled SIMD type"); -#endif // !defined(_TARGET_ARM64_) && !defined(_TARGET_XARCH_) +#endif // !defined(TARGET_ARM64) && !defined(TARGET_XARCH) } -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (varTypeIsFloating(srcType)) { if (srcType == TYP_DOUBLE) @@ -1596,14 +1596,14 @@ instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg) assert(!"unhandled floating type"); } } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (varTypeIsFloating(srcType)) return INS_vmov; #else assert(!varTypeIsFloating(srcType)); #endif -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (!varTypeIsSmall(srcType)) { ins = INS_mov; @@ -1616,7 +1616,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg) { ins = INS_movsx; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // // Register to Register zero/sign extend operation // @@ -1645,7 +1645,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg) { ins = ins_Load(srcType); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // // Register to Register zero/sign extend operation // @@ -1717,7 +1717,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false* if (varTypeIsSIMD(srcType)) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #ifdef FEATURE_SIMD if (srcType == TYP_SIMD8) { @@ -1736,7 +1736,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false* // latter. return (aligned) ? INS_movaps : INS_movups; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return INS_ldr; #else assert(!"ins_Load with SIMD type"); @@ -1745,7 +1745,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false* if (varTypeIsFloating(srcType)) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (srcType == TYP_DOUBLE) { return INS_movsdsse2; @@ -1758,16 +1758,16 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false* { assert(!"unhandled floating type"); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return INS_ldr; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return INS_vldr; #else assert(!varTypeIsFloating(srcType)); #endif } -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (!varTypeIsSmall(srcType)) { ins = INS_mov; @@ -1781,7 +1781,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false* ins = INS_movsx; } -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) if (!varTypeIsSmall(srcType)) { ins = INS_ldr; @@ -1817,7 +1817,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false* */ instruction CodeGen::ins_Copy(var_types dstType) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (varTypeIsSIMD(dstType)) { return INS_movaps; @@ -1831,7 +1831,7 @@ instruction CodeGen::ins_Copy(var_types dstType) { return INS_mov; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (varTypeIsFloating(dstType)) { return INS_fmov; @@ -1840,7 +1840,7 @@ instruction CodeGen::ins_Copy(var_types dstType) { return INS_mov; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) assert(!varTypeIsSIMD(dstType)); if (varTypeIsFloating(dstType)) { @@ -1850,7 +1850,7 @@ instruction CodeGen::ins_Copy(var_types dstType) { return INS_mov; } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) 
assert(!varTypeIsSIMD(dstType)); assert(!varTypeIsFloating(dstType)); return INS_mov; @@ -1871,7 +1871,7 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false { instruction ins = INS_invalid; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (varTypeIsSIMD(dstType)) { #ifdef FEATURE_SIMD @@ -1908,13 +1908,13 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false assert(!"unhandled floating type"); } } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (varTypeIsSIMD(dstType) || varTypeIsFloating(dstType)) { // All sizes of SIMD and FP instructions use INS_str return INS_str; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) assert(!varTypeIsSIMD(dstType)); if (varTypeIsFloating(dstType)) { @@ -1925,9 +1925,9 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false assert(!varTypeIsFloating(dstType)); #endif -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) ins = INS_mov; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) if (!varTypeIsSmall(dstType)) ins = INS_str; else if (varTypeIsByte(dstType)) @@ -1942,7 +1942,7 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false return ins; } -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) bool CodeGen::isMoveIns(instruction ins) { @@ -1978,10 +1978,10 @@ instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType) // On SSE2/AVX - the same instruction is used for moving double/quad word to XMM/YMM register. assert((srcType == TYP_INT) || (srcType == TYP_UINT) || (srcType == TYP_LONG) || (srcType == TYP_ULONG)); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) // No 64-bit registers on x86. assert((srcType != TYP_LONG) && (srcType != TYP_ULONG)); -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) return INS_mov_i2xmm; } @@ -1991,10 +1991,10 @@ instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType) // On SSE2/AVX - the same instruction is used for moving double/quad word of XMM/YMM to an integer register. assert((dstType == TYP_INT) || (dstType == TYP_UINT) || (dstType == TYP_LONG) || (dstType == TYP_ULONG)); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) // No 64-bit registers on x86. 
assert((dstType != TYP_LONG) && (dstType != TYP_ULONG)); -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) return INS_mov_xmm2i; } @@ -2095,7 +2095,7 @@ instruction CodeGen::ins_FloatConv(var_types to, var_types from) } } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) bool CodeGen::isMoveIns(instruction ins) { @@ -2255,7 +2255,7 @@ instruction CodeGen::ins_FloatConv(var_types to, var_types from) } } -#endif // #elif defined(_TARGET_ARM_) +#endif // #elif defined(TARGET_ARM) /***************************************************************************** * @@ -2263,7 +2263,7 @@ instruction CodeGen::ins_FloatConv(var_types to, var_types from) */ void CodeGen::instGen_Return(unsigned stkArgSize) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (stkArgSize == 0) { instGen(INS_ret); @@ -2272,7 +2272,7 @@ void CodeGen::instGen_Return(unsigned stkArgSize) { inst_IV(INS_ret, stkArgSize); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // // The return on ARM is folded into the pop multiple instruction // and as we do not know the exact set of registers that we will @@ -2280,7 +2280,7 @@ void CodeGen::instGen_Return(unsigned stkArgSize) // instead just not emit anything for this method on the ARM // The return will be part of the pop multiple and that will be // part of the epilog that is generated by genFnEpilog() -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // This function shouldn't be used on ARM64. unreached(); #else @@ -2295,7 +2295,7 @@ void CodeGen::instGen_Return(unsigned stkArgSize) * Note: all MemoryBarriers instructions can be removed by * SET COMPlus_JitNoMemoryBarriers=1 */ -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 void CodeGen::instGen_MemoryBarrier(insBarrier barrierType) #else void CodeGen::instGen_MemoryBarrier() @@ -2308,12 +2308,12 @@ void CodeGen::instGen_MemoryBarrier() } #endif // DEBUG -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) instGen(INS_lock); GetEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) GetEmitter()->emitIns_I(INS_dmb, EA_4BYTE, 0xf); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) GetEmitter()->emitIns_BARR(INS_dmb, barrierType); #else #error "Unknown _TARGET_" @@ -2326,9 +2326,9 @@ void CodeGen::instGen_MemoryBarrier() */ void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) GetEmitter()->emitIns_R_R(INS_xor, size, reg, reg); -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) GetEmitter()->emitIns_R_I(INS_mov, size, reg, 0 ARM_ARG(flags)); #else #error "Unknown _TARGET_" @@ -2343,9 +2343,9 @@ void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags fla */ void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) GetEmitter()->emitIns_R_R(INS_test, size, reg, reg); -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) GetEmitter()->emitIns_R_I(INS_cmp, size, reg, 0); #else #error "Unknown _TARGET_" @@ -2359,7 +2359,7 @@ void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg) */ void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size, regNumber reg1, regNumber reg2) { -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_) +#if defined(TARGET_XARCH) || defined(TARGET_ARMARCH) GetEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2); #else #error "Unknown _TARGET_" @@ -2379,18 +2379,18 @@ void 
CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size, regNumber reg, target_ss } else { -#if defined(_TARGET_XARCH_) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_XARCH) +#if defined(TARGET_AMD64) if ((EA_SIZE(size) == EA_8BYTE) && (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(size))) { assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm"); } else -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 { GetEmitter()->emitIns_R_I(INS_cmp, size, reg, imm); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm)) { GetEmitter()->emitIns_R_I(INS_cmp, size, reg, imm); @@ -2399,7 +2399,7 @@ void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size, regNumber reg, target_ss { assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm"); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (true) // TODO-ARM64-NYI: arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm)) { GetEmitter()->emitIns_R_I(INS_cmp, size, reg, imm); diff --git a/src/coreclr/src/jit/instr.h b/src/coreclr/src/jit/instr.h index fdde64838672b..0de79a4cc1387 100644 --- a/src/coreclr/src/jit/instr.h +++ b/src/coreclr/src/jit/instr.h @@ -14,7 +14,7 @@ // clang-format off enum instruction : unsigned { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #define INST0(id, nm, um, mr, flags) INS_##id, #define INST1(id, nm, um, mr, flags) INS_##id, #define INST2(id, nm, um, mr, mi, flags) INS_##id, @@ -23,7 +23,7 @@ enum instruction : unsigned #define INST5(id, nm, um, mr, mi, rm, a4, rr, flags) INS_##id, #include "instrs.h" -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define INST1(id, nm, fp, ldst, fmt, e1 ) INS_##id, #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) INS_##id, #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) INS_##id, @@ -36,7 +36,7 @@ enum instruction : unsigned INS_lea, // Not a real instruction. 
It is used for load the address of stack locals -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define INST1(id, nm, fp, ldst, fmt, e1 ) INS_##id, #define INST2(id, nm, fp, ldst, fmt, e1, e2 ) INS_##id, #define INST3(id, nm, fp, ldst, fmt, e1, e2, e3 ) INS_##id, @@ -86,7 +86,7 @@ enum GCtype : unsigned GCT_BYREF }; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) enum insFlags: uint8_t { INS_FLAGS_None = 0x00, @@ -96,11 +96,11 @@ enum insFlags: uint8_t INS_Flags_IsDstDstSrcAVXInstruction = 0x08, INS_Flags_IsDstSrcSrcAVXInstruction = 0x10, - // TODO-Cleanup: Remove this flag and its usage from _TARGET_XARCH_ + // TODO-Cleanup: Remove this flag and its usage from TARGET_XARCH INS_FLAGS_DONT_CARE = 0x00, }; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) -// TODO-Cleanup: Move 'insFlags' under _TARGET_ARM_ +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) +// TODO-Cleanup: Move 'insFlags' under TARGET_ARM enum insFlags: unsigned { INS_FLAGS_NOT_SET = 0x00, @@ -111,7 +111,7 @@ enum insFlags: unsigned #error Unsupported target architecture #endif -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) enum insOpts: unsigned { INS_OPTS_NONE, @@ -124,7 +124,7 @@ enum insOpts: unsigned INS_OPTS_ASR, INS_OPTS_ROR }; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) enum insOpts : unsigned { INS_OPTS_NONE, @@ -257,7 +257,7 @@ enum emitAttr : unsigned EA_32BYTE = 0x020, EA_SIZE_MASK = 0x03F, -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT EA_PTRSIZE = EA_8BYTE, #else EA_PTRSIZE = EA_4BYTE, @@ -295,7 +295,7 @@ enum emitAttr : unsigned enum InstructionSet { InstructionSet_ILLEGAL = 0, -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH InstructionSet_Vector128, InstructionSet_Vector256, // Start linear order SIMD instruction sets @@ -324,9 +324,9 @@ enum InstructionSet InstructionSet_SSE2_X64, InstructionSet_SSE41_X64, InstructionSet_SSE42_X64, -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) InstructionSet_NEON, -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) InstructionSet_AdvSimd, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better InstructionSet_AdvSimd_Arm64, InstructionSet_AdvSimd_Fp16, // ID_AA64PFR0_EL1.AdvSIMD is 1 or better diff --git a/src/coreclr/src/jit/instrs.h b/src/coreclr/src/jit/instrs.h index 2f5c14fc6f4a1..37b9fe3090b2c 100644 --- a/src/coreclr/src/jit/instrs.h +++ b/src/coreclr/src/jit/instrs.h @@ -2,11 +2,11 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
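// Illustrative sketch (not from the diff): the emitAttr hunk above keeps EA_PTRSIZE keyed to the
// renamed TARGET_64BIT macro. A minimal compile-time statement of that invariant, assuming only the
// emitAttr enumerators shown in instr.h above, could be written as:
#ifdef TARGET_64BIT
static_assert(EA_PTRSIZE == EA_8BYTE, "64-bit targets use an 8-byte pointer size");
#else
static_assert(EA_PTRSIZE == EA_4BYTE, "32-bit targets use a 4-byte pointer size");
#endif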
-#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include "instrsxarch.h" -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #include "instrsarm.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #include "instrsarm64.h" #else #error Unsupported or unset target architecture diff --git a/src/coreclr/src/jit/instrsarm.h b/src/coreclr/src/jit/instrsarm.h index 6e786020079df..990414349a988 100644 --- a/src/coreclr/src/jit/instrsarm.h +++ b/src/coreclr/src/jit/instrsarm.h @@ -22,7 +22,7 @@ * ******************************************************************************/ -#if !defined(_TARGET_ARM_) +#if !defined(TARGET_ARM) #error Unexpected target type #endif diff --git a/src/coreclr/src/jit/instrsarm64.h b/src/coreclr/src/jit/instrsarm64.h index 2f030160e9161..6c8b196cba2b7 100644 --- a/src/coreclr/src/jit/instrsarm64.h +++ b/src/coreclr/src/jit/instrsarm64.h @@ -18,7 +18,7 @@ * ******************************************************************************/ -#if !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM64) #error Unexpected target type #endif diff --git a/src/coreclr/src/jit/instrsxarch.h b/src/coreclr/src/jit/instrsxarch.h index 33ece4497deb2..0bd795d8b3487 100644 --- a/src/coreclr/src/jit/instrsxarch.h +++ b/src/coreclr/src/jit/instrsxarch.h @@ -21,7 +21,7 @@ ******************************************************************************/ // clang-format off -#if !defined(_TARGET_XARCH_) +#if !defined(TARGET_XARCH) #error Unexpected target type #endif @@ -88,7 +88,7 @@ INST4(lea, "lea", IUM_WR, BAD_CODE, BAD_CODE, INST3(bt, "bt", IUM_RD, 0x0F00A3, BAD_CODE, 0x0F00A3, INS_FLAGS_WritesFlags) INST3(movsx, "movsx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00BE, INS_FLAGS_None) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 INST3(movsxd, "movsxd", IUM_WR, BAD_CODE, BAD_CODE, 0x4800000063, INS_FLAGS_None) #endif INST3(movzx, "movzx", IUM_WR, BAD_CODE, BAD_CODE, 0x0F00B6, INS_FLAGS_None) @@ -128,7 +128,7 @@ INSTMUL(imul_BP, "imul", IUM_RD, BAD_CODE, 0x002868, INSTMUL(imul_SI, "imul", IUM_RD, BAD_CODE, 0x003068, BAD_CODE, INS_FLAGS_WritesFlags) INSTMUL(imul_DI, "imul", IUM_RD, BAD_CODE, 0x003868, BAD_CODE, INS_FLAGS_WritesFlags) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 INSTMUL(imul_08, "imul", IUM_RD, BAD_CODE, 0x4400000068, BAD_CODE, INS_FLAGS_WritesFlags) INSTMUL(imul_09, "imul", IUM_RD, BAD_CODE, 0x4400000868, BAD_CODE, INS_FLAGS_WritesFlags) @@ -139,7 +139,7 @@ INSTMUL(imul_13, "imul", IUM_RD, BAD_CODE, 0x4400002868, INSTMUL(imul_14, "imul", IUM_RD, BAD_CODE, 0x4400003068, BAD_CODE, INS_FLAGS_WritesFlags) INSTMUL(imul_15, "imul", IUM_RD, BAD_CODE, 0x4400003868, BAD_CODE, INS_FLAGS_WritesFlags) -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // the hex codes in this file represent the instruction encoding as follows: // 0x0000ff00 - modrm byte position @@ -645,25 +645,25 @@ INST2(sar_N, "sar", IUM_RW, 0x0038C0, 0x0038C0, // id nm um mr flags INST1(r_movsb, "rep movsb", IUM_RD, 0x00A4F3, INS_FLAGS_None) INST1(r_movsd, "rep movsd", IUM_RD, 0x00A5F3, INS_FLAGS_None) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) INST1(r_movsq, "rep movsq", IUM_RD, 0xF3A548, INS_FLAGS_None) -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) INST1(movsb, "movsb", IUM_RD, 0x0000A4, INS_FLAGS_None) INST1(movsd, "movsd", IUM_RD, 0x0000A5, INS_FLAGS_None) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) INST1(movsq, "movsq", IUM_RD, 0x00A548, INS_FLAGS_None) -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) INST1(r_stosb, "rep stosb", IUM_RD, 
0x00AAF3, INS_FLAGS_None) INST1(r_stosd, "rep stosd", IUM_RD, 0x00ABF3, INS_FLAGS_None) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) INST1(r_stosq, "rep stosq", IUM_RD, 0xF3AB48, INS_FLAGS_None) -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) INST1(stosb, "stosb", IUM_RD, 0x0000AA, INS_FLAGS_None) INST1(stosd, "stosd", IUM_RD, 0x0000AB, INS_FLAGS_None) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) INST1(stosq, "stosq", IUM_RD, 0x00AB48, INS_FLAGS_None) -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) INST1(int3, "int3", IUM_RD, 0x0000CC, INS_FLAGS_None) INST1(nop, "nop", IUM_RD, 0x000090, INS_FLAGS_None) @@ -691,7 +691,7 @@ INST1(shrd, "shrd", IUM_RW, 0x0F00AC, // For RyuJIT/x86, we follow the x86 calling convention that requires // us to return floating point value on the x87 FP stack, so we need // these instructions regardless of whether we're using full stack fp. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 INST1(fld, "fld", IUM_WR, 0x0000D9, INS_FLAGS_x87Instr) INST1(fstp, "fstp", IUM_WR, 0x0018D9, INS_FLAGS_x87Instr) #endif // _TARGET_X86 @@ -713,7 +713,7 @@ INST1(setge, "setge", IUM_WR, 0x0F009D, INST1(setle, "setle", IUM_WR, 0x0F009E, INS_FLAGS_ReadsFlags) INST1(setg, "setg", IUM_WR, 0x0F009F, INS_FLAGS_ReadsFlags) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // A jump with rex prefix. This is used for register indirect // tail calls. INST1(rex_jmp, "rex.jmp", IUM_RD, 0x0020FE, INS_FLAGS_None) diff --git a/src/coreclr/src/jit/jit.h b/src/coreclr/src/jit/jit.h index 95de1f9f5c83d..f43860c6934fb 100644 --- a/src/coreclr/src/jit/jit.h +++ b/src/coreclr/src/jit/jit.h @@ -52,192 +52,162 @@ #define CHECK_STRUCT_PADDING 0 // Never enable it for non-MSFT compilers #endif -#if defined(_X86_) -#if defined(_ARM_) -#error Cannot define both _X86_ and _ARM_ +#if defined(HOST_X86) +#if defined(HOST_ARM) +#error Cannot define both HOST_X86 and HOST_ARM #endif -#if defined(_AMD64_) -#error Cannot define both _X86_ and _AMD64_ +#if defined(HOST_AMD64) +#error Cannot define both HOST_X86 and HOST_AMD64 #endif -#if defined(_ARM64_) -#error Cannot define both _X86_ and _ARM64_ +#if defined(HOST_ARM64) +#error Cannot define both HOST_X86 and HOST_ARM64 #endif -#define _HOST_X86_ -#elif defined(_AMD64_) -#if defined(_X86_) -#error Cannot define both _AMD64_ and _X86_ +#elif defined(HOST_AMD64) +#if defined(HOST_X86) +#error Cannot define both HOST_AMD64 and HOST_X86 #endif -#if defined(_ARM_) -#error Cannot define both _AMD64_ and _ARM_ +#if defined(HOST_ARM) +#error Cannot define both HOST_AMD64 and HOST_ARM #endif -#if defined(_ARM64_) -#error Cannot define both _AMD64_ and _ARM64_ +#if defined(HOST_ARM64) +#error Cannot define both HOST_AMD64 and HOST_ARM64 #endif -#define _HOST_AMD64_ -#elif defined(_ARM_) -#if defined(_X86_) -#error Cannot define both _ARM_ and _X86_ +#elif defined(HOST_ARM) +#if defined(HOST_X86) +#error Cannot define both HOST_ARM and HOST_X86 #endif -#if defined(_AMD64_) -#error Cannot define both _ARM_ and _AMD64_ +#if defined(HOST_AMD64) +#error Cannot define both HOST_ARM and HOST_AMD64 #endif -#if defined(_ARM64_) -#error Cannot define both _ARM_ and _ARM64_ +#if defined(HOST_ARM64) +#error Cannot define both HOST_ARM and HOST_ARM64 #endif -#define _HOST_ARM_ -#elif defined(_ARM64_) -#if defined(_X86_) -#error Cannot define both _ARM64_ and _X86_ +#elif defined(HOST_ARM64) +#if defined(HOST_X86) +#error Cannot define both HOST_ARM64 and HOST_X86 #endif -#if defined(_AMD64_) -#error Cannot define both _ARM64_ and _AMD64_ 
+#if defined(HOST_AMD64) +#error Cannot define both HOST_ARM64 and HOST_AMD64 #endif -#if defined(_ARM_) -#error Cannot define both _ARM64_ and _ARM_ +#if defined(HOST_ARM) +#error Cannot define both HOST_ARM64 and HOST_ARM #endif -#define _HOST_ARM64_ #else #error Unsupported or unset host architecture #endif -#if defined(_HOST_AMD64_) || defined(_HOST_ARM64_) -#define _HOST_64BIT_ +#if defined(TARGET_X86) +#if defined(TARGET_ARM) +#error Cannot define both TARGET_X86 and TARGET_ARM #endif - -#if defined(_TARGET_X86_) -#if defined(_TARGET_ARM_) -#error Cannot define both _TARGET_X86_ and _TARGET_ARM_ -#endif -#if defined(_TARGET_AMD64_) -#error Cannot define both _TARGET_X86_ and _TARGET_AMD64_ +#if defined(TARGET_AMD64) +#error Cannot define both TARGET_X86 and TARGET_AMD64 #endif -#if defined(_TARGET_ARM64_) -#error Cannot define both _TARGET_X86_ and _TARGET_ARM64_ +#if defined(TARGET_ARM64) +#error Cannot define both TARGET_X86 and TARGET_ARM64 #endif -#if !defined(_HOST_X86_) +#if !defined(HOST_X86) #define _CROSS_COMPILER_ #endif -#elif defined(_TARGET_AMD64_) -#if defined(_TARGET_X86_) -#error Cannot define both _TARGET_AMD64_ and _TARGET_X86_ +#elif defined(TARGET_AMD64) +#if defined(TARGET_X86) +#error Cannot define both TARGET_AMD64 and TARGET_X86 #endif -#if defined(_TARGET_ARM_) -#error Cannot define both _TARGET_AMD64_ and _TARGET_ARM_ +#if defined(TARGET_ARM) +#error Cannot define both TARGET_AMD64 and TARGET_ARM #endif -#if defined(_TARGET_ARM64_) -#error Cannot define both _TARGET_AMD64_ and _TARGET_ARM64_ +#if defined(TARGET_ARM64) +#error Cannot define both TARGET_AMD64 and TARGET_ARM64 #endif -#if !defined(_HOST_AMD64_) +#if !defined(HOST_AMD64) #define _CROSS_COMPILER_ #endif -#elif defined(_TARGET_ARM_) -#if defined(_TARGET_X86_) -#error Cannot define both _TARGET_ARM_ and _TARGET_X86_ +#elif defined(TARGET_ARM) +#if defined(TARGET_X86) +#error Cannot define both TARGET_ARM and TARGET_X86 #endif -#if defined(_TARGET_AMD64_) -#error Cannot define both _TARGET_ARM_ and _TARGET_AMD64_ +#if defined(TARGET_AMD64) +#error Cannot define both TARGET_ARM and TARGET_AMD64 #endif -#if defined(_TARGET_ARM64_) -#error Cannot define both _TARGET_ARM_ and _TARGET_ARM64_ +#if defined(TARGET_ARM64) +#error Cannot define both TARGET_ARM and TARGET_ARM64 #endif -#if !defined(_HOST_ARM_) +#if !defined(HOST_ARM) #define _CROSS_COMPILER_ #endif -#elif defined(_TARGET_ARM64_) -#if defined(_TARGET_X86_) -#error Cannot define both _TARGET_ARM64_ and _TARGET_X86_ +#elif defined(TARGET_ARM64) +#if defined(TARGET_X86) +#error Cannot define both TARGET_ARM64 and TARGET_X86 #endif -#if defined(_TARGET_AMD64_) -#error Cannot define both _TARGET_ARM64_ and _TARGET_AMD64_ +#if defined(TARGET_AMD64) +#error Cannot define both TARGET_ARM64 and TARGET_AMD64 #endif -#if defined(_TARGET_ARM_) -#error Cannot define both _TARGET_ARM64_ and _TARGET_ARM_ +#if defined(TARGET_ARM) +#error Cannot define both TARGET_ARM64 and TARGET_ARM #endif -#if !defined(_HOST_ARM64_) +#if !defined(HOST_ARM64) #define _CROSS_COMPILER_ #endif #else #error Unsupported or unset target architecture #endif -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) -#ifndef _TARGET_64BIT_ -#define _TARGET_64BIT_ -#endif // _TARGET_64BIT_ -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#ifdef TARGET_64BIT +#ifdef TARGET_X86 +#error Cannot define both TARGET_X86 and TARGET_64BIT +#endif // TARGET_X86 +#ifdef TARGET_ARM +#error Cannot define both TARGET_ARM and TARGET_64BIT +#endif // TARGET_ARM +#endif // TARGET_64BIT 
-#ifdef _TARGET_64BIT_ -#ifdef _TARGET_X86_ -#error Cannot define both _TARGET_X86_ and _TARGET_64BIT_ -#endif // _TARGET_X86_ -#ifdef _TARGET_ARM_ -#error Cannot define both _TARGET_ARM_ and _TARGET_64BIT_ -#endif // _TARGET_ARM_ -#endif // _TARGET_64BIT_ - -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) -#define _TARGET_XARCH_ +#if defined(TARGET_X86) || defined(TARGET_AMD64) +#define TARGET_XARCH #endif -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) -#define _TARGET_ARMARCH_ +#if defined(TARGET_ARM) || defined(TARGET_ARM64) +#define TARGET_ARMARCH #endif -// If the UNIX_AMD64_ABI is defined make sure that _TARGET_AMD64_ is also defined. +// If the UNIX_AMD64_ABI is defined make sure that TARGET_AMD64 is also defined. #if defined(UNIX_AMD64_ABI) -#if !defined(_TARGET_AMD64_) -#error When UNIX_AMD64_ABI is defined you must define _TARGET_AMD64_ defined as well. +#if !defined(TARGET_AMD64) +#error When UNIX_AMD64_ABI is defined you must define TARGET_AMD64 defined as well. #endif #endif -// If the UNIX_X86_ABI is defined make sure that _TARGET_X86_ is also defined. +// If the UNIX_X86_ABI is defined make sure that TARGET_X86 is also defined. #if defined(UNIX_X86_ABI) -#if !defined(_TARGET_X86_) -#error When UNIX_X86_ABI is defined you must define _TARGET_X86_ defined as well. -#endif +#if !defined(TARGET_X86) +#error When UNIX_X86_ABI is defined you must define TARGET_X86 defined as well. #endif - -#if defined(PLATFORM_UNIX) -#define _HOST_UNIX_ #endif -// Are we generating code to target Unix? This is true if we will run on Unix (_HOST_UNIX_ is defined). -// It's also true if we are building an altjit targetting Unix, which we determine by checking if either -// UNIX_AMD64_ABI or UNIX_X86_ABI is defined. -#if defined(_HOST_UNIX_) || ((defined(UNIX_AMD64_ABI) || defined(UNIX_X86_ABI)) && defined(ALT_JIT)) -#define _TARGET_UNIX_ +#if (defined(ALT_JIT) && (defined(UNIX_AMD64_ABI) || defined(UNIX_X86_ABI)) && !defined(TARGET_UNIX)) +// If we are building an ALT_JIT targeting Unix, override the TARGET_ to TARGET_UNIX +#undef TARGET_WINDOWS +#define TARGET_UNIX #endif -#ifndef _TARGET_UNIX_ -#define _TARGET_WINDOWS_ -#endif // !_TARGET_UNIX_ - // -------------------------------------------------------------------------------- // IMAGE_FILE_MACHINE_TARGET // -------------------------------------------------------------------------------- -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_I386 -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_AMD64 -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARMNT -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARM64 // 0xAA64 #else #error Unsupported or unset target architecture #endif // Include the AMD64 unwind codes when appropriate. -#if defined(_TARGET_AMD64_) -// We need to temporarily set PLATFORM_UNIX, if necessary, to get the Unix-specific unwind codes. 
-#if defined(_TARGET_UNIX_) && !defined(_HOST_UNIX_) -#define PLATFORM_UNIX -#endif +#if defined(TARGET_AMD64) #include "win64unwind.h" -#if defined(_TARGET_UNIX_) && !defined(_HOST_UNIX_) -#undef PLATFORM_UNIX -#endif #endif #include "corhdr.h" @@ -276,7 +246,7 @@ #define UNIX_AMD64_ABI_ONLY(x) #endif // defined(UNIX_AMD64_ABI) -#if defined(UNIX_AMD64_ABI) || !defined(_TARGET_64BIT_) || defined(_TARGET_ARM64_) +#if defined(UNIX_AMD64_ABI) || !defined(TARGET_64BIT) || defined(TARGET_ARM64) #define FEATURE_PUT_STRUCT_ARG_STK 1 #define PUT_STRUCT_ARG_STK_ONLY_ARG(x) , x #define PUT_STRUCT_ARG_STK_ONLY(x) x @@ -293,7 +263,7 @@ #define UNIX_AMD64_ABI_ONLY(x) #endif // defined(UNIX_AMD64_ABI) -#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_) +#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) #define MULTIREG_HAS_SECOND_GC_RET 1 #define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x #define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x @@ -306,11 +276,11 @@ // Arm64 Windows supports FEATURE_ARG_SPLIT, note this is different from // the official Arm64 ABI. // Case: splitting 16 byte struct between x7 and stack -#if (defined(_TARGET_ARM_) || (defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_))) +#if (defined(TARGET_ARM) || (defined(TARGET_WINDOWS) && defined(TARGET_ARM64))) #define FEATURE_ARG_SPLIT 1 #else #define FEATURE_ARG_SPLIT 0 -#endif // (defined(_TARGET_ARM_) || (defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_))) +#endif // (defined(TARGET_ARM) || (defined(TARGET_WINDOWS) && defined(TARGET_ARM64))) // To get rid of warning 4701 : local variable may be used without being initialized #define DUMMY_INIT(x) (x) @@ -499,7 +469,7 @@ typedef ptrdiff_t ssize_t; #ifdef DEBUG #define MEASURE_CLRAPI_CALLS 0 // No point in measuring DEBUG code. #endif -#if !defined(_HOST_X86_) && !defined(_HOST_AMD64_) +#if !defined(HOST_X86) && !defined(HOST_AMD64) #define MEASURE_CLRAPI_CALLS 0 // Cycle counters only hooked up on x86/x64. 
#endif #if !defined(_MSC_VER) && !defined(__GNUC__) @@ -514,7 +484,7 @@ typedef ptrdiff_t ssize_t; /*****************************************************************************/ /* Portability Defines */ /*****************************************************************************/ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #define JIT32_GCENCODER #endif @@ -590,7 +560,7 @@ const bool dspGCtbls = true; * (frameless method support is now always on) */ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #define DOUBLE_ALIGN 1 // permit the double alignment of ESP in prolog, // and permit the double alignment of local offsets #else @@ -671,7 +641,7 @@ inline size_t roundDn(size_t size, size_t mult = sizeof(size_t)) return (size) & ~(mult - 1); } -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT inline unsigned int roundUp(unsigned size, unsigned mult) { return (unsigned int)roundUp((size_t)size, (size_t)mult); @@ -681,19 +651,19 @@ inline unsigned int roundDn(unsigned size, unsigned mult) { return (unsigned int)roundDn((size_t)size, (size_t)mult); } -#endif // _HOST_64BIT_ +#endif // HOST_64BIT inline unsigned int unsigned_abs(int x) { return ((unsigned int)abs(x)); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT inline size_t unsigned_abs(ssize_t x) { return ((size_t)abs(x)); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT /*****************************************************************************/ @@ -802,7 +772,7 @@ extern int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, JitFlags* compileFlags, void* inlineInfoPtr); -#ifdef _HOST_64BIT_ +#ifdef HOST_64BIT const size_t INVALID_POINTER_VALUE = 0xFEEDFACEABADF00D; #else const size_t INVALID_POINTER_VALUE = 0xFEEDFACE; diff --git a/src/coreclr/src/jit/jitconfigvalues.h b/src/coreclr/src/jit/jitconfigvalues.h index fe39ba27362b3..688008f69717d 100644 --- a/src/coreclr/src/jit/jitconfigvalues.h +++ b/src/coreclr/src/jit/jitconfigvalues.h @@ -208,19 +208,19 @@ CONFIG_INTEGER(JitNoRangeChks, W("JitNoRngChks"), 0) // If 1, don't generate ran // AltJitAssertOnNYI should be 0 on targets where JIT is under development or bring up stage, so as to facilitate // fallback to main JIT on hitting a NYI. -#if defined(_TARGET_ARM64_) || defined(_TARGET_X86_) +#if defined(TARGET_ARM64) || defined(TARGET_X86) CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0) // Controls the AltJit behavior of NYI stuff -#else // !defined(_TARGET_ARM64_) && !defined(_TARGET_X86_) +#else // !defined(TARGET_ARM64) && !defined(TARGET_X86) CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff -#endif // defined(_TARGET_ARM64_) || defined(_TARGET_X86_) +#endif // defined(TARGET_ARM64) || defined(TARGET_X86) /// /// JIT Hardware Intrinsics /// -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) CONFIG_INTEGER(EnableSSE3_4, W("EnableSSE3_4"), 1) // Enable SSE3, SSSE3, SSE 4.1 and 4.2 instruction set as default #endif -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#if defined(TARGET_AMD64) || defined(TARGET_X86) // Enable AVX instruction set for wide operations as default. When both AVX and SSE3_4 are set, we will use the most // capable instruction set available which will prefer AVX over SSE3/4. 
CONFIG_INTEGER(EnableHWIntrinsic, W("EnableHWIntrinsic"), 1) // Enable Base @@ -239,14 +239,14 @@ CONFIG_INTEGER(EnableBMI2, W("EnableBMI2"), 1) // Enable BMI2 CONFIG_INTEGER(EnableLZCNT, W("EnableLZCNT"), 1) // Enable AES CONFIG_INTEGER(EnablePCLMULQDQ, W("EnablePCLMULQDQ"), 1) // Enable PCLMULQDQ CONFIG_INTEGER(EnablePOPCNT, W("EnablePOPCNT"), 1) // Enable POPCNT -#else // !defined(_TARGET_AMD64_) && !defined(_TARGET_X86_) +#else // !defined(TARGET_AMD64) && !defined(TARGET_X86) // Enable AVX instruction set for wide operations as default CONFIG_INTEGER(EnableAVX, W("EnableAVX"), 0) -#endif // !defined(_TARGET_AMD64_) && !defined(_TARGET_X86_) +#endif // !defined(TARGET_AMD64) && !defined(TARGET_X86) // clang-format off -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) CONFIG_INTEGER(EnableHWIntrinsic, W("EnableHWIntrinsic"), 1) CONFIG_INTEGER(EnableArm64Aes, W("EnableArm64Aes"), 1) CONFIG_INTEGER(EnableArm64Atomics, W("EnableArm64Atomics"), 1) @@ -269,7 +269,7 @@ CONFIG_INTEGER(EnableArm64AdvSimd_Fp16, W("EnableArm64AdvSimd_Fp16"), 1) CONFIG_INTEGER(EnableArm64Sm3, W("EnableArm64Sm3"), 1) CONFIG_INTEGER(EnableArm64Sm4, W("EnableArm64Sm4"), 1) CONFIG_INTEGER(EnableArm64Sve, W("EnableArm64Sve"), 1) -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) // clang-format on @@ -285,7 +285,7 @@ CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1) // It was originally intended that JitMinOptsTrackGCrefs only be enabled for amd64 on CoreCLR. A mistake was // made, and it was enabled for x86 as well. Whether it should continue to be enabled for x86 should be investigated. // This is tracked by issue https://github.com/dotnet/coreclr/issues/12415. -#if (defined(_TARGET_AMD64_) && defined(FEATURE_CORECLR)) || defined(_TARGET_X86_) +#if (defined(TARGET_AMD64) && defined(FEATURE_CORECLR)) || defined(TARGET_X86) #define JitMinOptsTrackGCrefs_Default 0 // Not tracking GC refs in MinOpts is new behavior #else #define JitMinOptsTrackGCrefs_Default 1 @@ -412,14 +412,14 @@ CONFIG_STRING(JitFunctionFile, W("JitFunctionFile")) #endif // DEBUG #if defined(DEBUG) -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // JitSaveFpLrWithCalleeSavedRegisters: // 0: use default frame type decision // 1: disable frames that save FP/LR registers with the callee-saved registers (at the top of the frame) // 2: force all frames to use the frame types that save FP/LR registers with the callee-saved registers (at the top // of the frame) CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSavedRegisters"), 0) -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) #endif // DEBUG #undef CONFIG_INTEGER diff --git a/src/coreclr/src/jit/jitee.h b/src/coreclr/src/jit/jitee.h index e31a29f8fa4a6..751489e946a45 100644 --- a/src/coreclr/src/jit/jitee.h +++ b/src/coreclr/src/jit/jitee.h @@ -21,7 +21,7 @@ class JitFlags JIT_FLAG_GCPOLL_CALLS = 6, // Emit calls to JIT_POLLGC for thread suspension. 
JIT_FLAG_MCJIT_BACKGROUND = 7, // Calling from multicore JIT background thread, do not call JitComplete - #if defined(_TARGET_X86_) + #if defined(TARGET_X86) JIT_FLAG_PINVOKE_RESTORE_ESP = 8, // Restore ESP after returning from inlined PInvoke JIT_FLAG_TARGET_P4 = 9, @@ -29,7 +29,7 @@ class JitFlags JIT_FLAG_USE_CMOV = 11, // Generated code may use cmov instruction JIT_FLAG_USE_SSE2 = 12, // Generated code may use SSE-2 instructions - #else // !defined(_TARGET_X86_) + #else // !defined(TARGET_X86) JIT_FLAG_UNUSED1 = 8, JIT_FLAG_UNUSED2 = 9, @@ -37,29 +37,29 @@ class JitFlags JIT_FLAG_UNUSED4 = 11, JIT_FLAG_UNUSED5 = 12, - #endif // !defined(_TARGET_X86_) + #endif // !defined(TARGET_X86) JIT_FLAG_UNUSED6 = 13, - #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) + #if defined(TARGET_X86) || defined(TARGET_AMD64) JIT_FLAG_USE_AVX = 14, JIT_FLAG_USE_AVX2 = 15, JIT_FLAG_USE_AVX_512 = 16, - #else // !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) + #else // !defined(TARGET_X86) && !defined(TARGET_AMD64) JIT_FLAG_UNUSED7 = 14, JIT_FLAG_UNUSED8 = 15, JIT_FLAG_UNUSED9 = 16, - #endif // !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) + #endif // !defined(TARGET_X86) && !defined(TARGET_AMD64) - #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) + #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) JIT_FLAG_FEATURE_SIMD = 17, #else JIT_FLAG_UNUSED10 = 17, - #endif // !(defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) + #endif // !(defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)) JIT_FLAG_MAKEFINALCODE = 18, // Use the final code generator, i.e., not the interpreter. JIT_FLAG_READYTORUN = 19, // Use version-resilient code generation @@ -85,15 +85,15 @@ class JitFlags JIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible JIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) JIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records -#else // !defined(_TARGET_ARM_) +#else // !defined(TARGET_ARM) JIT_FLAG_UNUSED11 = 41, -#endif // !defined(_TARGET_ARM_) +#endif // !defined(TARGET_ARM) JIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) JIT_FLAG_HAS_ARM64_AES = 43, // ID_AA64ISAR0_EL1.AES is 1 or better JIT_FLAG_HAS_ARM64_ATOMICS = 44, // ID_AA64ISAR0_EL1.Atomic is 2 or better @@ -117,7 +117,7 @@ class JitFlags JIT_FLAG_HAS_ARM64_SM4 = 62, // ID_AA64ISAR0_EL1.SM4 is 1 or better JIT_FLAG_HAS_ARM64_SVE = 63 // ID_AA64PFR0_EL1.SVE is 1 or better -#elif defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#elif defined(TARGET_X86) || defined(TARGET_AMD64) JIT_FLAG_USE_SSE3 = 43, JIT_FLAG_USE_SSSE3 = 44, @@ -142,7 +142,7 @@ class JitFlags JIT_FLAG_UNUSED32 = 63 -#else // !defined(_TARGET_ARM64_) && !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) +#else // !defined(TARGET_ARM64) && !defined(TARGET_X86) && !defined(TARGET_AMD64) JIT_FLAG_UNUSED12 = 43, JIT_FLAG_UNUSED13 = 44, @@ -166,7 +166,7 @@ class JitFlags JIT_FLAG_UNUSED31 = 62, JIT_FLAG_UNUSED32 = 63 -#endif // !defined(_TARGET_ARM64_) && !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) +#endif // !defined(TARGET_ARM64) && !defined(TARGET_X86) && !defined(TARGET_AMD64) }; // clang-format on @@ -236,7 +236,7 @@ 
class JitFlags FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_GCPOLL_CALLS, JIT_FLAG_GCPOLL_CALLS); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND, JIT_FLAG_MCJIT_BACKGROUND); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_PINVOKE_RESTORE_ESP, JIT_FLAG_PINVOKE_RESTORE_ESP); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4, JIT_FLAG_TARGET_P4); @@ -246,7 +246,7 @@ class JitFlags #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX, JIT_FLAG_USE_AVX); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2, JIT_FLAG_USE_AVX2); @@ -254,7 +254,7 @@ class JitFlags #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD, JIT_FLAG_FEATURE_SIMD); @@ -284,15 +284,15 @@ class JitFlags FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER0, JIT_FLAG_TIER0); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER1, JIT_FLAG_TIER1); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS, JIT_FLAG_RELATIVE_CODE_RELOCS); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING, JIT_FLAG_NO_INLINING); -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_AES, JIT_FLAG_HAS_ARM64_AES); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ATOMICS, JIT_FLAG_HAS_ARM64_ATOMICS); @@ -316,7 +316,7 @@ class JitFlags FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM4, JIT_FLAG_HAS_ARM64_SM4); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SVE, JIT_FLAG_HAS_ARM64_SVE); -#elif defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#elif defined(TARGET_X86) || defined(TARGET_AMD64) FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3, JIT_FLAG_USE_SSE3); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSSE3, JIT_FLAG_USE_SSSE3); @@ -330,7 +330,7 @@ class JitFlags FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_PCLMULQDQ, JIT_FLAG_USE_PCLMULQDQ); FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_POPCNT, JIT_FLAG_USE_POPCNT); -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 #undef FLAGS_EQUAL } diff --git a/src/coreclr/src/jit/jiteh.cpp b/src/coreclr/src/jit/jiteh.cpp index 496c78a2dbaf8..b17785c50b800 100644 --- a/src/coreclr/src/jit/jiteh.cpp +++ b/src/coreclr/src/jit/jiteh.cpp @@ -892,7 +892,7 @@ unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTr assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); #else *inTryRegion = true; @@ -1074,7 +1074,7 @@ void* Compiler::ehEmitCookie(BasicBlock* block) void* cookie; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (block->bbFlags & BBF_FINALLY_TARGET) { // Use the offset of the beginning of the NOP padding, not the main block. 
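// Illustrative sketch (not from the diff): the FLAGS_EQUAL lines above pair each JIT_FLAG_* value
// with its CORJIT_FLAG_* counterpart, so the renamed per-target guards cannot silently shift a flag's
// bit position. A hypothetical stand-alone form of that check, named FLAGS_EQUAL_SKETCH here to avoid
// implying it is the real jitee.h macro, would be:
#define FLAGS_EQUAL_SKETCH(a, b) static_assert((unsigned)(a) == (unsigned)(b), "JIT flag out of sync with CORJIT flag")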
@@ -1084,7 +1084,7 @@ void* Compiler::ehEmitCookie(BasicBlock* block) cookie = block->bbUnwindNopEmitCookie; } else -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) { cookie = block->bbEmitCookie; } diff --git a/src/coreclr/src/jit/layout.cpp b/src/coreclr/src/jit/layout.cpp index f3f9eee1cd083..0a838fb656016 100644 --- a/src/coreclr/src/jit/layout.cpp +++ b/src/coreclr/src/jit/layout.cpp @@ -385,7 +385,7 @@ void ClassLayout::InitializeGCPtrs(Compiler* compiler) INDEBUG(m_gcPtrsInitialized = true;) } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 ClassLayout* ClassLayout::GetPPPQuirkLayout(CompAllocator alloc) { assert(m_gcPtrsInitialized); @@ -415,4 +415,4 @@ ClassLayout* ClassLayout::GetPPPQuirkLayout(CompAllocator alloc) return m_pppQuirkLayout; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 diff --git a/src/coreclr/src/jit/layout.h b/src/coreclr/src/jit/layout.h index 6eb71e22f8812..23d7a06777b9a 100644 --- a/src/coreclr/src/jit/layout.h +++ b/src/coreclr/src/jit/layout.h @@ -32,7 +32,7 @@ class ClassLayout BYTE m_gcPtrsArray[sizeof(BYTE*)]; }; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // A layout that has its size artificially inflated to avoid stack corruption due to // bugs in user code - see Compiler::compQuirkForPPP for details. ClassLayout* m_pppQuirkLayout; @@ -53,7 +53,7 @@ class ClassLayout #endif , m_gcPtrCount(0) , m_gcPtrs(nullptr) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 , m_pppQuirkLayout(nullptr) #endif #ifdef DEBUG @@ -73,7 +73,7 @@ class ClassLayout #endif , m_gcPtrCount(0) , m_gcPtrs(nullptr) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 , m_pppQuirkLayout(nullptr) #endif #ifdef DEBUG @@ -86,7 +86,7 @@ class ClassLayout void InitializeGCPtrs(Compiler* compiler); public: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Get the layout for the PPP quirk - see Compiler::compQuirkForPPP for details. ClassLayout* GetPPPQuirkLayout(CompAllocator alloc); #endif diff --git a/src/coreclr/src/jit/lclmorph.cpp b/src/coreclr/src/jit/lclmorph.cpp index 94ef5a6381806..546a5d453ed6e 100644 --- a/src/coreclr/src/jit/lclmorph.cpp +++ b/src/coreclr/src/jit/lclmorph.cpp @@ -599,7 +599,7 @@ class LocalAddressVisitor final : public GenTreeVisitor m_compiler->lvaSetVarAddrExposed(exposeParentLcl ? varDsc->lvParentLcl : val.LclNum()); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // If the address of a variable is passed in a call and the allocation size of the variable // is 32 bits we will quirk the size to 64 bits. Some PInvoke signatures incorrectly specify // a ByRef to an INT32 when they actually write a SIZE_T or INT64. There are cases where @@ -615,7 +615,7 @@ class LocalAddressVisitor final : public GenTreeVisitor varTypeName(varDsc->TypeGet())); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT // TODO-ADDR: For now use LCL_VAR_ADDR and LCL_FLD_ADDR only as call arguments and assignment sources. // Other usages require more changes. 
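
The TARGET_64BIT block in this LocalAddressVisitor hunk widens an address-exposed 32-bit local to 64 bits when its address escapes into a call, because some PInvoke signatures declare a ByRef to an INT32 while the native side actually writes a SIZE_T or INT64. A minimal sketch of the mismatch and of what the quirk effectively buys, using hypothetical names and plain C++ standing in for both sides of the interop call:

    #include <cstddef>
    #include <cstdint>

    // Native side: writes a pointer-sized value (8 bytes on 64-bit targets).
    extern "C" void GetItemCount(size_t* count)
    {
        *count = 42;
    }

    int main()
    {
        // If the caller reserved only 4 bytes for this out-parameter, the extra 4 bytes
        // written above would land in a neighboring stack slot. The quirk is equivalent to
        // giving the local an 8-byte home even though only 32 bits are logically used:
        size_t widenedHome = 0;
        GetItemCount(&widenedHome);
        int32_t logicalValue = static_cast<int32_t>(widenedHome); // caller still treats it as int32
        return (logicalValue == 42) ? 0 : 1;
    }
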
For example, a tree like OBJ(ADD(ADDR(LCL_VAR), 4)) @@ -809,7 +809,7 @@ class LocalAddressVisitor final : public GenTreeVisitor return; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (m_compiler->info.compIsVarArgs && varDsc->lvIsParam && !varDsc->lvIsRegArg) { // TODO-ADDR: For now we ignore all stack parameters of varargs methods, @@ -900,7 +900,7 @@ class LocalAddressVisitor final : public GenTreeVisitor return; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (m_compiler->info.compIsVarArgs && varDsc->lvIsParam && !varDsc->lvIsRegArg) { // TODO-ADDR: For now we ignore all stack parameters of varargs methods, diff --git a/src/coreclr/src/jit/lclvars.cpp b/src/coreclr/src/jit/lclvars.cpp index ad831eea28acd..a4110734d17ab 100644 --- a/src/coreclr/src/jit/lclvars.cpp +++ b/src/coreclr/src/jit/lclvars.cpp @@ -56,17 +56,17 @@ void Compiler::lvaInit() lvaOutgoingArgSpaceVar = BAD_VAR_NUM; lvaOutgoingArgSpaceSize = PhasedVar(); #endif // FEATURE_FIXED_OUT_ARGS -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM lvaPromotedStructAssemblyScratchVar = BAD_VAR_NUM; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef JIT32_GCENCODER lvaLocAllocSPvar = BAD_VAR_NUM; #endif // JIT32_GCENCODER lvaNewObjArrayArgs = BAD_VAR_NUM; lvaGSSecurityCookie = BAD_VAR_NUM; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 lvaVarargsBaseOfStkArgs = BAD_VAR_NUM; -#endif // _TARGET_X86_ +#endif // TARGET_X86 lvaVarargsHandleArg = BAD_VAR_NUM; lvaSecurityObject = BAD_VAR_NUM; lvaStubArgumentVar = BAD_VAR_NUM; @@ -324,7 +324,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) { compArgSize = 0; -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) // Prespill all argument regs on to stack in case of Arm when under profiler. if (compIsProfilerHookNeeded()) { @@ -384,7 +384,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) // The total argument size must be aligned. noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* We can not pass more than 2^16 dwords as arguments as the "ret" instruction can only pop 2^16 arguments. Could be handled correctly but it will be very difficult for fully interruptible code */ @@ -551,10 +551,10 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) // Walk the function signature for the explicit arguments //------------------------------------------------------------------------- -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs varDscInfo->maxIntRegArgNum = info.compIsVarArgs ? varDscInfo->intRegArgNum : MAX_REG_ARG; -#elif defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On System V type environment the float registers are not indexed together with the int ones. 
varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum; #endif // _TARGET_* @@ -563,9 +563,9 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) const unsigned argSigLen = info.compMethodInfo->args.numArgs; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM regMaskTP doubleAlignMask = RBM_NONE; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM for (unsigned i = 0; i < argSigLen; i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst)) @@ -587,7 +587,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) // For ARM, ARM64, and AMD64 varargs, all arguments go in integer registers var_types argType = mangleVarArgsType(varDsc->TypeGet()); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM var_types origArgType = argType; #endif // TARGET_ARM @@ -600,13 +600,13 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) bool isHfaArg = false; var_types hfaType = TYP_UNDEF; -#if defined(_TARGET_ARM64_) && defined(_TARGET_UNIX_) +#if defined(TARGET_ARM64) && defined(TARGET_UNIX) // Native varargs on arm64 unix use the regular calling convention. if (!opts.compUseSoftFP) #else // Methods that use VarArg or SoftFP cannot have HFA arguments if (!info.compIsVarArgs && !opts.compUseSoftFP) -#endif // defined(_TARGET_ARM64_) && defined(_TARGET_UNIX_) +#endif // defined(TARGET_ARM64) && defined(TARGET_UNIX) { // If the argType is a struct, then check if it is an HFA if (varTypeIsStruct(argType)) @@ -618,7 +618,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) } else if (info.compIsVarArgs) { -#ifdef _TARGET_UNIX_ +#ifdef TARGET_UNIX // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as @@ -643,7 +643,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) // it enregistered, as long as we can split the rest onto the stack. unsigned cSlotsToEnregister = cSlots; -#if defined(_TARGET_ARM64_) && FEATURE_ARG_SPLIT +#if defined(TARGET_ARM64) && FEATURE_ARG_SPLIT // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte // struct is split between register r7 and virtual stack slot s[0] @@ -659,9 +659,9 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) } } -#endif // defined(_TARGET_ARM64_) && FEATURE_ARG_SPLIT +#endif // defined(TARGET_ARM64) && FEATURE_ARG_SPLIT -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers. // But we pre-spill user arguments in varargs methods and structs. // @@ -738,7 +738,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) codeGen->regSet.rsMaskPreSpillRegArg |= regMask; } } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; if (varTypeIsStruct(argType)) @@ -779,7 +779,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) } } #endif // UNIX_AMD64_ABI -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM // The final home for this incoming register might be our local stack frame. // For System V platforms the final home will always be on the local stack frame. 
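
The isHfaArg/hfaType logic in this part of lvaInitUserArgs asks whether a struct argument is an HFA (homogeneous floating-point aggregate), which on ARM and ARM64 is passed in floating-point registers rather than by the ordinary struct rules, and which, per the comment above, is not used for varargs or soft-FP methods. A minimal illustrative type, assuming the usual "up to four fields of one floating-point type" shape:

    // Hypothetical HFA: four fields, all the same floating-point type, no padding.
    struct Vec4
    {
        float x, y, z, w;
    };
    static_assert(sizeof(Vec4) == 16, "four floats, contiguous");

    // A struct such as { float a; double b; }, or one with five float fields, would not be
    // an HFA and would fall back to the target's normal struct-passing rules.
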
@@ -845,7 +845,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) varDsc->lvIsRegArg = 1; #if FEATURE_MULTIREG_ARGS -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (argType == TYP_STRUCT) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); @@ -883,12 +883,12 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, argType)); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_LONG) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_INT)); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG if (verbose) @@ -952,7 +952,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) break; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (isFloat) { // Print register size prefix @@ -973,7 +973,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) } } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType), isFloat)); } @@ -985,14 +985,14 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) } // end if (canPassArgInRegisters) else { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) varDscInfo->setAllRegArgUsed(argType); if (varTypeIsFloating(argType)) { varDscInfo->setAnyFloatStackArgs(); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // If we needed to use the stack in order to pass this argument then // record the fact that we have used up any remaining registers of this 'type' @@ -1018,17 +1018,17 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) #endif // !UNIX_AMD64_ABI if (info.compIsVarArgs || isHfaArg || isSoftFPPreSpill) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) varDsc->lvStkOffs = compArgSize; -#else // !_TARGET_X86_ +#else // !TARGET_X86 // TODO-CQ: We shouldn't have to go as far as to declare these // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } } // for each user arg -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (doubleAlignMask != RBM_NONE) { assert(RBM_ARG_REGS == 0xF); @@ -1055,7 +1055,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) } } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } /*****************************************************************************/ @@ -1104,10 +1104,10 @@ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo) compArgSize += TARGET_POINTER_SIZE; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (info.compIsVarArgs) varDsc->lvStkOffs = compArgSize; -#endif // _TARGET_X86_ +#endif // TARGET_X86 varDscInfo->varNum++; varDscInfo->varDsc++; @@ -1144,7 +1144,7 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo) varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // This has to be spilled right in front of the real arguments and we have // to pre-spill all the argument registers explicitly because we only have // have symbols for the declared ones, not any potential variadic ones. 
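
The ARM-only code above pre-spills the incoming argument registers for varargs methods (tracked by codeGen->regSet.rsMaskPreSpillRegArg) because only the declared parameters have symbols; the anonymous arguments are found by walking memory. A standard C++ varargs walk shows why that contiguity matters; the only assumption here is the usual va_arg model of stepping through a flat argument area:

    #include <cstdarg>

    // va_arg simply advances through memory past the named parameters, so on ARM the
    // prolog must store the register-passed arguments directly in front of the ones
    // already on the stack; that is what the pre-spill described above arranges.
    int SumInts(int count, ...)
    {
        va_list args;
        va_start(args, count);
        int total = 0;
        for (int i = 0; i < count; i++)
        {
            total += va_arg(args, int);
        }
        va_end(args);
        return total;
    }
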
@@ -1152,7 +1152,7 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo) { codeGen->regSet.rsMaskPreSpillRegArg |= intArgMasks[ix]; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG if (verbose) @@ -1179,7 +1179,7 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo) varDscInfo->varNum++; varDscInfo->varDsc++; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) varDsc->lvStkOffs = compArgSize; // Allocate a temp to point at the beginning of the args @@ -1187,7 +1187,7 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo) lvaVarargsBaseOfStkArgs = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs")); lvaTable[lvaVarargsBaseOfStkArgs].lvType = TYP_I_IMPL; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } } @@ -1289,9 +1289,9 @@ void Compiler::lvaInitVarDsc(LclVarDsc* varDsc, varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags); } -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) varDsc->lvIsImplicitByRef = 0; -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) // Set the lvType (before this point it is TYP_UNDEF). @@ -1517,16 +1517,16 @@ int __cdecl Compiler::lvaFieldOffsetCmp(const void* field1, const void* field2) Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) : compiler(compiler) , structPromotionInfo() -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM , requiresScratchVar(false) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG , retypedFieldsMap(compiler->getAllocator(CMK_DebugOnly)) #endif // DEBUG { } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM //-------------------------------------------------------------------------------------------- // GetRequiresScratchVar - do we need a stack area to assemble small fields in order to place them in a register. // @@ -1538,7 +1538,7 @@ bool Compiler::StructPromotionHelper::GetRequiresScratchVar() return requiresScratchVar; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //-------------------------------------------------------------------------------------------- // TryPromoteStructVar - promote struct var if it is possible and profitable. @@ -1630,13 +1630,13 @@ bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE // Note: MaxOffset is used below to declare a local array, and therefore must be a compile-time constant. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_SIMD) -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // This will allow promotion of 4 Vector fields on AVX2 or Vector256 on AVX, // or 8 Vector/Vector128 fields on SSE2. const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * YMM_REGSIZE_BYTES; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * FP_REGSIZE_BYTES; -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) #else // !FEATURE_SIMD const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double); #endif // !FEATURE_SIMD @@ -1676,10 +1676,10 @@ bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE return false; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM, we have a requirement on the struct alignment; see below. 
unsigned structAlignment = roundUp(compHandle->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM unsigned fieldsSize = 0; @@ -1715,7 +1715,7 @@ bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE fieldsSize += fieldInfo.fldSize; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM, for struct types that don't use explicit layout, the alignment of the struct is // at least the max alignment of its fields. We take advantage of this invariant in struct promotion, // so verify it here. @@ -1732,7 +1732,7 @@ bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE { requiresScratchVar = true; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } // If we saw any GC pointer or by-ref fields above then CORINFO_FLG_CONTAINS_GC_PTR or @@ -1877,7 +1877,7 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed); shouldPromote = false; } -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // TODO-PERF - Only do this when the LclVar is used in an argument context // TODO-ARM64 - HFA support should also eliminate the need for this. // TODO-ARM32 - HFA support should also eliminate the need for this. @@ -1894,7 +1894,7 @@ bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) lclNum, structPromotionInfo.fieldCnt); shouldPromote = false; } -#endif // _TARGET_AMD64_ || _TARGET_ARM64_ || _TARGET_ARM_ +#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM else if (varDsc->lvIsParam && !compiler->lvaIsImplicitByRefLocal(lclNum)) { #if FEATURE_MULTIREG_STRUCT_PROMOTE @@ -2179,7 +2179,7 @@ void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) fieldVarDsc->lvFldOrdinal = pFieldInfo->fldOrdinal; fieldVarDsc->lvParentLcl = lclNum; fieldVarDsc->lvIsParam = varDsc->lvIsParam; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Reset the implicitByRef flag. fieldVarDsc->lvIsImplicitByRef = 0; @@ -2224,7 +2224,7 @@ void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) } } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) //------------------------------------------------------------------------ // lvaPromoteLongVars: "Struct promote" all register candidate longs as if they are structs of two ints. // @@ -2309,7 +2309,7 @@ void Compiler::lvaPromoteLongVars() } #endif // DEBUG } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) //-------------------------------------------------------------------------------------------- // lvaGetFieldLocal - returns the local var index for a promoted field in a promoted struct var. 
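
lvaPromoteLongVars above treats each register-candidate long on 32-bit targets as a struct of two ints, and later hunks in this change (the GT_ADD_LO/GT_ADD_HI cases in lower.cpp, the lo/hi field offsets of 0 and 4) operate on exactly that shape. A small conceptual sketch of the decomposition and of a lowered 64-bit add, with plain C++ standing in for the GenTree nodes:

    #include <cstdint>

    // How a promoted long looks after decomposition on a 32-bit target:
    // the lo field lives at offset 0 and the hi field at offset 4.
    struct DecomposedLong
    {
        uint32_t lo;
        uint32_t hi;
    };

    // A 64-bit add expressed as the ADD_LO / ADD_HI pair: add the low halves,
    // then add the high halves plus the carry out of the low add.
    DecomposedLong AddLong(DecomposedLong a, DecomposedLong b)
    {
        DecomposedLong r;
        r.lo = a.lo + b.lo;
        uint32_t carry = (r.lo < a.lo) ? 1u : 0u;
        r.hi = a.hi + b.hi + carry;
        return r;
    }
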
@@ -2464,7 +2464,7 @@ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregister assert(varDsc->lvPinned); break; #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case DNER_LongParamField: JITDUMP("it is a decomposed field of a long parameter\n"); break; @@ -2496,7 +2496,7 @@ bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVarArg) return true; } -#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_) +#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) if (howToPassStruct == SPK_ByValue) { assert(type == TYP_STRUCT); @@ -2536,7 +2536,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool var_types simdBaseType = TYP_UNKNOWN; varDsc->lvType = impNormStructType(typeHnd, &simdBaseType); -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Mark implicit byref struct parameters if (varDsc->lvIsParam && !varDsc->lvIsStructField) { @@ -2549,7 +2549,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool varDsc->lvIsImplicitByRef = 1; } } -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #if FEATURE_SIMD if (simdBaseType != TYP_UNKNOWN) @@ -2586,9 +2586,9 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool #endif // FEATURE_SIMD } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT BOOL fDoubleAlignHint = FALSE; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 fDoubleAlignHint = TRUE; #endif @@ -2602,7 +2602,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool #endif varDsc->lvStructDoubleAlign = 1; } -#endif // not _TARGET_64BIT_ +#endif // not TARGET_64BIT unsigned classAttribs = info.compCompHnd->getClassAttribs(typeHnd); @@ -2634,12 +2634,12 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool void Compiler::lvaSetStructUsedAsVarArg(unsigned varNum) { #ifdef FEATURE_HFA -#if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) LclVarDsc* varDsc = &lvaTable[varNum]; // For varargs methods incoming and outgoing arguments should not be treated // as HFA. varDsc->SetHfaType(TYP_UNDEF); -#endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) #endif // FEATURE_HFA } @@ -2874,8 +2874,8 @@ unsigned Compiler::lvaLclSize(unsigned varNum) default: // This must be a primitive var. Fall out of switch statement break; } -#ifdef _TARGET_64BIT_ - // We only need this Quirk for _TARGET_64BIT_ +#ifdef TARGET_64BIT + // We only need this Quirk for TARGET_64BIT if (lvaTable[varNum].lvQuirkToLong) { noway_assert(lvaTable[varNum].lvAddrExposed); @@ -3047,7 +3047,7 @@ class LclVarDsc_SmallCode_Less unsigned weight1 = dsc1->lvRefCnt(); unsigned weight2 = dsc2->lvRefCnt(); -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. @@ -3154,7 +3154,7 @@ class LclVarDsc_BlendedCode_Less unsigned weight1 = dsc1->lvRefCntWtd(); unsigned weight2 = dsc2->lvRefCntWtd(); -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. 
@@ -3254,12 +3254,12 @@ void Compiler::lvaSortByRefCount() varDsc->setLvRefCntWtd(0); } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (varTypeIsLong(varDsc) && varDsc->lvPromoted) { varDsc->lvTracked = 0; } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) // Variables that are address-exposed, and all struct locals, are never enregistered, or tracked. // (The struct may be promoted, and its field variables enregistered/tracked, or the VM may "normalize" @@ -3447,25 +3447,25 @@ size_t LclVarDsc::lvArgStackSize() const #if defined(WINDOWS_AMD64_ABI) // Structs are either passed by reference or can be passed by value using one pointer stackSize = TARGET_POINTER_SIZE; -#elif defined(_TARGET_ARM64_) || defined(UNIX_AMD64_ABI) +#elif defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // lvSize performs a roundup. stackSize = this->lvSize(); -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) if ((stackSize > TARGET_POINTER_SIZE * 2) && (!this->lvIsHfa())) { // If the size is greater than 16 bytes then it will // be passed by reference. stackSize = TARGET_POINTER_SIZE; } -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) -#else // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI +#else // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI NYI("Unsupported target."); unreached(); -#endif // !_TARGET_ARM64_ !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI +#endif // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI } else { @@ -3482,7 +3482,7 @@ var_types LclVarDsc::lvaArgType() { var_types type = TypeGet(); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI if (type == TYP_STRUCT) { @@ -3511,16 +3511,16 @@ var_types LclVarDsc::lvaArgType() } } #endif // !UNIX_AMD64_ABI -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (type == TYP_STRUCT) { NYI("lvaArgType"); } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // Nothing to do; use the type as is. #else NYI("lvaArgType"); -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 return type; } @@ -4203,7 +4203,7 @@ inline void Compiler::lvaIncrementFrameSize(unsigned size) */ bool Compiler::lvaTempsHaveLargerOffsetThanVars() { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // We never want to place the temps with larger offsets for ARM return false; #else @@ -4731,7 +4731,7 @@ void Compiler::lvaFixVirtualFrameOffsets() { LclVarDsc* varDsc; -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_AMD64_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space. @@ -4747,7 +4747,7 @@ void Compiler::lvaFixVirtualFrameOffsets() // The delta to be added to virtual offset to adjust it relative to frame pointer or SP int delta = 0; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64 if (codeGen->doubleAlignOrFramePointerUsed()) @@ -4761,19 +4761,19 @@ void Compiler::lvaFixVirtualFrameOffsets() // pushed registers, return address, and padding delta += codeGen->genTotalFrameSize(); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) else { // We set FP to be after LR, FP delta += 2 * REGSIZE_BYTES; } -#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) else { // FP is used. 
delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta(); } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 unsigned lclNum; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) @@ -4800,15 +4800,15 @@ void Compiler::lvaFixVirtualFrameOffsets() if (!varDsc->lvOnFrame) { if (!varDsc->lvIsParam -#if !defined(_TARGET_AMD64_) +#if !defined(TARGET_AMD64) || (varDsc->lvIsRegArg -#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) +#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) && compIsProfilerHookNeeded() && !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need assign stack offsets // for prespilled arguments #endif ) -#endif // !defined(_TARGET_AMD64_) +#endif // !defined(TARGET_AMD64) ) { doAssignStkOffs = false; // Not on frame or an incomming stack arg @@ -4862,13 +4862,13 @@ void Compiler::lvaFixVirtualFrameOffsets() #endif // FEATURE_FIXED_OUT_ARGS } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool Compiler::lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask) { const LclVarDsc& desc = lvaTable[lclNum]; return desc.lvIsRegArg && (preSpillMask & genRegMask(desc.GetArgReg())); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM /***************************************************************************** * lvaUpdateArgsWithInitialReg() : For each argument variable descriptor, update @@ -4935,7 +4935,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG); noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; #endif @@ -4947,10 +4947,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() if (!info.compIsStatic) { noway_assert(lclNum == info.compThisArg); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 lclNum++; } @@ -4960,10 +4960,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() { noway_assert(lclNum == info.compRetBuffArg); noway_assert(lvaTable[lclNum].lvIsRegArg); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 lclNum++; } @@ -4988,7 +4988,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; unsigned argSigLen = info.compMethodInfo->args.numArgs; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // // struct_n { int; int; ... 
n times }; // @@ -5056,7 +5056,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() } lclNum += argLcls; -#else // !_TARGET_ARM_ +#else // !TARGET_ARM for (unsigned i = 0; i < argSigLen; i++) { unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args); @@ -5071,7 +5071,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); argLst = info.compCompHnd->getArgNext(argLst); } -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM #if !USER_ARGS_COME_LAST @@ -5260,7 +5260,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, * when updating the current offset on the stack */ CLANG_FORMAT_COMMENT_ANCHOR; -#if !defined(_TARGET_ARMARCH_) +#if !defined(TARGET_ARMARCH) #if DEBUG // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize // Also investigate why we are incrementing argOffs for X86 as this seems incorrect @@ -5269,13 +5269,13 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, #endif // DEBUG #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) argOffs += TARGET_POINTER_SIZE; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // Register arguments on AMD64 also takes stack space. (in the backing store) varDsc->lvStkOffs = argOffs; argOffs += TARGET_POINTER_SIZE; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Register arguments on ARM64 only take stack space when they have a frame home. // Unless on windows and in a vararg method. #if FEATURE_ARG_SPLIT @@ -5292,7 +5292,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, } #endif // FEATURE_ARG_SPLIT -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, so we have to fill in lvStkOffs here // @@ -5376,7 +5376,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, } else { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Dev11 Bug 42817: incorrect codegen for DrawFlatCheckBox causes A/V in WinForms // // Here we have method with a signature (int a1, struct a2, struct a3, int a4, int a5). @@ -5484,7 +5484,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, // No alignment of argOffs required break; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM varDsc->lvStkOffs = argOffs; } @@ -5494,7 +5494,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, // For a dependent promoted struct we also assign the struct fields stack offset CLANG_FORMAT_COMMENT_ANCHOR; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if ((varDsc->TypeGet() == TYP_LONG) && varDsc->lvPromoted) { noway_assert(varDsc->lvFieldCnt == 2); @@ -5503,7 +5503,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, lvaTable[fieldVarNum + 1].lvStkOffs = varDsc->lvStkOffs + genTypeSize(TYP_INT); } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) if (varDsc->lvPromotedStruct()) { lvaPromotionType promotionType = lvaGetPromotionType(varDsc); @@ -5546,7 +5546,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() codeGen->setFramePointerUsed(codeGen->isFramePointerRequired()); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Decide where to save FP and LR registers. 
We store FP/LR registers at the bottom of the frame if there is // a frame pointer used (so we get positive offsets from the frame pointer to access locals), but not if we // need a GS cookie AND localloc is used, since we need the GS cookie to protect the saved return value, @@ -5568,9 +5568,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(true); // Force using new frames } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // On x86/amd64, the return address has already been pushed by the call instruction in the caller. stkOffs -= TARGET_POINTER_SIZE; // return address; @@ -5581,24 +5581,24 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() { stkOffs -= REGSIZE_BYTES; } -#endif //_TARGET_XARCH_ +#endif // TARGET_XARCH int preSpillSize = 0; bool mustDoubleAlign = false; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM mustDoubleAlign = true; preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; -#else // !_TARGET_ARM_ +#else // !TARGET_ARM #if DOUBLE_ALIGN if (genDoubleAlign()) { mustDoubleAlign = true; // X86 only } #endif -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // If the frame pointer is used, then we'll save FP/LR at the bottom of the stack. // Otherwise, we won't store FP, and we'll store LR at the top, with the other callee-save // registers (if any). @@ -5624,13 +5624,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() stkOffs -= (compCalleeRegsPushed - 2) * REGSIZE_BYTES; } -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 compLclFrameSize = 0; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // In case of Amd64 compCalleeRegsPushed includes float regs (Xmm6-xmm15) that // need to be pushed. But Amd64 doesn't support push/pop of xmm registers. // Instead we need to allocate space for them on the stack and save them in prolog. @@ -5672,9 +5672,9 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() stkOffs -= compVSQuirkStackPaddingNeeded; lvaIncrementFrameSize(compVSQuirkStackPaddingNeeded); } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARMARCH_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARMARCH) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including @@ -5683,7 +5683,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } -#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARMARCH_) +#endif // FEATURE_EH_FUNCLETS && defined(TARGET_ARMARCH) if (mustDoubleAlign) { @@ -5964,13 +5964,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() if (varDsc->lvIsParam) { -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On Windows AMD64 we can use the caller-reserved stack area that is already setup assert(varDsc->lvStkOffs != BAD_STK_OFFS); continue; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // A register argument that is not enregistered ends up as // a local variable which will need stack frame space. 
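
The Windows-AMD64-only early-out above relies on the caller-reserved stack area: in that ABI the caller reserves home slots for the four register parameters, so a register argument that needs a stack home can reuse caller-owned memory instead of new frame space, and (as the outgoing-arg assert below reflects) a frame that makes calls reserves at least those four slots for its own callees. A rough sketch of that sizing rule, with the constants written out as assumptions rather than taken from the JIT sources:

    #include <algorithm>
    #include <cstddef>

    constexpr size_t kPointerSize   = 8;  // 64-bit target
    constexpr size_t kHomeSlotCount = 4;  // caller-reserved slots for RCX/RDX/R8/R9 on Windows x64

    // Outgoing argument area for a frame that makes calls: never smaller than the four
    // home slots, even if every callee takes fewer arguments. SysV AMD64 has no such floor.
    size_t OutgoingArgSpace(size_t maxCallArgSlots)
    {
        return std::max(maxCallArgSlots, kHomeSlotCount) * kPointerSize;
    }
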
@@ -5980,7 +5980,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() continue; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (info.compIsVarArgs && varDsc->GetArgReg() != theFixedRetBuffArgNum()) { // Stack offset to varargs (parameters) should point to home area which will be preallocated. @@ -5991,7 +5991,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, thus they don't need stack frame space. // @@ -6002,7 +6002,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } #endif -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } /* Make sure the type is appropriate */ @@ -6046,12 +6046,12 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() /* Need to align the offset? */ if (mustDoubleAlign && (varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM || varDsc->lvType == TYP_LONG // Align longs for ARM #endif -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT )) { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); @@ -6084,7 +6084,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() // Reserve the stack space for this variable stkOffs = lvaAllocLocalAndSetVirtualOffset(lclNum, lvaLclSize(lclNum), stkOffs); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // If we have an incoming register argument that has a struct promoted field // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // @@ -6095,8 +6095,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() unsigned fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs; } -#endif // _TARGET_ARM64_ -#ifdef _TARGET_ARM_ +#endif // TARGET_ARM64 +#ifdef TARGET_ARM // If we have an incoming register argument that has a promoted long // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // @@ -6108,7 +6108,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs; lvaTable[fieldVarNum + 1].lvStkOffs = varDsc->lvStkOffs + 4; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } } @@ -6190,7 +6190,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() } } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_AMD64_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument @@ -6199,21 +6199,21 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } -#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_AMD64_) +#endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have a frame pointer { // Create space for saving FP and LR. stkOffs -= 2 * REGSIZE_BYTES; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #if FEATURE_FIXED_OUT_ARGS if (lvaOutgoingArgSpaceSize > 0) { -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V. 
+#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V. noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE)); #endif noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0); @@ -6230,14 +6230,14 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals() // and the pushed frame pointer register which for some strange reason isn't part of 'compCalleeRegsPushed'. int pushedCount = compCalleeRegsPushed; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (info.compIsVarArgs) { pushedCount += MAX_REG_ARG; } #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (codeGen->doubleAlignOrFramePointerUsed()) { pushedCount += 1; // pushed EBP (frame pointer) @@ -6252,7 +6252,7 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i { noway_assert(lclNum != BAD_VAR_NUM); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Before final frame layout, assume the worst case, that every >=8 byte local will need // maximum padding to be aligned. This is because we generate code based on the stack offset // computed during tentative frame layout. These offsets cannot get bigger during final @@ -6324,7 +6324,7 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i } #endif } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT /* Reserve space on the stack by bumping the frame size */ @@ -6344,7 +6344,7 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i return stkOffs; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 /***************************************************************************** * lvaIsCalleeSavedIntRegCountEven() : returns true if the number of integer registers * pushed onto stack is even including RBP if used as frame pointer @@ -6358,7 +6358,7 @@ bool Compiler::lvaIsCalleeSavedIntRegCountEven() unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); return (regsPushed % (16 / REGSIZE_BYTES)) == 0; } -#endif //_TARGET_AMD64_ +#endif // TARGET_AMD64 /***************************************************************************** * lvaAlignFrame() : After allocating everything on the frame, reserve any @@ -6366,7 +6366,7 @@ bool Compiler::lvaIsCalleeSavedIntRegCountEven() */ void Compiler::lvaAlignFrame() { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Leaf frames do not need full alignment, but the unwind info is smaller if we // are at least 8 byte aligned (and we assert as much) @@ -6412,7 +6412,7 @@ void Compiler::lvaAlignFrame() lvaIncrementFrameSize(REGSIZE_BYTES); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // The stack on ARM64 must be 16 byte aligned. @@ -6442,7 +6442,7 @@ void Compiler::lvaAlignFrame() lvaIncrementFrameSize(REGSIZE_BYTES); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Ensure that stack offsets will be double-aligned by grabbing an unused DWORD if needed. 
// @@ -6455,7 +6455,7 @@ void Compiler::lvaAlignFrame() lvaIncrementFrameSize(TARGET_POINTER_SIZE); } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) #if DOUBLE_ALIGN if (genDoubleAlign()) @@ -6499,7 +6499,7 @@ void Compiler::lvaAlignFrame() #else NYI("TARGET specific lvaAlignFrame"); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } /***************************************************************************** @@ -6518,7 +6518,7 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() // if (varDsc->lvIsStructField #ifndef UNIX_AMD64_ABI -#if !defined(_TARGET_ARM_) +#if !defined(TARGET_ARM) // ARM: lo/hi parts of a promoted long arg need to be updated. // For System V platforms there is no outgoing args space. @@ -6526,7 +6526,7 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() // The offset of these structs is already calculated in lvaAssignVirtualFrameOffsetToArg methos. // Make sure the code below is not executed for these structs and the offset is not changed. && !varDsc->lvIsParam -#endif // !defined(_TARGET_ARM_) +#endif // !defined(TARGET_ARM) #endif // !UNIX_AMD64_ABI ) { @@ -6567,7 +6567,7 @@ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT) { int preSpillSize = 0; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * TARGET_POINTER_SIZE; #endif @@ -6585,7 +6585,7 @@ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) /* Need to align the offset? */ CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0)) { // Calculate 'pad' as the number of bytes to align up 'stkOffs' to be a multiple of TARGET_POINTER_SIZE @@ -6622,7 +6622,7 @@ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) stkOffs -= size; temp->tdSetTempOffs(stkOffs); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Only required for the ARM platform that we have an accurate estimate for the spillTempSize noway_assert(spillTempSize <= lvaGetMaxSpillTempSize()); #endif @@ -6650,14 +6650,14 @@ void Compiler::lvaDumpRegLocation(unsigned lclNum) { LclVarDsc* varDsc = lvaTable + lclNum; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_DOUBLE) { // The assigned registers are `lvRegNum:RegNext(lvRegNum)` printf("%3s:%-3s ", getRegName(varDsc->GetRegNum()), getRegName(REG_NEXT(varDsc->GetRegNum()))); } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { printf("%3s ", getRegName(varDsc->GetRegNum())); } @@ -6675,7 +6675,7 @@ void Compiler::lvaDumpFrameLocation(unsigned lclNum) int offset; regNumber baseReg; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0, /* isFloatUsage */ false); #else bool EBPbased; @@ -6901,10 +6901,10 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t r { printf(" EH-live"); } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (varDsc->lvStructDoubleAlign) printf(" double-align"); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT if (varDsc->lvOverlappingFields) { printf(" overlapping-fields"); @@ -6924,14 +6924,14 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t r if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = &lvaTable[varDsc->lvParentLcl]; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (varTypeIsLong(parentvarDsc)) { bool isLo = (lclNum == parentvarDsc->lvFieldLclStart); printf(" 
V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, isLo ? "lo" : "hi", isLo ? 0 : genTypeSize(TYP_INT)); } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) { CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->lvVerTypeInfo.GetClassHandle(); CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal); @@ -7081,12 +7081,12 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) compCalleeRegsPushed = CNT_CALLEE_SAVED; -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) if (compFloatingPointUsed) compCalleeRegsPushed += CNT_CALLEE_SAVED_FLOAT; compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) if (compFloatingPointUsed) { compCalleeFPRegsSavedMask = RBM_FLT_CALLEE_SAVED; @@ -7105,7 +7105,7 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) } #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // Since FP/EBP is included in the SAVED_REG_MAXSZ we need to // subtract 1 register if codeGen->isFramePointerUsed() is true. if (codeGen->isFramePointerUsed()) @@ -7117,7 +7117,7 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) lvaAssignFrameOffsets(curState); unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; -#if defined(_TARGET_ARMARCH_) +#if defined(TARGET_ARMARCH) if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; @@ -7221,7 +7221,7 @@ int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum) int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (isFpBased) { // Currently, the frame starts by pushing ebp, ebp points to the saved ebp @@ -7235,9 +7235,9 @@ int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased) { // The offset is correct already! 
} -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 NYI("lvaToInitialSPRelativeOffset"); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 return offset; } @@ -7374,13 +7374,13 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData* // Calculate padding unsigned padding = LCL_FLD_PADDING(lclNum); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // We need to support alignment requirements to access memory on ARM ARCH unsigned alignment = 1; pComp->codeGen->InferOpSizeAlign(lcl, &alignment); alignment = roundUp(alignment, TARGET_POINTER_SIZE); padding = roundUp(padding, alignment); -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH // Change the variable to a TYP_BLK if (varType != TYP_BLK) diff --git a/src/coreclr/src/jit/liveness.cpp b/src/coreclr/src/jit/liveness.cpp index 712998773c5f6..3368e6567043d 100644 --- a/src/coreclr/src/jit/liveness.cpp +++ b/src/coreclr/src/jit/liveness.cpp @@ -11,7 +11,7 @@ #pragma hdrstop #endif -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) #include "decomposelongs.h" #endif #include "lower.h" // for LowerRange() @@ -1006,10 +1006,10 @@ void Compiler::fgExtendDbgLifetimes() LIR::Range initRange = LIR::EmptyRange(); initRange.InsertBefore(nullptr, zero, store); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) unsigned blockWeight = block->getBBWeight(this); DecomposeLongs::DecomposeRange(this, blockWeight, initRange); -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) m_pLowering->LowerRange(block, initRange); // Naively inserting the initializer at the end of the block may add code after the block's @@ -1639,9 +1639,9 @@ void Compiler::fgComputeLifeUntrackedLocal(VARSET_TP& life, for (unsigned i = varDsc.lvFieldLclStart; i < varDsc.lvFieldLclStart + varDsc.lvFieldCnt; ++i) { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (!varTypeIsLong(lvaTable[i].lvType) || !lvaTable[i].lvPromoted) -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) { noway_assert(lvaTable[i].lvIsStructField); } diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp index cc15aa40408a0..937d83783d7c6 100644 --- a/src/coreclr/src/jit/lower.cpp +++ b/src/coreclr/src/jit/lower.cpp @@ -23,9 +23,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "lower.h" -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) #include "decomposelongs.h" -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) //------------------------------------------------------------------------ // MakeSrcContained: Make "childNode" a contained node @@ -131,7 +131,7 @@ GenTree* Lowering::LowerNode(GenTree* node) LowerAdd(node->AsOp()); break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: @@ -146,7 +146,7 @@ GenTree* Lowering::LowerNode(GenTree* node) case GT_MUL: case GT_MULHI: -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) case GT_MUL_LONG: #endif ContainCheckMul(node->AsOp()); @@ -201,7 +201,7 @@ GenTree* Lowering::LowerNode(GenTree* node) LowerCast(node); break; -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) case GT_ARR_BOUNDS_CHECK: #ifdef FEATURE_SIMD case GT_SIMD_CHK: @@ -211,7 +211,7 @@ GenTree* Lowering::LowerNode(GenTree* node) #endif // FEATURE_HW_INTRINSICS ContainCheckBoundsChk(node->AsBoundsChk()); break; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH case GT_ARR_ELEM: return LowerArrElem(node); @@ -224,17 
+224,17 @@ GenTree* Lowering::LowerNode(GenTree* node) LowerRotate(node); break; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: ContainCheckShiftRotate(node->AsOp()); break; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT case GT_LSH: case GT_RSH: case GT_RSZ: -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) LowerShift(node->AsOp()); #else ContainCheckShiftRotate(node->AsOp()); @@ -251,11 +251,11 @@ GenTree* Lowering::LowerNode(GenTree* node) ContainCheckLclHeap(node->AsOp()); break; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: @@ -286,7 +286,7 @@ GenTree* Lowering::LowerNode(GenTree* node) case GT_STORE_LCL_FLD: { -#if defined(_TARGET_AMD64_) && defined(FEATURE_SIMD) +#if defined(TARGET_AMD64) && defined(FEATURE_SIMD) GenTreeLclVarCommon* const store = node->AsLclVarCommon(); if ((store->TypeGet() == TYP_SIMD8) != (store->gtOp1->TypeGet() == TYP_SIMD8)) { @@ -295,7 +295,7 @@ GenTree* Lowering::LowerNode(GenTree* node) store->gtOp1 = bitcast; BlockRange().InsertBefore(store, bitcast); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // TODO-1stClassStructs: Once we remove the requirement that all struct stores // are block stores (GT_STORE_BLK or GT_STORE_OBJ), here is where we would put the local // store under a block store if codegen will require it. @@ -312,7 +312,7 @@ GenTree* Lowering::LowerNode(GenTree* node) break; } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) case GT_CMPXCHG: CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand); break; @@ -320,7 +320,7 @@ GenTree* Lowering::LowerNode(GenTree* node) case GT_XADD: CheckImmedAndMakeContained(node, node->AsOp()->gtOp2); break; -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) case GT_XADD: if (node->IsUnusedValue()) { @@ -336,7 +336,7 @@ GenTree* Lowering::LowerNode(GenTree* node) break; #endif -#ifndef _TARGET_ARMARCH_ +#ifndef TARGET_ARMARCH // TODO-ARMARCH-CQ: We should contain this as long as the offset fits. case GT_OBJ: if (node->AsObj()->Addr()->OperIsLocalAddr()) @@ -344,7 +344,7 @@ GenTree* Lowering::LowerNode(GenTree* node) node->AsObj()->Addr()->SetContained(); } break; -#endif // !_TARGET_ARMARCH_ +#endif // !TARGET_ARMARCH case GT_KEEPALIVE: node->gtGetOp1()->SetRegOptional(); @@ -527,13 +527,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) minSwitchTabJumpCnt++; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // On ARM for small switch tables we will // generate a sequence of compare and branch instructions // because the code to load the base of the switch // table is huge and hideous due to the relocation... :( minSwitchTabJumpCnt += 2; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // Once we have the temporary variable, we construct the conditional branch for // the default case. As stated above, this conditional is being shared between @@ -577,7 +577,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt; -#if defined(_TARGET_UNIX_) && defined(_TARGET_ARM_) +#if defined(TARGET_UNIX) && defined(TARGET_ARM) // Force using an inlined jumping instead switch table generation. // Switch jump table is generated with incorrect values in CoreRT case, // so any large switch will crash after loading to PC any such value. 
@@ -587,7 +587,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // See also https://github.com/dotnet/coreclr/issues/13194 // Also https://github.com/dotnet/coreclr/pull/13197 useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI); -#endif // defined(_TARGET_UNIX_) && defined(_TARGET_ARM_) +#endif // defined(TARGET_UNIX) && defined(TARGET_ARM) // If we originally had 2 unique successors, check to see whether there is a unique // non-default case, in which case we can eliminate the switch altogether. @@ -760,7 +760,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (tempLclType != TYP_I_IMPL) { // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL. @@ -822,7 +822,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) bool Lowering::TryLowerSwitchToBitTest( BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue) { -#ifndef _TARGET_XARCH_ +#ifndef TARGET_XARCH // Other architectures may use this if they substitute GT_BT with equivalent code. return false; #else @@ -895,7 +895,7 @@ bool Lowering::TryLowerSwitchToBitTest( return false; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // // See if we can avoid a 8 byte immediate on 64 bit targets. If all upper 32 bits are 1 // then inverting the bit table will make them 0 so that the table now fits in 32 bits. @@ -956,7 +956,7 @@ bool Lowering::TryLowerSwitchToBitTest( LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc); return true; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } // NOTE: this method deliberately does not update the call arg table. It must only @@ -1014,7 +1014,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf bool isOnStack = (info->GetRegNum() == REG_STK); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Mark contained when we pass struct // GT_FIELD_LIST is always marked contained when it is generated if (type == TYP_STRUCT) @@ -1035,9 +1035,9 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf // TODO: Need to check correctness for FastTailCall if (call->IsFastTailCall()) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM NYI_ARM("lower: struct argument by fast tail call"); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } putArg = new (comp, GT_PUTARG_SPLIT) @@ -1170,7 +1170,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf { assert(!varTypeIsSIMD(arg)); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86 VM lies about the type of a struct containing a pointer sized // integer field by returning the type of its field as the type of struct. // Such struct can be passed in a register depending its position in @@ -1205,7 +1205,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(Compiler::DNER_VMNeedsStackAddr)); } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } else if (!arg->OperIs(GT_FIELD_LIST)) { @@ -1274,7 +1274,7 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) } #if defined(FEATURE_SIMD) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their // allocated size (see lvSize()). 
However, when passing the variables as arguments, and // storing the variables to the outgoing argument area on the stack, we must use their @@ -1297,7 +1297,7 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) } } } -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // TYP_SIMD8 parameters that are passed as longs if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum())) { @@ -1308,13 +1308,13 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) assert(info->GetNode() == arg); type = TYP_LONG; } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) #endif // defined(FEATURE_SIMD) // If we hit this we are probably double-lowering. assert(!arg->OperIsPutArg()); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (varTypeIsLong(type)) { noway_assert(arg->OperIs(GT_LONG)); @@ -1348,10 +1348,10 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) BlockRange().Remove(arg); } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) { -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH if (call->IsVarargs() || comp->opts.compUseSoftFP) { // For vararg call or on armel, reg args should be all integer. @@ -1362,7 +1362,7 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) type = newNode->TypeGet(); } } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH GenTree* putArg = NewPutArg(call, arg, info, type); @@ -1376,7 +1376,7 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) } } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH //------------------------------------------------------------------------ // LowerFloatArg: Lower float call arguments on the arm platform. // @@ -1460,7 +1460,7 @@ GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum) var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT; GenTree* intArg = comp->gtNewBitCastNode(intType, arg); intArg->SetRegNum(regNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (floatType == TYP_DOUBLE) { regNumber nextReg = REG_NEXT(regNum); @@ -1700,7 +1700,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call) { assert(comp->opts.IsJit64Compat()); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Confine this to IL stub calls which aren't marked as unmanaged. if (call->IsPInvoke() && !call->IsUnmanaged()) { @@ -1751,7 +1751,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call) comp->compVSQuirkStackPaddingNeeded = VSQUIRK_STACK_PAD; } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } // Inserts profiler hook, GT_PROF_HOOK for a tail call node. 
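The vararg/soft-FP path above retypes floating-point register arguments as integer bitcasts (GT_BITCAST), so the value travels in integer registers with its bit pattern intact. A self-contained illustration of that bit-preserving reinterpretation, using memcpy as the portable stand-in for the bitcast node:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // A double passed in integer registers (e.g. an r0/r1 pair on soft-FP ARM) keeps
    // its exact bit pattern; only the register class changes.
    static uint64_t BitcastDoubleToLong(double d)
    {
        uint64_t bits;
        static_assert(sizeof(bits) == sizeof(d), "bitcast requires matching sizes");
        memcpy(&bits, &d, sizeof(bits));
        return bits;
    }

    int main()
    {
        printf("1.0 as raw bits: 0x%016llx\n",
               (unsigned long long)BitcastDoubleToLong(1.0)); // 0x3ff0000000000000
        return 0;
    }
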
@@ -1796,14 +1796,14 @@ void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint assert(call->IsTailCall()); assert(comp->compIsProfilerHookNeeded()); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (insertionPoint == nullptr) { insertionPoint = call; } -#else // !defined(_TARGET_X86_) +#else // !defined(TARGET_X86) if (insertionPoint == nullptr) { @@ -1839,7 +1839,7 @@ void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint } } -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) assert(insertionPoint != nullptr); GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID); @@ -1879,9 +1879,9 @@ void Lowering::LowerFastTailCall(GenTreeCall* call) assert(!call->IsUnmanaged()); // tail calls to unamanaged methods assert(!comp->compLocallocUsed); // tail call from methods that also do localloc -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // We expect to see a call that meets the following conditions assert(call->IsFastTailCall()); @@ -1973,7 +1973,7 @@ void Lowering::LowerFastTailCall(GenTreeCall* call) unsigned int overwrittenStart = put->getArgOffset(); unsigned int overwrittenEnd = overwrittenStart + put->getArgSize(); -#if !(defined(_TARGET_WINDOWS_) && defined(_TARGET_64BIT_)) +#if !(defined(TARGET_WINDOWS) && defined(TARGET_64BIT)) int baseOff = -1; // Stack offset of first arg on stack #endif @@ -1986,7 +1986,7 @@ void Lowering::LowerFastTailCall(GenTreeCall* call) continue; } -#if defined(_TARGET_WINDOWS_) && defined(_TARGET_64BIT_) +#if defined(TARGET_WINDOWS) && defined(TARGET_64BIT) // On Win64, the argument position determines the stack slot uniquely, and even the // register args take up space in the stack frame (shadow space). unsigned int argStart = callerArgLclNum * TARGET_POINTER_SIZE; @@ -2183,9 +2183,9 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget assert(!call->IsUnmanaged()); // tail calls to unamanaged methods assert(!comp->compLocallocUsed); // tail call from methods that also do localloc -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // We expect to see a call that meets the following conditions assert(call->IsTailCallViaHelper()); @@ -2219,7 +2219,7 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget // The callTarget tree needs to be sequenced. LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget); -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) // For ARM32 and AMD64, first argument is CopyRoutine and second argument is a place holder node. fgArgTabEntry* argEntry; @@ -2249,7 +2249,7 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget argEntry->GetNode()->AsUnOp()->gtOp1 = callTarget; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // Verify the special args are what we expect, and replace the dummy args with real values. // We need to figure out the size of the outgoing stack arguments, not including the special args. 
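The Win64 branch of LowerFastTailCall above can compute each caller argument's slot purely from its position, because every argument, register-passed or not, owns a pointer-sized slot (the shadow space covers the first four). A small arithmetic sketch under that assumption:

    #include <cstdio>

    int main()
    {
        const unsigned pointerSize = 8; // TARGET_POINTER_SIZE on a 64-bit target
        for (unsigned argIndex = 0; argIndex < 6; argIndex++)
        {
            unsigned argStart = argIndex * pointerSize;  // the hunk's argStart
            unsigned argEnd   = argStart + pointerSize;
            printf("arg %u -> stack bytes [%u, %u)%s\n", argIndex, argStart, argEnd,
                   argIndex < 4 ? " (shadow space slot for a register arg)" : "");
        }
        return 0;
    }
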
@@ -2334,7 +2334,7 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget return result; } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT //------------------------------------------------------------------------ // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node. // @@ -2551,7 +2551,7 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) return cmp->gtNext; } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT //------------------------------------------------------------------------ // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations. @@ -2577,12 +2577,12 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) { assert(cmp->gtGetOp2()->IsIntegralConst()); -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) GenTree* op1 = cmp->gtGetOp1(); GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon(); ssize_t op2Value = op2->IconValue(); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH var_types op1Type = op1->TypeGet(); if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genSmallTypeCanRepresentValue(op1Type, op2Value)) { @@ -2616,11 +2616,11 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) // the result of bool returning calls. // bool removeCast = -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) && #endif (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH || IsContainableMemoryOp(castOp) #endif ); @@ -2629,7 +2629,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) { assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 bool cmpEq = cmp->OperIs(GT_EQ); cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE); @@ -2695,7 +2695,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) andOp1->ClearContained(); andOp2->ClearContained(); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst()) { // @@ -2733,7 +2733,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE)) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT // results in smaller and faster code. It also doesn't have special register @@ -2778,7 +2778,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) return cmp->gtNext; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } else if (cmp->OperIs(GT_EQ, GT_NE)) { @@ -2792,9 +2792,9 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) // test instruction. 
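The BT rewrite described above replaces a test against a computed single-bit mask with a direct bit test of the shift amount. A quick check that the two forms agree (arbitrary sample value; the modulo-bit-size behaviour the comment mentions is not exercised here):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint32_t x = 0xA6;
        for (uint32_t y = 0; y < 32; y++)
        {
            bool viaMask = (x & (1u << y)) != 0;  // TEST_NE(x, LSH(1, y))
            bool viaBt   = ((x >> y) & 1) != 0;   // what BT(x, y) computes
            if (viaMask != viaBt)
            {
                printf("mismatch at bit %u\n", y);
                return 1;
            }
        }
        printf("TEST against a shifted 1 and BT agree on every bit index\n");
        return 0;
    }
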
if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) && -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG)) -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 op1->OperIs(GT_AND, GT_ADD, GT_SUB)) #endif { @@ -2839,7 +2839,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) return next; } } -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) return cmp; } @@ -2855,7 +2855,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) // GenTree* Lowering::LowerCompare(GenTree* cmp) { -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (cmp->gtGetOp1()->TypeGet() == TYP_LONG) { return DecomposeLongCompare(cmp); @@ -2873,7 +2873,7 @@ GenTree* Lowering::LowerCompare(GenTree* cmp) } } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet()) { if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet())) @@ -2889,7 +2889,7 @@ GenTree* Lowering::LowerCompare(GenTree* cmp) cmp->gtFlags |= GTF_UNSIGNED; } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH ContainCheckCompare(cmp->AsOp()); return cmp->gtNext; } @@ -2909,7 +2909,7 @@ GenTree* Lowering::LowerCompare(GenTree* cmp) // GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 GenTree* relop = jtrue->gtGetOp1(); GenTree* relopOp2 = relop->AsOp()->gtGetOp2(); @@ -2946,7 +2946,7 @@ GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue) return nullptr; } } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 ContainCheckJTrue(jtrue); @@ -3083,7 +3083,7 @@ void Lowering::LowerRet(GenTree* ret) DISPNODE(ret); JITDUMP("============"); -#if defined(_TARGET_AMD64_) && defined(FEATURE_SIMD) +#if defined(TARGET_AMD64) && defined(FEATURE_SIMD) GenTreeUnOp* const unOp = ret->AsUnOp(); if ((unOp->TypeGet() == TYP_LONG) && (unOp->gtOp1->TypeGet() == TYP_SIMD8)) { @@ -3091,7 +3091,7 @@ void Lowering::LowerRet(GenTree* ret) unOp->gtOp1 = bitcast; BlockRange().InsertBefore(unOp, bitcast); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // Method doing PInvokes has exactly one return block unless it has tail calls. if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB)) @@ -3235,13 +3235,13 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) GenTree* thisArgNode; if (call->IsTailCallViaHelper()) { -#ifdef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args. +#ifdef TARGET_X86 // x86 tailcall via helper follows normal calling convention, but with extra stack args. const unsigned argNum = 0; -#else // !_TARGET_X86_ +#else // !TARGET_X86 // In case of helper dispatched tail calls, "thisptr" will be the third arg. // The first two args are: real call target and addr of args copy routine. 
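The small-unsigned normalization in LowerCompare above leans on zero-extension preserving order: widening two unsigned small operands can never change a relational result, so the compare can be marked GTF_UNSIGNED and sized however containment prefers. An exhaustive check for bytes:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        for (unsigned a = 0; a < 256; a++)
        {
            for (unsigned b = 0; b < 256; b++)
            {
                bool atByteSize = (uint8_t)a < (uint8_t)b;
                bool widened    = (uint32_t)(uint8_t)a < (uint32_t)(uint8_t)b;
                if (atByteSize != widened)
                {
                    printf("mismatch at %u, %u\n", a, b);
                    return 1;
                }
            }
        }
        printf("byte-sized unsigned compares and their widened forms always agree\n");
        return 0;
    }
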
const unsigned argNum = 2; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum); thisArgNode = thisArgTabEntry->GetNode(); @@ -3259,7 +3259,7 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) unsigned lclNum; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (call->IsTailCallViaHelper() && originalThisExpr->IsLocal()) { // For ordering purposes for the special tailcall arguments on x86, we forced the @@ -3271,7 +3271,7 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) lclNum = originalThisExpr->AsLclVarCommon()->GetLclNum(); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call")); @@ -3311,7 +3311,7 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (call->gtCallCookie != nullptr) { NYI_X86("Morphing indirect non-virtual call with non-standard args"); @@ -3504,7 +3504,7 @@ void Lowering::InsertPInvokeMethodProlog() // for x86, don't pass the secretArg. CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr); #else GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM)); @@ -3528,7 +3528,7 @@ void Lowering::InsertPInvokeMethodProlog() firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store)); DISPTREERANGE(firstBlockRange, store); -#if !defined(_TARGET_X86_) && !defined(_TARGET_ARM_) +#if !defined(TARGET_X86) && !defined(TARGET_ARM) // For x86, this step is done at the call site (due to stack pointer not being static in the function). // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME. @@ -3543,9 +3543,9 @@ void Lowering::InsertPInvokeMethodProlog() firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP)); DISPTREERANGE(firstBlockRange, storeSP); -#endif // !defined(_TARGET_X86_) && !defined(_TARGET_ARM_) +#endif // !defined(TARGET_X86) && !defined(TARGET_ARM) -#if !defined(_TARGET_ARM_) +#if !defined(TARGET_ARM) // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME. // -------------------------------------------------------- @@ -3559,14 +3559,14 @@ void Lowering::InsertPInvokeMethodProlog() firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP)); DISPTREERANGE(firstBlockRange, storeFP); -#endif // !defined(_TARGET_ARM_) +#endif // !defined(TARGET_ARM) // -------------------------------------------------------- // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame. 
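For readers unfamiliar with the InlinedCallFrame bookkeeping these prolog/epilog helpers maintain: conceptually a frame is linked into a per-thread chain before the native call and unlinked afterwards. The sketch below is a deliberately simplified, hypothetical model of that push/pop discipline only; the real frame carries the call-site SP, return address, secret stub argument and more, and it is the runtime, not user code, that walks the chain.

    #include <cstdio>

    struct Frame                       // hypothetical stand-in for InlinedCallFrame
    {
        Frame*      next;
        const char* description;
    };

    static thread_local Frame* t_frameChain = nullptr;

    static void PushFrame(Frame* f) { f->next = t_frameChain; t_frameChain = f; }
    static void PopFrame(Frame* f)  { t_frameChain = f->next; }

    static void CallUnmanaged()
    {
        Frame frame{nullptr, "inlined pinvoke frame"};
        PushFrame(&frame);   // what the 64-bit IL-stub prolog does once, up front
        /* ...switch to preemptive mode and call the native function... */
        PopFrame(&frame);    // undone in the epilog, or per call site outside IL stubs
    }

    int main()
    {
        CallUnmanaged();
        printf("frame chain restored: %s\n", t_frameChain == nullptr ? "yes" : "no");
        return 0;
    }
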
CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Push a frame - if we are NOT in an IL stub, this is done right before the call @@ -3576,7 +3576,7 @@ void Lowering::InsertPInvokeMethodProlog() ContainCheckStoreIndir(frameUpd->AsIndir()); DISPTREERANGE(firstBlockRange, frameUpd); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } //------------------------------------------------------------------------ @@ -3644,9 +3644,9 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame); returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd)); @@ -3715,7 +3715,7 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) if (callType == CT_INDIRECT) { -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum. const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotNum() * TARGET_POINTER_SIZE; @@ -3727,7 +3727,7 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) { src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL); } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) } else { @@ -3763,7 +3763,7 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) InsertTreeBeforeAndContainCheck(insertBefore, store); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = SP @@ -3794,7 +3794,7 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) // contains PInvokes; on 64-bit targets this is necessary in non-stubs. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Set the TCB's frame to be the one we just created. @@ -3806,7 +3806,7 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsIndir()); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT // IMPORTANT **** This instruction must be the last real instruction **** // It changes the thread's state to Preemptive mode @@ -3870,7 +3870,7 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { tree = CreateFrameLinkUpdate(PopFrame); @@ -3894,7 +3894,7 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker); ContainCheckStoreLoc(storeCallSiteTracker); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } //------------------------------------------------------------------------ @@ -4033,14 +4033,14 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call) int thisPtrArgNum; regNumber thisPtrArgReg; -#ifndef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args. 
+#ifndef TARGET_X86 // x86 tailcall via helper follows normal calling convention, but with extra stack args. if (call->IsTailCallViaHelper()) { thisPtrArgNum = 2; thisPtrArgReg = REG_ARG_2; } else -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 { thisPtrArgNum = 0; thisPtrArgReg = comp->codeGen->genGetThisArgReg(call); @@ -4190,7 +4190,7 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call) GenTree* result = nullptr; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Non-tail calls: Jump Stubs are not taken into account by VM for mapping an AV into a NullRef // exception. Therefore, JIT needs to emit an explicit null check. Note that Jit64 too generates // an explicit null check. @@ -4246,7 +4246,7 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call) // accessed via an indirection. GenTree* addr = AddrGen(stubAddr); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as // the target address, and we set a flag that it's a VSD call. The helper then // handles any necessary indirection. @@ -4254,7 +4254,7 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call) { result = addr; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (result == nullptr) { @@ -4491,7 +4491,7 @@ bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable) // void Lowering::LowerAdd(GenTreeOp* node) { -#ifndef _TARGET_ARMARCH_ +#ifndef TARGET_ARMARCH if (varTypeIsIntegralOrI(node->TypeGet())) { LIR::Use use; @@ -4506,7 +4506,7 @@ void Lowering::LowerAdd(GenTreeOp* node) } } } -#endif // !_TARGET_ARMARCH_ +#endif // !TARGET_ARMARCH if (node->OperIs(GT_ADD)) { @@ -4540,14 +4540,14 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) } assert(varTypeIsFloating(divMod->TypeGet())); #endif // USE_HELPERS_FOR_INT_DIV -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) assert(divMod->OperGet() != GT_UMOD); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (dividend->OperIs(GT_LONG)) { return false; @@ -4621,7 +4621,7 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) } // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32 -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) if (!comp->opts.MinOpts() && (divisorValue >= 3)) { size_t magic; @@ -4634,7 +4634,7 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) } else { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT magic = MagicDivide::GetUnsigned64Magic(static_cast(divisorValue), &add, &shift); #else unreached(); @@ -4743,9 +4743,9 @@ GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) #if defined(USE_HELPERS_FOR_INT_DIV) assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls"); #endif // USE_HELPERS_FOR_INT_DIV -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) assert(node->OperGet() != GT_MOD); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 if (!divisor->IsCnsIntOrI()) { @@ -4799,7 +4799,7 @@ GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) return nullptr; } -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) ssize_t magic; int shift; @@ -4809,11 +4809,11 @@ GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) } else { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT magic = MagicDivide::GetSigned64Magic(static_cast(divisorValue), &shift); -#else // !_TARGET_64BIT_ 
+#else // !TARGET_64BIT unreached(); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } divisor->AsIntConCommon()->SetIconValue(magic); @@ -4900,7 +4900,7 @@ GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) } return mulhi; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Currently there's no GT_MULHI for ARM32 return nullptr; #else @@ -5024,7 +5024,7 @@ void Lowering::LowerShift(GenTreeOp* shift) assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ)); size_t mask = 0x1f; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (varTypeIsLong(shift->TypeGet())) { mask = 0x3f; @@ -5263,25 +5263,25 @@ void Lowering::DoPhase() InsertPInvokeMethodProlog(); } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) DecomposeLongs decomp(comp); // Initialize the long decomposition class. if (comp->compLongUsed) { decomp.PrepareForDecomposition(); } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext) { /* Make the block publicly available */ comp->compCurBB = block; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (comp->compLongUsed) { decomp.DecomposeBlock(block); } -#endif //!_TARGET_64BIT_ +#endif //! TARGET_64BIT LowerBlock(block); } @@ -5417,7 +5417,7 @@ void Lowering::CheckNode(Compiler* compiler, GenTree* node) case GT_SIMD: assert(node->TypeGet() != TYP_SIMD12); break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case GT_LCL_VAR: case GT_STORE_LCL_VAR: { @@ -5426,7 +5426,7 @@ void Lowering::CheckNode(Compiler* compiler, GenTree* node) assert(node->TypeGet() != TYP_SIMD12 || compiler->lvaIsFieldOfDependentlyPromotedStruct(lclVar)); } break; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT #endif // SIMD default: @@ -5631,7 +5631,7 @@ void Lowering::ContainCheckNode(GenTree* node) case GT_ADD: case GT_SUB: -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: @@ -5643,7 +5643,7 @@ void Lowering::ContainCheckNode(GenTree* node) ContainCheckBinary(node->AsOp()); break; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) case GT_MUL_LONG: #endif case GT_MUL: @@ -5661,7 +5661,7 @@ void Lowering::ContainCheckNode(GenTree* node) case GT_RSZ: case GT_ROL: case GT_ROR: -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: #endif @@ -5692,11 +5692,11 @@ void Lowering::ContainCheckNode(GenTree* node) // The regNum must have been set by the lowering of the call. 
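LowerConstIntDivOrMod and LowerUnsignedDivOrMod above replace division by a constant with a multiply by a precomputed reciprocal plus a shift (the GetSigned64Magic / GetUnsigned64Magic calls). A self-contained check of the unsigned pattern using two textbook 32-bit magic constants; these are the standard reciprocal-multiplication values, not anything read out of the JIT's tables:

    #include <cstdint>
    #include <cstdio>

    // n / 3  == (n * 0xAAAAAAAB) >> 33 for every uint32_t n   (0xAAAAAAAB = ceil(2^33 / 3))
    // n / 10 == (n * 0xCCCCCCCD) >> 35 for every uint32_t n   (0xCCCCCCCD = ceil(2^35 / 10))
    static uint32_t DivBy3(uint32_t n)  { return (uint32_t)(((uint64_t)n * 0xAAAAAAABull) >> 33); }
    static uint32_t DivBy10(uint32_t n) { return (uint32_t)(((uint64_t)n * 0xCCCCCCCDull) >> 35); }

    int main()
    {
        for (uint64_t n = 0; n <= 0xFFFFFFFFull; n += 9973) // a coarse stride keeps the check quick
        {
            uint32_t v = (uint32_t)n;
            if (DivBy3(v) != v / 3 || DivBy10(v) != v / 10)
            {
                printf("mismatch at %u\n", v);
                return 1;
            }
        }
        printf("magic-number division matches the hardware divide on all sampled values\n");
        return 0;
    }
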
assert(node->GetRegNum() != REG_NA); break; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: ContainCheckSIMD(node->AsSIMD()); @@ -5720,14 +5720,14 @@ void Lowering::ContainCheckNode(GenTree* node) // void Lowering::ContainCheckReturnTrap(GenTreeOp* node) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(node->OperIs(GT_RETURNTRAP)); // This just turns into a compare of its child with an int + a conditional call if (node->gtOp1->isIndir()) { MakeSrcContained(node, node->gtOp1); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH } //------------------------------------------------------------------------ @@ -5772,14 +5772,14 @@ void Lowering::ContainCheckRet(GenTreeOp* ret) { assert(ret->OperIs(GT_RETURN)); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (ret->TypeGet() == TYP_LONG) { GenTree* op1 = ret->gtGetOp1(); noway_assert(op1->OperGet() == GT_LONG); MakeSrcContained(ret, op1); } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) #if FEATURE_MULTIREG_RET if (varTypeIsStruct(ret)) { diff --git a/src/coreclr/src/jit/lower.h b/src/coreclr/src/jit/lower.h index e4817eea90a32..1ad3a725d85d5 100644 --- a/src/coreclr/src/jit/lower.h +++ b/src/coreclr/src/jit/lower.h @@ -96,10 +96,10 @@ class Lowering : public Phase void ContainCheckCompare(GenTreeOp* node); void ContainCheckBinary(GenTreeOp* node); void ContainCheckBoundsChk(GenTreeBoundsChk* node); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH void ContainCheckFloatBinary(GenTreeOp* node); void ContainCheckIntrinsic(GenTreeOp* node); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH #ifdef FEATURE_SIMD void ContainCheckSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD @@ -124,7 +124,7 @@ class Lowering : public Phase // Call Lowering // ------------------------------ void LowerCall(GenTree* call); -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif GenTree* OptimizeConstCompare(GenTree* cmp); @@ -150,7 +150,7 @@ class Lowering : public Phase void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type); void LowerArg(GenTreeCall* call, GenTree** ppTree); -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif @@ -221,7 +221,7 @@ class Lowering : public Phase // return true if this call target is within range of a pc-rel call on the machine bool IsCallTargetInRange(void* addr); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) GenTree* PreferredRegOptionalOperand(GenTree* tree); // ------------------------------------------------------------------ @@ -273,7 +273,7 @@ class Lowering : public Phase regOptionalOperand->SetRegOptional(); } } -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) // Per tree node member functions void LowerStoreIndir(GenTreeIndir* node); diff --git a/src/coreclr/src/jit/lowerarmarch.cpp b/src/coreclr/src/jit/lowerarmarch.cpp index 639db6114ec7b..7963a3ba71606 100644 --- a/src/coreclr/src/jit/lowerarmarch.cpp +++ b/src/coreclr/src/jit/lowerarmarch.cpp @@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures +#ifdef 
TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures #include "jit.h" #include "sideeffects.h" @@ -66,7 +66,7 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->gtIconVal; emitAttr attr = emitActualTypeSize(childNode->TypeGet()); emitAttr size = EA_SIZE(attr); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM insFlags flags = parentNode->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE; #endif @@ -74,18 +74,18 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) { case GT_ADD: case GT_SUB: -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_CMPXCHG: case GT_LOCKADD: case GT_XADD: return comp->compSupports(InstructionSet_Atomics) ? false : emitter::emitIns_valid_imm_for_add(immVal, size); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return emitter::emitIns_valid_imm_for_add(immVal, flags); #endif break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_EQ: case GT_NE: case GT_LT: @@ -109,7 +109,7 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) case GT_JCMP: assert(((parentNode->gtFlags & GTF_JCMP_TST) == 0) ? (immVal == 0) : isPow2(immVal)); return true; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) case GT_EQ: case GT_NE: case GT_LT: @@ -121,9 +121,9 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) case GT_OR: case GT_XOR: return emitter::emitIns_valid_imm_for_alu(immVal); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_STORE_LCL_FLD: case GT_STORE_LCL_VAR: if (immVal == 0) @@ -261,13 +261,13 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) if (fill == 0) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // On ARM64 we can just use REG_ZR instead of having to load // the constant into a real register like on ARM32. src->SetContained(); #endif } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 else if (size >= REGSIZE_BYTES) { fill *= 0x0101010101010101LL; @@ -379,7 +379,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT return; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // If we're going to use LDP/STP we need to ensure that the offset is // a multiple of 8 since these instructions do not have an unscaled // offset variant. @@ -555,14 +555,14 @@ void Lowering::ContainCheckCallOperands(GenTreeCall* call) // void Lowering::ContainCheckStoreIndir(GenTreeIndir* node) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 GenTree* src = node->AsOp()->gtOp2; if (!varTypeIsFloating(src->TypeGet()) && src->IsIntegralConst(0)) { // an integer zero for 'src' can be contained. MakeSrcContained(node, src); } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 ContainCheckIndir(node); } @@ -604,7 +604,7 @@ void Lowering::ContainCheckIndir(GenTreeIndir* indirNode) bool makeContained = true; if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr)) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // ARM floating-point load/store doesn't support a form similar to integer // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm. 
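The ARM64 block-init path above broadcasts the byte fill value across a full register by multiplying it by 0x0101010101010101, so a single 8-byte store writes eight copies. A two-line confirmation of the trick:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t fill    = 0xAB;                            // the byte to replicate
        uint64_t pattern = fill * 0x0101010101010101ull;    // one copy in each byte lane
        printf("0x%016llx\n", (unsigned long long)pattern); // 0xabababababababab
        return pattern == 0xABABABABABABABABull ? 0 : 1;
    }
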
@@ -682,14 +682,14 @@ void Lowering::ContainCheckShiftRotate(GenTreeOp* node) GenTree* shiftBy = node->gtOp2; assert(node->OperIsShiftOrRotate()); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM GenTree* source = node->gtOp1; if (node->OperIs(GT_LSH_HI, GT_RSH_LO)) { assert(source->OperGet() == GT_LONG); MakeSrcContained(node, source); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (shiftBy->IsCnsIntOrI()) { @@ -727,12 +727,12 @@ void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) { MakeSrcContained(storeLoc, op1); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (op1->OperGet() == GT_LONG) { MakeSrcContained(storeLoc, op1); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } //------------------------------------------------------------------------ @@ -743,7 +743,7 @@ void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) // void Lowering::ContainCheckCast(GenTreeCast* node) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM GenTree* castOp = node->CastOp(); var_types castToType = node->CastToType(); var_types srcType = castOp->TypeGet(); @@ -753,7 +753,7 @@ void Lowering::ContainCheckCast(GenTreeCast* node) assert(castOp->OperGet() == GT_LONG); MakeSrcContained(node, castOp); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } //------------------------------------------------------------------------ @@ -872,4 +872,4 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) } #endif // FEATURE_HW_INTRINSICS -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH diff --git a/src/coreclr/src/jit/lowerxarch.cpp b/src/coreclr/src/jit/lowerxarch.cpp index e0c3ce066bf8d..86f7e71885a98 100644 --- a/src/coreclr/src/jit/lowerxarch.cpp +++ b/src/coreclr/src/jit/lowerxarch.cpp @@ -21,7 +21,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_XARCH_ // This file is only used for xarch +#ifdef TARGET_XARCH // This file is only used for xarch #include "jit.h" #include "sideeffects.h" @@ -192,7 +192,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) src->SetContained(); } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else if (size >= REGSIZE_BYTES) { fill *= 0x0101010101010101LL; @@ -211,7 +211,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) } else { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper; #else // TODO-X86-CQ: Investigate whether a helper call would be beneficial on x86 @@ -314,7 +314,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode) { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper; #else // TODO-X86-CQ: Investigate whether a helper call would be beneficial on x86 @@ -386,7 +386,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) if (src->OperIs(GT_FIELD_LIST)) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid; GenTreeFieldList* fieldList = src->AsFieldList(); @@ -463,7 +463,7 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 return; } @@ -488,9 +488,9 @@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) // push rdx if (IsContainableImmed(putArgStk, src) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) && !src->IsIntegralConst(0) -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 ) { MakeSrcContained(putArgStk, src); @@ -528,25 +528,25 
@@ void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgStk) if (size <= CPBLK_UNROLL_LIMIT && !layout->HasGCPtr()) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (size < XMM_REGSIZE_BYTES) { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Unroll; } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (layout->HasGCPtr()) { // On x86, we must use `push` to store GC references to the stack in order for the emitter to properly update // the function's GC info. These `putargstk` nodes will generate a sequence of `push` instructions. putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Push; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 else { putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::RepInstr; @@ -724,7 +724,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode) } } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if ((simdNode->gtSIMDIntrinsicID == SIMDIntrinsicGetItem) && (simdNode->gtGetOp1()->OperGet() == GT_IND)) { // If SIMD vector is already in memory, we force its @@ -1523,11 +1523,11 @@ void Lowering::ContainCheckCallOperands(GenTreeCall* call) assert(call->gtCallAddr != nullptr); ctrlExpr = call->gtCallAddr; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Fast tail calls aren't currently supported on x86, but if they ever are, the code // below that handles indirect VSD calls will need to be fixed. assert(!call->IsFastTailCall() || !call->IsVirtualStub()); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } // set reg requirements on call target represented as control sequence. @@ -1540,7 +1540,7 @@ void Lowering::ContainCheckCallOperands(GenTreeCall* call) // computed into a register. if (!call->IsFastTailCall()) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86, we need to generate a very specific pattern for indirect VSD calls: // // 3-byte nop @@ -1554,7 +1554,7 @@ void Lowering::ContainCheckCallOperands(GenTreeCall* call) MakeSrcContained(call, ctrlExpr); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (ctrlExpr->isIndir()) { // We may have cases where we have set a register target on the ctrlExpr, but if it @@ -1687,7 +1687,7 @@ void Lowering::ContainCheckStoreIndir(GenTreeIndir* node) // void Lowering::ContainCheckMul(GenTreeOp* node) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) assert(node->OperIs(GT_MUL, GT_MULHI, GT_MUL_LONG)); #else assert(node->OperIs(GT_MUL, GT_MULHI)); @@ -1729,7 +1729,7 @@ void Lowering::ContainCheckMul(GenTreeOp* node) { hasImpliedFirstOperand = true; } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) else if (node->OperGet() == GT_MUL_LONG) { hasImpliedFirstOperand = true; @@ -1866,7 +1866,7 @@ void Lowering::ContainCheckDivOrMod(GenTreeOp* node) GenTree* divisor = node->gtGetOp2(); bool divisorCanBeRegOptional = true; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 GenTree* dividend = node->gtGetOp1(); if (dividend->OperGet() == GT_LONG) { @@ -1897,14 +1897,14 @@ void Lowering::ContainCheckDivOrMod(GenTreeOp* node) void Lowering::ContainCheckShiftRotate(GenTreeOp* node) { assert(node->OperIsShiftOrRotate()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 GenTree* source = node->gtOp1; if (node->OperIsShiftLong()) { assert(source->OperGet() == GT_LONG); MakeSrcContained(node, source); } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 GenTree* shiftBy = node->gtOp2; if (IsContainableImmed(node, shiftBy) && (shiftBy->AsIntConCommon()->IconValue() <= 255) && @@ -1945,12 +1945,12 @@ void 
Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) { MakeSrcContained(storeLoc, op1); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (op1->OperGet() == GT_LONG) { MakeSrcContained(storeLoc, op1); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } //------------------------------------------------------------------------ @@ -1997,13 +1997,13 @@ void Lowering::ContainCheckCast(GenTreeCast* node) } } } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (varTypeIsLong(srcType)) { noway_assert(castOp->OperGet() == GT_LONG); castOp->SetContained(); } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) } //------------------------------------------------------------------------ @@ -2441,7 +2441,7 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode) case SIMDIntrinsicInit: { op1 = simdNode->AsOp()->gtOp1; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (op1->OperGet() == GT_LONG) { MakeSrcContained(simdNode, op1); @@ -2456,7 +2456,7 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode) } } else -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT if (op1->IsFPZero() || op1->IsIntegralConst(0) || (varTypeIsIntegral(simdNode->gtSIMDBaseType) && op1->IsIntegralConst(-1))) { @@ -3534,4 +3534,4 @@ void Lowering::ContainCheckFloatBinary(GenTreeOp* node) } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH diff --git a/src/coreclr/src/jit/lsra.cpp b/src/coreclr/src/jit/lsra.cpp index a92a56e351bb5..5892e027619aa 100644 --- a/src/coreclr/src/jit/lsra.cpp +++ b/src/coreclr/src/jit/lsra.cpp @@ -130,14 +130,14 @@ void lsraAssignRegToTree(GenTree* tree, regNumber reg, unsigned regIdx) { tree->SetRegNum(reg); } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) else if (tree->OperIsMultiRegOp()) { assert(regIdx == 1); GenTreeMultiRegOp* mul = tree->AsMultiRegOp(); mul->gtOtherReg = reg; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT #if FEATURE_MULTIREG_RET else if (tree->OperGet() == GT_COPY) { @@ -242,7 +242,7 @@ regMaskTP LinearScan::allRegs(RegisterType rt) regMaskTP LinearScan::allByteRegs() { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return availableIntRegs & RBM_BYTE_REGS; #else return availableIntRegs; @@ -507,7 +507,7 @@ class RegisterIterator void operator++(int dummy) // int dummy is c++ for "this is postfix ++" { currentRegNum = REG_NEXT(currentRegNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (regType == TYP_DOUBLE) currentRegNum = REG_NEXT(currentRegNum); #endif @@ -515,7 +515,7 @@ class RegisterIterator void operator++() // prefix operator++ { currentRegNum = REG_NEXT(currentRegNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (regType == TYP_DOUBLE) currentRegNum = REG_NEXT(currentRegNum); #endif @@ -682,7 +682,7 @@ LinearScan::LinearScan(Compiler* theCompiler) // set won't be recomputed until after Lowering (and this constructor is called prior to Lowering), // so we don't want to check that yet. enregisterLocalVars = ((compiler->opts.compFlags & CLFLG_REGVAR) != 0); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 availableIntRegs = (RBM_ALLINT & ~(RBM_PR | RBM_FP | RBM_LR) & ~compiler->codeGen->regSet.rsMaskResvd); #else availableIntRegs = (RBM_ALLINT & ~compiler->codeGen->regSet.rsMaskResvd); @@ -695,7 +695,7 @@ LinearScan::LinearScan(Compiler* theCompiler) availableFloatRegs = RBM_ALLFLOAT; availableDoubleRegs = RBM_ALLDOUBLE; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (compiler->opts.compDbgEnC) { // On x64 when the EnC option is set, we always save exactly RBP, RSI and RDI. 
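allByteRegs above exists because only a handful of x86 integer registers have byte-addressable forms, so byte-sized values intersect the free set with RBM_BYTE_REGS. A toy mask intersection showing the shape of the restriction; the bit layout and mask values here are invented for the example, not the JIT's RBM_* constants:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Pretend bits 0..7 stand for eax, ecx, edx, ebx, esp, ebp, esi, edi.
        uint32_t availableIntRegs = 0xCF; // whatever happens to be free right now
        uint32_t byteRegs         = 0x0F; // only eax/ecx/edx/ebx have AL/CL/DL/BL forms
        uint32_t byteCandidates   = availableIntRegs & byteRegs;
        printf("candidates for a byte-sized value: 0x%02x\n", byteCandidates); // 0x0f
        return 0;
    }
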
@@ -705,7 +705,7 @@ LinearScan::LinearScan(Compiler* theCompiler) availableFloatRegs &= ~RBM_CALLEE_SAVED; availableDoubleRegs &= ~RBM_CALLEE_SAVED; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 compiler->rpFrameType = FT_NOT_SET; compiler->rpMustCreateEBPCalled = false; @@ -1421,7 +1421,7 @@ bool LinearScan::isRegCandidate(LclVarDsc* varDsc) return false; } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (varDsc->lvType == TYP_LONG) { // Long variables should not be register candidates. @@ -1655,9 +1655,9 @@ void LinearScan::identifyCandidates() { // Initialize all variables to REG_STK varDsc->SetRegNum(REG_STK); -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT varDsc->SetOtherReg(REG_STK); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT if (!enregisterLocalVars) { @@ -1857,7 +1857,7 @@ void LinearScan::identifyCandidates() VarSetOps::IntersectionD(compiler, exceptVars, registerCandidateVars); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #ifdef DEBUG if (VERBOSE) { @@ -1866,7 +1866,7 @@ void LinearScan::identifyCandidates() compiler->lvaTableDump(Compiler::FrameLayoutState::PRE_REGALLOC_FRAME_LAYOUT); } #endif // DEBUG -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } // TODO-Throughput: This mapping can surely be more efficiently done @@ -2441,7 +2441,7 @@ void LinearScan::setFrameType() compiler->rpFrameType = frameType; -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Determine whether we need to reserve a register for large lclVar offsets. if (compiler->compRsvdRegCheck(Compiler::REGALLOC_FRAME_LAYOUT)) { @@ -2451,7 +2451,7 @@ void LinearScan::setFrameType() JITDUMP(" Reserved REG_OPT_RSVD (%s) due to large frame\n", getRegName(REG_OPT_RSVD)); removeMask |= RBM_OPT_RSVD; } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH if ((removeMask != RBM_NONE) && ((availableIntRegs & removeMask) != 0)) { @@ -2591,7 +2591,7 @@ bool LinearScan::registerIsAvailable(RegRecord* physRegRecord, *nextRefLocationPtr = nextRefLocation; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (regType == TYP_DOUBLE) { // Recurse, but check the other half this time (TYP_FLOAT) @@ -2599,7 +2599,7 @@ bool LinearScan::registerIsAvailable(RegRecord* physRegRecord, return false; nextRefLocation = *nextRefLocationPtr; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM return (nextRefLocation >= currentLoc); } @@ -2662,7 +2662,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo if ((refPosition->treeNode->AsIntCon()->IconValue() == otherTreeNode->AsIntCon()->IconValue()) && (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode))) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // If the constant is negative, only reuse registers of the same type. // This is because, on a 64-bit system, we do not sign-extend immediates in registers to // 64-bits unless they are actually longs, as this requires a longer instruction. @@ -2671,7 +2671,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo // than once, we won't have access to the instruction that originally defines the constant). 
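The 64-bit guard in isMatchingConstant above exists because a negative constant defined by a 32-bit instruction and the same constant defined as a long leave different bits in the register, so they cannot share one. A two-line demonstration:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t asIntDef  = (uint32_t)-1;          // what a 32-bit "mov reg, -1" leaves behind
        uint64_t asLongDef = (uint64_t)(int64_t)-1; // a true 64-bit -1
        printf("int  -1 in a 64-bit reg: 0x%016llx\n", (unsigned long long)asIntDef);
        printf("long -1 in a 64-bit reg: 0x%016llx\n", (unsigned long long)asLongDef);
        return asIntDef == asLongDef ? 1 : 0;       // different bit patterns, as expected
    }
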
if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) || (refPosition->treeNode->AsIntCon()->IconValue() >= 0)) -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { return true; } @@ -2800,7 +2800,7 @@ regNumber LinearScan::tryAllocateFreeReg(Interval* currentInterval, RefPosition* LsraLocation rangeEndLocation = refPosition->getRangeEndLocation(); bool preferCalleeSave = currentInterval->preferCalleeSave; bool avoidByteRegs = false; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((relatedPreferences & ~RBM_BYTE_REGS) != RBM_NONE) { avoidByteRegs = true; @@ -3209,7 +3209,7 @@ bool LinearScan::canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation, return true; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM //------------------------------------------------------------------------ // canSpillDoubleReg: Determine whether we can spill physRegRecord // @@ -3257,7 +3257,7 @@ bool LinearScan::canSpillDoubleReg(RegRecord* physRegRecord, } #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM //------------------------------------------------------------------------ // unassignDoublePhysReg: unassign a double register (pair) // @@ -3311,7 +3311,7 @@ void LinearScan::unassignDoublePhysReg(RegRecord* doubleRegRecord) } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //------------------------------------------------------------------------ // isRefPositionActive: Determine whether a given RefPosition is active at the given location @@ -3366,7 +3366,7 @@ bool LinearScan::isRegInUse(RegRecord* regRec, RefPosition* refPosition) } else { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // In the case of TYP_DOUBLE, we may have the case where 'assignedInterval' is inactive, // but the other half register is active. If so, it must be have an active recentRefPosition, // as above. 
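Most of the ARM-only code in this file deals with the fact that a double register is a pair of float registers, which is why nearly every operation also has to visit "the other half". The mapping the allocator relies on, in isolation:

    #include <cstdio>

    // On ARM32 VFP, double register Dn overlays the single-precision pair S(2n) and S(2n+1),
    // so a TYP_DOUBLE interval really occupies two consecutive float RegRecords.
    int main()
    {
        for (int d = 0; d < 4; d++)
            printf("D%d overlays S%d and S%d\n", d, 2 * d, 2 * d + 1);
        return 0;
    }
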
@@ -3440,7 +3440,7 @@ bool LinearScan::isSpillCandidate(Interval* current, { nextLocation = assignedInterval->getNextRefLocation(); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM RegRecord* physRegRecord2 = nullptr; Interval* assignedInterval2 = nullptr; @@ -3492,7 +3492,7 @@ bool LinearScan::isSpillCandidate(Interval* current, // In either case, we cannot use it CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (assignedInterval == nullptr && assignedInterval2 == nullptr) #else if (assignedInterval == nullptr) @@ -3509,7 +3509,7 @@ bool LinearScan::isSpillCandidate(Interval* current, return false; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (current->registerType == TYP_DOUBLE) { if (isRegInUse(physRegRecord2, refPosition)) @@ -3574,7 +3574,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio // prefering the one with the furthest ref position when considering // a candidate to spill RegRecord* farthestRefPhysRegRecord = nullptr; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM RegRecord* farthestRefPhysRegRecord2 = nullptr; #endif LsraLocation farthestLocation = MinLocation; @@ -3605,7 +3605,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio continue; } RegRecord* physRegRecord = getRegisterRecord(regNum); - RegRecord* physRegRecord2 = nullptr; // only used for _TARGET_ARM_ + RegRecord* physRegRecord2 = nullptr; // only used for TARGET_ARM LsraLocation nextLocation = MinLocation; LsraLocation physRegNextLocation; if (!isSpillCandidate(current, refPosition, physRegRecord, nextLocation)) @@ -3620,7 +3620,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio unsigned recentAssignedRefWeight = BB_ZERO_WEIGHT; RefPosition* recentAssignedRef = nullptr; RefPosition* recentAssignedRef2 = nullptr; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (current->registerType == TYP_DOUBLE) { recentAssignedRef = (assignedInterval == nullptr) ? nullptr : assignedInterval->recentRefPosition; @@ -3700,7 +3700,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio // need to be spilled as they are already in memory and // codegen considers them as contained memory operands. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // TODO-CQ-ARM: Just conservatively "and" two conditions. We may implement a better condition later. isBetterLocation = true; if (recentAssignedRef != nullptr) @@ -3724,7 +3724,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio { farthestLocation = nextLocation; farthestRefPhysRegRecord = physRegRecord; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM farthestRefPhysRegRecord2 = physRegRecord2; #endif farthestRefPosWeight = recentAssignedRefWeight; @@ -3752,7 +3752,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio candidatesAreStressLimited()); if (!isConstrained) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM Interval* assignedInterval = (farthestRefPhysRegRecord == nullptr) ? 
nullptr : farthestRefPhysRegRecord->assignedInterval; Interval* assignedInterval2 = @@ -3776,11 +3776,11 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio { assert(nextRefPosition2 != nullptr && nextRefPosition2->RegOptional()); } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM Interval* assignedInterval = farthestRefPhysRegRecord->assignedInterval; RefPosition* nextRefPosition = assignedInterval->getNextRefPosition(); assert(nextRefPosition->RegOptional()); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM } } else @@ -3794,7 +3794,7 @@ regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPositio { foundReg = farthestRefPhysRegRecord->regNum; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (current->registerType == TYP_DOUBLE) { assert(genIsValidDoubleReg(foundReg)); @@ -3908,7 +3908,7 @@ bool LinearScan::isAssigned(RegRecord* regRec, LsraLocation lastLocation ARM_ARG if ((assignedInterval == nullptr) || assignedInterval->getNextRefLocation() > lastLocation) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (newRegType == TYP_DOUBLE) { RegRecord* anotherRegRec = findAnotherHalfRegRec(regRec); @@ -3954,7 +3954,7 @@ void LinearScan::checkAndAssignInterval(RegRecord* regRec, Interval* interval) } unassignPhysReg(regRec->regNum); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // If 'interval' and 'assignedInterval' were both TYP_DOUBLE, then we have unassigned 'assignedInterval' // from both halves. Otherwise, if 'interval' is TYP_DOUBLE, we now need to unassign the other half. if ((interval->registerType == TYP_DOUBLE) && @@ -4207,7 +4207,7 @@ void LinearScan::checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefP void LinearScan::unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType)) { RegRecord* regRecToUnassign = regRec; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM RegRecord* anotherRegRec = nullptr; if ((regRecToUnassign->assignedInterval != nullptr) && @@ -4233,7 +4233,7 @@ void LinearScan::unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegTy { unassignPhysReg(regRecToUnassign, regRecToUnassign->assignedInterval->recentRefPosition); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if ((anotherRegRec != nullptr) && (anotherRegRec->assignedInterval != nullptr)) { unassignPhysReg(anotherRegRec, anotherRegRec->assignedInterval->recentRefPosition); @@ -4267,7 +4267,7 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio // Is assignedInterval actually still assigned to this register? bool intervalIsAssigned = (assignedInterval->physReg == thisRegNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM RegRecord* anotherRegRec = nullptr; // Prepare second half RegRecord of a double register for TYP_DOUBLE @@ -4284,18 +4284,18 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio intervalIsAssigned = true; } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM checkAndClearInterval(regRec, spillRefPosition); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (assignedInterval->registerType == TYP_DOUBLE) { // Both two RegRecords should have been unassigned together. 
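Stripped of the ARM double-register handling and the weight heuristics, the core choice allocateBusyReg above makes is the classic one: evict the occupant whose next reference is farthest away. A bare-bones sketch of just that selection, with invented candidates:

    #include <cstdio>

    struct Candidate
    {
        const char* reg;
        unsigned    nextRefLocation; // where the current occupant is needed next
    };

    static const char* PickSpillCandidate(const Candidate* cands, int count)
    {
        int best = 0;
        for (int i = 1; i < count; i++)
            if (cands[i].nextRefLocation > cands[best].nextRefLocation)
                best = i;
        return cands[best].reg;      // farthest next use: the cheapest interval to spill
    }

    int main()
    {
        Candidate cands[] = { {"r4", 12}, {"r5", 40}, {"r6", 25} };
        printf("spill %s\n", PickSpillCandidate(cands, 3)); // r5
        return 0;
    }
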
assert(regRec->assignedInterval == nullptr); assert(anotherRegRec->assignedInterval == nullptr); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM RefPosition* nextRefPosition = nullptr; if (spillRefPosition != nullptr) @@ -4391,7 +4391,7 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio regRec->assignedInterval = regRec->previousInterval; regRec->previousInterval = nullptr; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Note: // We can not use updateAssignedInterval() and updatePreviousInterval() here, // because regRec may not be a even-numbered float register. @@ -4404,7 +4404,7 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio anotherHalfRegRec->assignedInterval = regRec->assignedInterval; anotherHalfRegRec->previousInterval = nullptr; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG if (spill) @@ -4561,7 +4561,7 @@ regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber tar } #endif // DEBUG -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM //-------------------------------------------------------------------------------------- // isSecondHalfReg: Test if recRec is second half of double register // which is assigned to an interval. @@ -4679,7 +4679,7 @@ bool LinearScan::canRestorePreviousInterval(RegRecord* regRec, Interval* assigne (regRec->previousInterval != nullptr && regRec->previousInterval != assignedInterval && regRec->previousInterval->assignedReg == regRec && regRec->previousInterval->getNextRefPosition() != nullptr); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (retVal && regRec->previousInterval->registerType == TYP_DOUBLE) { RegRecord* anotherHalfRegRec = findAnotherHalfRegRec(regRec); @@ -4694,7 +4694,7 @@ bool LinearScan::canRestorePreviousInterval(RegRecord* regRec, Interval* assigne bool LinearScan::isAssignedToInterval(Interval* interval, RegRecord* regRec) { bool isAssigned = (interval->assignedReg == regRec); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM isAssigned |= isSecondHalfReg(regRec, interval); #endif return isAssigned; @@ -4937,7 +4937,7 @@ void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) } if (targetRegRecord->assignedInterval != interval) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // If this is a TYP_DOUBLE interval, and the assigned interval is either null or is TYP_FLOAT, // we also need to unassign the other half of the register. // Note that if the assigned interval is TYP_DOUBLE, it will be unassigned below. @@ -4949,7 +4949,7 @@ void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) unassignIntervalBlockStart(findAnotherHalfRegRec(targetRegRecord), allocationPassComplete ? nullptr : inVarToRegMap); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM unassignIntervalBlockStart(targetRegRecord, allocationPassComplete ? nullptr : inVarToRegMap); assignPhysReg(targetRegRecord, interval); } @@ -4993,7 +4993,7 @@ void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) updateAssignedInterval(physRegRecord, nullptr, assignedInterval->registerType); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // unassignPhysReg, above, may have restored a 'previousInterval', in which case we need to // get the value of 'physRegRecord->assignedInterval' rather than using 'assignedInterval'. 
if (physRegRecord->assignedInterval != nullptr) @@ -5006,10 +5006,10 @@ void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) assert(genIsValidDoubleReg(reg)); reg = REG_NEXT(reg); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else { RegRecord* physRegRecord = getRegisterRecord(reg); @@ -5022,7 +5022,7 @@ void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) reg = REG_NEXT(reg); } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } } @@ -5096,12 +5096,12 @@ bool LinearScan::registerIsFree(regNumber regNum, RegisterType regType) bool isFree = physRegRecord->isFree(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (isFree && regType == TYP_DOUBLE) { isFree = getSecondHalfRegRec(physRegRecord)->isFree(); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM return isFree; } @@ -5146,9 +5146,9 @@ void LinearScan::freeRegister(RegRecord* physRegRecord) // we wouldn't unnecessarily link separate live ranges to the same register. if (nextRefPosition == nullptr || RefTypeIsDef(nextRefPosition->refType)) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM assert((assignedInterval->registerType != TYP_DOUBLE) || genIsValidDoubleReg(physRegRecord->regNum)); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM unassignPhysReg(physRegRecord, nullptr); } } @@ -5398,7 +5398,7 @@ void LinearScan::allocateRegisters() { regRecord->assignedInterval = nullptr; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE if (assignedInterval->registerType == TYP_DOUBLE) { @@ -5797,7 +5797,7 @@ void LinearScan::allocateRegisters() allocateReg = false; } -#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(_TARGET_XARCH_) +#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_XARCH) // We can also avoid allocating a register (in fact we don't want to) if we have // an UpperVectorRestore on xarch where the value is on the stack. if ((currentRefPosition->refType == RefTypeUpperVectorRestore) && (currentInterval->physReg == REG_NA)) @@ -5827,14 +5827,14 @@ void LinearScan::allocateRegisters() if (assignedRegister == REG_NA) { bool isAllocatable = currentRefPosition->IsActualRef(); -#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(_TARGET_ARM64_) +#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE && defined(TARGET_ARM64) if (currentInterval->isUpperVector) { // On Arm64, we can't save the upper half to memory without a register. isAllocatable = true; assert(!currentRefPosition->RegOptional()); } -#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE && _TARGET_ARM64_ +#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE && TARGET_ARM64 if (isAllocatable) { if (allocateReg) @@ -6080,7 +6080,7 @@ void LinearScan::allocateRegisters() // void LinearScan::updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE. Interval* oldAssignedInterval = reg->assignedInterval; if (regType == TYP_DOUBLE) @@ -6122,7 +6122,7 @@ void LinearScan::updatePreviousInterval(RegRecord* reg, Interval* interval, Regi { reg->previousInterval = interval; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Update overlapping floating point register for TYP_DOUBLE if (regType == TYP_DOUBLE) { @@ -6571,7 +6571,7 @@ void LinearScan::insertUpperVectorSave(GenTree* tree, // On Arm64, we must always have a register to save the upper half, // while on x86 we can spill directly to memory. 
regNumber spillReg = refPosition->assignedReg(); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 bool spillToMem = refPosition->spillAfter; assert(spillReg != REG_NA); #else @@ -6654,7 +6654,7 @@ void LinearScan::insertUpperVectorRestore(GenTree* tree, { // We need a stack location for this. assert(lclVarInterval->isSpilled); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 assert(refPosition->assignedReg() == REG_NA); simdNode->gtFlags |= GTF_NOREG_AT_USE; #else @@ -6745,7 +6745,7 @@ void LinearScan::recordMaxSpill() // Note: due to the temp normalization process (see tmpNormalizeType) // only a few types should actually be seen here. JITDUMP("Recording the maximum number of concurrent spills:\n"); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 var_types returnType = RegSet::tmpNormalizeType(compiler->info.compRetType); if (needDoubleTmpForFPCall || (returnType == TYP_DOUBLE)) { @@ -6757,7 +6757,7 @@ void LinearScan::recordMaxSpill() JITDUMP("Adding a spill temp for moving a float call/return value between xmm reg and x87 stack.\n"); maxSpill[TYP_FLOAT] += 1; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 for (int i = 0; i < TYP_COUNT; i++) { if (var_types(i) != RegSet::tmpNormalizeType(var_types(i))) @@ -6851,7 +6851,7 @@ void LinearScan::updateMaxSpill(RefPosition* refPosition) { typ = treeNode->AsPutArgSplit()->GetRegType(refPosition->getMultiRegIdx()); } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) else if (treeNode->OperIsPutArgReg()) { // For double arg regs, the type is changed to long since they must be passed via `r0-r3`. @@ -6859,7 +6859,7 @@ void LinearScan::updateMaxSpill(RefPosition* refPosition) var_types typNode = treeNode->TypeGet(); typ = (typNode == TYP_LONG) ? TYP_INT : typNode; } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT #endif // FEATURE_ARG_SPLIT else { @@ -7216,13 +7216,13 @@ void LinearScan::resolveRegisters() GenTreePutArgSplit* splitArg = treeNode->AsPutArgSplit(); splitArg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (treeNode->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = treeNode->AsMultiRegOp(); multiReg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #endif // FEATURE_ARG_SPLIT } @@ -7360,7 +7360,7 @@ void LinearScan::resolveRegisters() : genRegNumFromMask(initialRegMask); regNumber sourceReg = (varDsc->lvIsRegArg) ? 
varDsc->GetArgReg() : REG_STK; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (varTypeIsMultiReg(varDsc)) { // TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and GetOtherReg() (these should NYI @@ -7368,7 +7368,7 @@ void LinearScan::resolveRegisters() assert(!"Multi-reg types not yet supported"); } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { varDsc->SetArgInitReg(initialReg); JITDUMP(" Set V%02u argument initial register to %s\n", lclNum, getRegName(initialReg)); @@ -7680,7 +7680,7 @@ regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum); VarToRegMap toVarToRegMap = getInVarToRegMap(toBlock->bbNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM regMaskTP freeRegs; if (type == TYP_DOUBLE) { @@ -7691,9 +7691,9 @@ regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* { freeRegs = allRegs(type); } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM regMaskTP freeRegs = allRegs(type); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM #ifdef DEBUG if (getStressLimitRegs() == LSRA_LIMIT_SMALL_SET) @@ -7721,7 +7721,7 @@ regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Exclude any doubles for which the odd half isn't in freeRegs. @@ -7740,7 +7740,7 @@ regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM //------------------------------------------------------------------------ // addResolutionForDouble: Add resolution move(s) for TYP_DOUBLE interval // and update location. @@ -7805,7 +7805,7 @@ void LinearScan::addResolutionForDouble(BasicBlock* block, return; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //------------------------------------------------------------------------ // addResolution: Add a resolution move of the given interval @@ -7932,7 +7932,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) switchRegs |= genRegMask(op2->GetRegNum()); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Next, if this blocks ends with a JCMP, we have to make sure not to copy // into the register that it uses or modify the local variable it must consume LclVarDsc* jcmpLocalVarDsc = nullptr; @@ -8028,7 +8028,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) sameToReg = REG_NA; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (jcmpLocalVarDsc && (jcmpLocalVarDsc->lvVarIndex == outResolutionSetVarIndex)) { sameToReg = REG_NA; @@ -8403,7 +8403,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, break; } -#ifndef _TARGET_XARCH_ +#ifndef TARGET_XARCH // We record tempregs for beginning and end of each block. // For amd64/x86 we only need a tempReg for float - we'll use xchg for int. // TODO-Throughput: It would be better to determine the tempRegs on demand, but the code below @@ -8411,12 +8411,12 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, // we need to get the tempReg. regNumber tempRegInt = (resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT); -#endif // !_TARGET_XARCH_ +#endif // !TARGET_XARCH regNumber tempRegFlt = REG_NA; regNumber tempRegDbl = REG_NA; // Used only for ARM if ((compiler->compFloatingPointUsed) && (resolveType != ResolveSharedCritical)) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Try to reserve a double register for TYP_DOUBLE and use it for TYP_FLOAT too if available. 
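getTempRegForResolution, shown above, first gathers a freeRegs mask and then, for TYP_DOUBLE on ARM, drops any double whose odd half is not free. A hedged sketch of that filtering step, with a raw 32-bit mask standing in for regMaskTP (bit i set means single register s(i) is free; the real mask layout may differ):

    #include <cassert>
    #include <cstdint>

    using RegMask = uint32_t; // stand-in for regMaskTP

    // Keep only even-numbered singles whose odd partner is also free, i.e. the
    // doubles d(i/2) for which both s(i) and s(i+1) are available.
    RegMask fullyFreeDoublePairs(RegMask freeSingles)
    {
        RegMask evenHalves  = freeSingles & 0x55555555u;        // free even singles
        RegMask oddPartners = (freeSingles >> 1) & 0x55555555u; // their odd partners, shifted down
        return evenHalves & oddPartners;                        // both halves free
    }

    int main()
    {
        RegMask freeSingles = 0b0111;                        // s0, s1, s2 free; s3 busy
        assert(fullyFreeDoublePairs(freeSingles) == 0b0001); // only d0 (s0/s1) is usable
        return 0;
    }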
tempRegDbl = getTempRegForResolution(fromBlock, toBlock, TYP_DOUBLE); if (tempRegDbl != REG_NA) @@ -8424,7 +8424,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, tempRegFlt = tempRegDbl; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { tempRegFlt = getTempRegForResolution(fromBlock, toBlock, TYP_FLOAT); } @@ -8535,7 +8535,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, regNumber targetReg = genRegNumFromMask(targetRegMask); if (location[targetReg] == REG_NA) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM regNumber sourceReg = (regNumber)source[targetReg]; Interval* interval = sourceIntervals[sourceReg]; if (interval->registerType == TYP_DOUBLE) @@ -8549,7 +8549,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, } } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { targetRegsReady |= targetRegMask; } @@ -8583,7 +8583,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, { regMaskTP fromRegMask = genRegMask(fromReg); targetRegsReady |= fromRegMask; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (genIsValidDoubleReg(fromReg)) { // Ensure that either: @@ -8620,7 +8620,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, assert(sourceIntervals[lowerHalfSrcReg]->registerType == TYP_DOUBLE); targetRegsReady |= genRegMask(lowerHalfReg); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } } } @@ -8643,29 +8643,29 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, bool useSwap = false; if (emitter::isFloatReg(targetReg)) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE) { // ARM32 requires a double temp register for TYP_DOUBLE. tempReg = tempRegDbl; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM tempReg = tempRegFlt; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH else { useSwap = true; } -#else // !_TARGET_XARCH_ +#else // !TARGET_XARCH else { tempReg = tempRegInt; } -#endif // !_TARGET_XARCH_ +#endif // !TARGET_XARCH if (useSwap || tempReg == REG_NA) { // First, we have to figure out the destination register for what's currently in fromReg, @@ -8738,7 +8738,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, else { compiler->codeGen->regSet.rsSetRegsModified(genRegMask(tempReg) DEBUGARG(true)); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (sourceIntervals[fromReg]->registerType == TYP_DOUBLE) { assert(genIsValidDoubleReg(targetReg)); @@ -8748,7 +8748,7 @@ void LinearScan::resolveEdge(BasicBlock* fromBlock, resolveType); } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { assert(sourceIntervals[targetReg] != nullptr); diff --git a/src/coreclr/src/jit/lsra.h b/src/coreclr/src/jit/lsra.h index c5eacf32d31af..f3ff58ed7bd27 100644 --- a/src/coreclr/src/jit/lsra.h +++ b/src/coreclr/src/jit/lsra.h @@ -482,7 +482,7 @@ class RegRecord : public Referenceable void init(regNumber reg) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // The Zero register, or the SP if ((reg == REG_ZR) || (reg == REG_SP)) { @@ -690,7 +690,7 @@ class LinearScan : public LinearScanInterface InsertAtBottom }; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void addResolutionForDouble(BasicBlock* block, GenTree* insertionPoint, Interval** sourceIntervals, @@ -744,7 +744,7 @@ class LinearScan : public LinearScanInterface // Hence the "SmallFPSet" has 5 elements. CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI // On System V the RDI and RSI are not callee saved. Use R12 ans R13 as callee saved registers. 
static const regMaskTP LsraLimitSmallIntSet = @@ -755,15 +755,15 @@ class LinearScan : public LinearScanInterface (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI); #endif // !UNIX_AMD64_ABI static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // On ARM, we may need two registers to set up the target register for a virtual call, so we need // to have at least the maximum number of arg registers, plus 2. static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5); static const regMaskTP LsraLimitSmallFPSet = (RBM_F0 | RBM_F1 | RBM_F2 | RBM_F16 | RBM_F17); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R19 | RBM_R20); static const regMaskTP LsraLimitSmallFPSet = (RBM_V0 | RBM_V1 | RBM_V2 | RBM_V8 | RBM_V9); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI); static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7); #else @@ -973,7 +973,7 @@ class LinearScan : public LinearScanInterface void processBlockStartLocations(BasicBlock* current); void processBlockEndLocations(BasicBlock* current); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool isSecondHalfReg(RegRecord* regRec, Interval* interval); RegRecord* getSecondHalfRegRec(RegRecord* regRec); RegRecord* findAnotherHalfRegRec(RegRecord* regRec); @@ -1448,13 +1448,13 @@ class LinearScan : public LinearScanInterface VARSET_TP exceptVars; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { return (type == TYP_SIMD32); } static const var_types LargeVectorSaveType = TYP_SIMD16; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes @@ -1462,9 +1462,9 @@ class LinearScan : public LinearScanInterface return ((type == TYP_SIMD16) || (type == TYP_SIMD12)); } static const var_types LargeVectorSaveType = TYP_DOUBLE; -#else // !defined(_TARGET_AMD64_) && !defined(_TARGET_ARM64_) +#else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) #error("Unknown target architecture for FEATURE_SIMD") -#endif // !defined(_TARGET_AMD64_) && !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) // Set of large vector (TYP_SIMD32 on AVX) variables. VARSET_TP largeVectorVars; @@ -1520,9 +1520,9 @@ class LinearScan : public LinearScanInterface void setDelayFree(RefPosition* use); int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH int BuildRMWUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); -#endif // !_TARGET_XARCH_ +#endif // !TARGET_XARCH // This is the main entry point for building the RefPositions for a node. // These methods return the number of sources. int BuildNode(GenTree* tree); @@ -1542,12 +1542,12 @@ class LinearScan : public LinearScanInterface int BuildStoreLoc(GenTree* tree); int BuildReturn(GenTree* tree); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // This method, unlike the others, returns the number of sources, since it may be called when // 'tree' is contained. 
int BuildShiftRotate(GenTree* tree); -#endif // _TARGET_XARCH_ -#ifdef _TARGET_ARM_ +#endif // TARGET_XARCH +#ifdef TARGET_ARM int BuildShiftLongCarry(GenTree* tree); #endif int BuildPutArgReg(GenTreeUnOp* node); @@ -1561,14 +1561,14 @@ class LinearScan : public LinearScanInterface int BuildGCWriteBarrier(GenTree* tree); int BuildCast(GenTreeCast* cast); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Move the last use bit, if any, from 'fromTree' to 'toTree'; 'fromTree' must be contained. void CheckAndMoveRMWLastUse(GenTree* fromTree, GenTree* toTree) { @@ -1588,7 +1588,7 @@ class LinearScan : public LinearScanInterface fromTree->gtFlags &= ~GTF_VAR_DEATH; toTree->gtFlags |= GTF_VAR_DEATH; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifdef FEATURE_SIMD int BuildSIMD(GenTreeSIMD* tree); diff --git a/src/coreclr/src/jit/lsraarm.cpp b/src/coreclr/src/jit/lsraarm.cpp index e7e33a7bd0fc1..0e633c1a5ad76 100644 --- a/src/coreclr/src/jit/lsraarm.cpp +++ b/src/coreclr/src/jit/lsraarm.cpp @@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #include "jit.h" #include "sideeffects.h" @@ -717,7 +717,7 @@ int LinearScan::BuildNode(GenTree* tree) case GT_COPY: srcCount = 1; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // This case currently only occurs for double types that are passed as TYP_LONG; // actual long types would have been decomposed by now. 
if (tree->TypeGet() == TYP_LONG) @@ -810,4 +810,4 @@ int LinearScan::BuildNode(GenTree* tree) return srcCount; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM diff --git a/src/coreclr/src/jit/lsraarm64.cpp b/src/coreclr/src/jit/lsraarm64.cpp index e028bf929cb5b..961716a591f82 100644 --- a/src/coreclr/src/jit/lsraarm64.cpp +++ b/src/coreclr/src/jit/lsraarm64.cpp @@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 #include "jit.h" #include "sideeffects.h" @@ -1198,4 +1198,4 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) } #endif -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 diff --git a/src/coreclr/src/jit/lsraarmarch.cpp b/src/coreclr/src/jit/lsraarmarch.cpp index a1266ab6861be..3ddab72698ded 100644 --- a/src/coreclr/src/jit/lsraarmarch.cpp +++ b/src/coreclr/src/jit/lsraarmarch.cpp @@ -19,7 +19,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures +#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures #include "jit.h" #include "sideeffects.h" @@ -50,7 +50,7 @@ int LinearScan::BuildIndir(GenTreeIndir* indirTree) GenTree* index = nullptr; int cns = 0; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Unaligned loads/stores for floating point values must first be loaded into integer register(s) if (indirTree->gtFlags & GTF_IND_UNALIGNED) { @@ -177,7 +177,7 @@ int LinearScan::BuildCall(GenTreeCall* call) ctrlExprCandidates = RBM_FASTTAILCALL_TARGET; } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else { buildInternalIntRegisterDefForNode(call); @@ -188,13 +188,13 @@ int LinearScan::BuildCall(GenTreeCall* call) buildInternalIntRegisterDefForNode(call); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM RegisterType registerType = call->TypeGet(); // Set destination candidates for return value of the call. -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) { // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with @@ -202,7 +202,7 @@ int LinearScan::BuildCall(GenTreeCall* call) dstCandidates = RBM_PINVOKE_TCB; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (hasMultiRegRetVal) { assert(retTypeDesc != nullptr); @@ -260,13 +260,13 @@ int LinearScan::BuildCall(GenTreeCall* call) // Update argReg for the next putarg_reg (if any) argReg = genRegArgNext(argReg); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // A double register is modelled as an even-numbered single one if (use.GetNode()->TypeGet() == TYP_DOUBLE) { argReg = genRegArgNext(argReg); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #endif BuildUse(use.GetNode(), genRegMask(use.GetNode()->GetRegNum())); srcCount++; @@ -289,7 +289,7 @@ int LinearScan::BuildCall(GenTreeCall* call) assert(argNode->OperIs(GT_PUTARG_REG)); assert(argNode->GetRegNum() == argReg); HandleFloatVarArgs(call, argNode, &callHasFloatRegArgs); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // The `double` types have been transformed to `long` on armel, // while the actual long types have been decomposed. // On ARM we may have bitcasts from DOUBLE to LONG. 
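The TARGET_ARM comment above notes that on armel a double argument has already been retyped as TYP_LONG, which is why the call site builds two register uses for it. As a standalone, soft-float style illustration (assuming little-endian halves; this is not the JIT's code), a double value splits into the lo/hi 32-bit words that would travel in consecutive integer registers:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Split a double into the two 32-bit halves used when it is passed like a
    // 64-bit integer: low word in the first register, high word in the next.
    void splitDoubleForSoftFloat(double value, uint32_t* lo, uint32_t* hi)
    {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // reinterpret without aliasing UB
        *lo = static_cast<uint32_t>(bits);
        *hi = static_cast<uint32_t>(bits >> 32);
    }

    int main()
    {
        uint32_t lo = 0, hi = 0;
        splitDoubleForSoftFloat(1.0, &lo, &hi);
        assert(lo == 0x00000000u && hi == 0x3FF00000u); // IEEE 754 bits of 1.0
        return 0;
    }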
@@ -301,7 +301,7 @@ int LinearScan::BuildCall(GenTreeCall* call) srcCount += 2; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { BuildUse(argNode, genRegMask(argNode->GetRegNum())); srcCount++; @@ -408,9 +408,9 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* argNode) { // We can use a ldp/stp sequence so we need two internal registers for ARM64; one for ARM. buildInternalIntRegisterDefForNode(argNode); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 buildInternalIntRegisterDefForNode(argNode); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 if (putArgChild->OperGet() == GT_OBJ) { @@ -496,7 +496,7 @@ int LinearScan::BuildPutArgSplit(GenTreePutArgSplit* argNode) assert(!node->isContained()); // The only multi-reg nodes we should see are OperIsMultiRegOp() unsigned currentRegCount; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (node->OperIsMultiRegOp()) { currentRegCount = node->AsMultiRegOp()->GetRegCount(); @@ -640,7 +640,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) { case GenTreeBlk::BlkOpKindUnroll: buildInternalIntRegisterDefForNode(blkNode); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (size >= 2 * REGSIZE_BYTES) { // We will use ldp/stp to reduce code size and improve performance @@ -725,7 +725,7 @@ int LinearScan::BuildCast(GenTreeCast* cast) const var_types srcType = genActualType(src->TypeGet()); const var_types castType = cast->gtCastType; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM assert(!varTypeIsLong(srcType) || (src->OperIs(GT_LONG) && src->isContained())); // Floating point to integer casts requires a temporary register. @@ -749,4 +749,4 @@ int LinearScan::BuildCast(GenTreeCast* cast) return srcCount; } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH diff --git a/src/coreclr/src/jit/lsrabuild.cpp b/src/coreclr/src/jit/lsrabuild.cpp index ce565248acb25..f4e124cf91054 100644 --- a/src/coreclr/src/jit/lsrabuild.cpp +++ b/src/coreclr/src/jit/lsrabuild.cpp @@ -366,13 +366,13 @@ void LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* de // void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (compiler->opts.compDbgEnC) { // We only use RSI and RDI for EnC code, so we don't want to favor callee-save regs. return; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 Interval* theInterval = rp->getInterval(); @@ -606,7 +606,7 @@ RefPosition* LinearScan::newRefPosition(Interval* theInterval, // Spill info newRP->isFixedRegRef = isFixedRegister; -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 // We don't need this for AMD because the PInvoke method epilog code is explicit // at register allocation time. 
if (theInterval != nullptr && theInterval->isLocalVar && compiler->compMethodRequiresPInvokeFrame() && @@ -615,7 +615,7 @@ RefPosition* LinearScan::newRefPosition(Interval* theInterval, mask &= ~(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME); noway_assert(mask != RBM_NONE); } -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 newRP->registerAssignment = mask; newRP->setMultiRegIdx(multiRegIdx); @@ -778,14 +778,14 @@ regMaskTP LinearScan::getKillSetForStoreInd(GenTreeStoreInd* tree) regMaskTP LinearScan::getKillSetForShiftRotate(GenTreeOp* shiftNode) { regMaskTP killMask = RBM_NONE; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(shiftNode->OperIsShiftOrRotate()); GenTree* shiftBy = shiftNode->gtGetOp2(); if (!shiftBy->isContained()) { killMask = RBM_RCX; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return killMask; } @@ -800,13 +800,13 @@ regMaskTP LinearScan::getKillSetForShiftRotate(GenTreeOp* shiftNode) regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode) { regMaskTP killMask = RBM_NONE; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(mulNode->OperIsMul()); if (!mulNode->OperIs(GT_MUL) || (((mulNode->gtFlags & GTF_UNSIGNED) != 0) && mulNode->gtOverflowEx())) { killMask = RBM_RAX | RBM_RDX; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return killMask; } @@ -821,14 +821,14 @@ regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode) regMaskTP LinearScan::getKillSetForModDiv(GenTreeOp* node) { regMaskTP killMask = RBM_NONE; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH assert(node->OperIs(GT_MOD, GT_DIV, GT_UMOD, GT_UDIV)); if (!varTypeIsFloating(node->TypeGet())) { // Both RAX and RDX are killed by the operation killMask = RBM_RAX | RBM_RDX; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return killMask; } @@ -843,7 +843,7 @@ regMaskTP LinearScan::getKillSetForModDiv(GenTreeOp* node) regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call) { regMaskTP killMask = RBM_NONE; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (compiler->compFloatingPointUsed) { if (call->TypeGet() == TYP_DOUBLE) @@ -855,15 +855,15 @@ regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call) needFloatTmpForFPCall = true; } } -#endif // _TARGET_X86_ -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#endif // TARGET_X86 +#if defined(TARGET_X86) || defined(TARGET_ARM) if (call->IsHelperCall()) { CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd); killMask = compiler->compHelperCallKillSet(helpFunc); } else -#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_ARM) { // if there is no FP used, we can ignore the FP kills if (compiler->compFloatingPointUsed) @@ -874,19 +874,19 @@ regMaskTP LinearScan::getKillSetForCall(GenTreeCall* call) { killMask = RBM_INT_CALLEE_TRASH; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (call->IsVirtualStub()) { killMask |= compiler->virtualStubParamInfo->GetRegMask(); } -#else // !_TARGET_ARM_ +#else // !TARGET_ARM // Verify that the special virtual stub call registers are in the kill mask. // We don't just add them unconditionally to the killMask because for most architectures // they are already in the RBM_CALLEE_TRASH set, // and we don't want to introduce extra checks and calls in this hot function. 
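The comment just above explains that, outside ARM, the virtual-stub parameter registers are expected to already be inside the call's kill mask rather than being added unconditionally, so a debug assert only has to confirm a subset relationship. A small self-contained sketch of that subset test (the mask values are arbitrary examples, not the runtime's RBM_* constants):

    #include <cassert>
    #include <cstdint>

    using RegMask = uint64_t;

    // True when every register in 'required' is already present in 'killMask'.
    bool killsAllOf(RegMask killMask, RegMask required)
    {
        return (killMask & required) == required;
    }

    int main()
    {
        RegMask calleeTrash = 0b11110000; // example kill mask
        RegMask stubParam   = 0b00110000; // example stub parameter registers
        assert(killsAllOf(calleeTrash, stubParam));   // already covered by the kill set
        assert(!killsAllOf(calleeTrash, 0b00000001)); // a preserved register is not
        return 0;
    }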
assert(!call->IsVirtualStub() || ((killMask & compiler->virtualStubParamInfo->GetRegMask()) == compiler->virtualStubParamInfo->GetRegMask())); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM } return killMask; } @@ -914,7 +914,7 @@ regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode) bool isCopyBlk = varTypeIsStruct(blkNode->Data()); switch (blkNode->gtBlkOpKind) { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 case GenTreeBlk::BlkOpKindHelper: if (isCopyBlk) { @@ -926,7 +926,7 @@ regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode) } break; #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH case GenTreeBlk::BlkOpKindRepInstr: if (isCopyBlk) { @@ -967,7 +967,7 @@ regMaskTP LinearScan::getKillSetForBlockStore(GenTreeBlk* blkNode) regMaskTP LinearScan::getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node) { regMaskTP killMask = RBM_NONE; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH switch (node->gtHWIntrinsicId) { case NI_SSE2_MaskMove: @@ -985,7 +985,7 @@ regMaskTP LinearScan::getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node) // Leave killMask as RBM_NONE break; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return killMask; } #endif // FEATURE_HW_INTRINSICS @@ -1037,7 +1037,7 @@ regMaskTP LinearScan::getKillSetForNode(GenTree* tree) case GT_RSZ: case GT_ROL: case GT_ROR: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case GT_LSH_HI: case GT_RSH_LO: #endif @@ -1046,7 +1046,7 @@ regMaskTP LinearScan::getKillSetForNode(GenTree* tree) case GT_MUL: case GT_MULHI: -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_MUL_LONG: #endif killMask = getKillSetForMul(tree->AsOp()); @@ -1386,7 +1386,7 @@ void LinearScan::buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation cu RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorSave, tree, RBM_FLT_CALLEE_SAVED); varInterval->isPartiallySpilled = true; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH pos->regOptional = true; #endif } @@ -1430,7 +1430,7 @@ void LinearScan::buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, Ls Interval* upperVectorInterval = getUpperVectorInterval(varIndex); RefPosition* pos = newRefPosition(upperVectorInterval, currentLoc, RefTypeUpperVectorRestore, node, RBM_NONE); lclVarInterval->isPartiallySpilled = false; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH pos->regOptional = true; #endif } @@ -1559,7 +1559,7 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree, BasicBlock* block, Lsra if (tree->isContained()) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // On XArch we can have contained candidate lclVars if they are part of a RMW // address computation. In this case we need to check whether it is a last use. 
if (tree->IsLocal() && ((tree->gtFlags & GTF_VAR_DEATH) != 0)) @@ -1572,9 +1572,9 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree, BasicBlock* block, Lsra VarSetOps::RemoveElemD(compiler, currentLiveVars, varIndex); } } -#else // _TARGET_XARCH_ +#else // TARGET_XARCH assert(!isCandidateLocalRef(tree)); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH JITDUMP("Contained\n"); return; } @@ -1594,10 +1594,10 @@ void LinearScan::buildRefPositionsForNode(GenTree* tree, BasicBlock* block, Lsra int produce = newDefListCount - oldDefListCount; assert((consume == 0) || (ComputeAvailableSrcCount(tree) == consume)); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Multi-reg call node is the only node that could produce multi-reg value assert(produce <= 1 || (tree->IsMultiRegCall() && produce == MAX_RET_REG_COUNT)); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // DEBUG @@ -2414,7 +2414,7 @@ void LinearScan::validateIntervals() } #endif // DEBUG -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH //------------------------------------------------------------------------ // setTgtPref: Set a preference relationship between the given Interval // and a Use RefPosition. @@ -2445,7 +2445,7 @@ void setTgtPref(Interval* interval, RefPosition* tgtPrefUse) } } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH //------------------------------------------------------------------------ // BuildDef: Build a RefTypeDef RefPosition for the given node // @@ -2489,7 +2489,7 @@ RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int mu assert(isSingleRegister(dstCandidates)); } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (varTypeIsByte(tree)) { if (dstCandidates == RBM_NONE) @@ -2499,7 +2499,7 @@ RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int mu dstCandidates &= ~RBM_NON_BYTE_REGS; assert(dstCandidates != RBM_NONE); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (pendingDelayFree) { interval->hasInterferingUses = true; @@ -2517,10 +2517,10 @@ RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int mu RefInfoListNode* refInfo = listNodePool.GetNode(defRefPosition, tree); defList.Append(refInfo); } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH setTgtPref(interval, tgtPrefUse); setTgtPref(interval, tgtPrefUse2); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE assert(!interval->isPartiallySpilled); #endif @@ -2744,12 +2744,12 @@ int LinearScan::BuildOperandUses(GenTree* node, regMaskTP candidates) return 1; } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (node->OperIs(GT_LONG)) { return BuildBinaryUses(node->AsOp(), candidates); } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) if (node->OperIsIndir()) { return BuildIndirUses(node->AsIndir(), candidates); @@ -2861,12 +2861,12 @@ int LinearScan::BuildDelayFreeUses(GenTree* node, regMaskTP candidates) // int LinearScan::BuildBinaryUses(GenTreeOp* node, regMaskTP candidates) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (node->OperIsBinary() && isRMWRegOper(node)) { return BuildRMWUses(node, candidates); } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH int srcCount = 0; GenTree* op1 = node->gtOp1; GenTree* op2 = node->gtGetOp2IfPresent(); @@ -2930,7 +2930,7 @@ int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) BuildUse(op1, RBM_NONE, i); } } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT else if (varTypeIsLong(op1)) { if (op1->OperIs(GT_MUL_LONG)) @@ -2947,7 +2947,7 @@ int 
LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) assert(srcCount == 2); } } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT else if (op1->isContained()) { srcCount = 0; @@ -2956,12 +2956,12 @@ int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) { srcCount = 1; regMaskTP srcCandidates = RBM_NONE; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsByte(storeLoc)) { srcCandidates = allByteRegs(); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 singleUseRef = BuildUse(op1, srcCandidates); } @@ -3065,7 +3065,7 @@ int LinearScan::BuildReturn(GenTree* tree) { GenTree* op1 = tree->gtGetOp1(); -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (tree->TypeGet() == TYP_LONG) { assert((op1->OperGet() == GT_LONG) && op1->isContained()); @@ -3076,20 +3076,20 @@ int LinearScan::BuildReturn(GenTree* tree) return 2; } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) if ((tree->TypeGet() != TYP_VOID) && !op1->isContained()) { regMaskTP useCandidates = RBM_NONE; #if FEATURE_MULTIREG_RET -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 if (varTypeIsSIMD(tree)) { useCandidates = allSIMDRegs(); BuildUse(op1, useCandidates); return 1; } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 if (varTypeIsStruct(tree)) { @@ -3156,7 +3156,7 @@ int LinearScan::BuildReturn(GenTree* tree) bool LinearScan::supportsSpecialPutArg() { -#if defined(DEBUG) && defined(_TARGET_X86_) +#if defined(DEBUG) && defined(TARGET_X86) // On x86, `LSRA_LIMIT_CALLER` is too restrictive to allow the use of special put args: this stress mode // leaves only three registers allocatable--eax, ecx, and edx--of which the latter two are also used for the // first two integral arguments to a call. This can leave us with too few registers to succesfully allocate in @@ -3266,7 +3266,7 @@ int LinearScan::BuildPutArgReg(GenTreeUnOp* node) isSpecialPutArg = true; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // If type of node is `long` then it is actually `double`. // The actual `long` types must have been transformed as a field list with two fields. if (node->TypeGet() == TYP_LONG) @@ -3279,7 +3279,7 @@ int LinearScan::BuildPutArgReg(GenTreeUnOp* node) BuildDef(node, argMaskHi, 1); } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { RefPosition* def = BuildDef(node, argMask); if (isSpecialPutArg) @@ -3343,7 +3343,7 @@ int LinearScan::BuildGCWriteBarrier(GenTree* tree) regMaskTP addrCandidates = RBM_ARG_0; regMaskTP srcCandidates = RBM_ARG_1; -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST) // the 'src' goes into x15 (REG_WRITE_BARRIER_SRC) @@ -3351,7 +3351,7 @@ int LinearScan::BuildGCWriteBarrier(GenTree* tree) addrCandidates = RBM_WRITE_BARRIER_DST; srcCandidates = RBM_WRITE_BARRIER_SRC; -#elif defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS +#elif defined(TARGET_X86) && NOGC_WRITE_BARRIERS bool useOptimizedWriteBarrierHelper = compiler->codeGen->genUseOptimizedWriteBarriers(tree, src); if (useOptimizedWriteBarrierHelper) @@ -3363,7 +3363,7 @@ int LinearScan::BuildGCWriteBarrier(GenTree* tree) srcCandidates = RBM_WRITE_BARRIER_SRC; } -#endif // defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS +#endif // defined(TARGET_X86) && NOGC_WRITE_BARRIERS BuildUse(addr, addrCandidates); BuildUse(src, srcCandidates); @@ -3391,7 +3391,7 @@ int LinearScan::BuildCmp(GenTree* tree) GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // If the compare is used by a jump, we just need to set the condition codes. 
If not, then we need // to store the result into the low byte of a register, which requires the dst be a byteable register. if (tree->TypeGet() != TYP_VOID) @@ -3440,7 +3440,7 @@ int LinearScan::BuildCmp(GenTree* tree) op2Candidates = allByteRegs(); } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 int srcCount = BuildOperandUses(op1, op1Candidates); srcCount += BuildOperandUses(op2, op2Candidates); diff --git a/src/coreclr/src/jit/lsraxarch.cpp b/src/coreclr/src/jit/lsraxarch.cpp index ac406c7bc6034..400ab3da28213 100644 --- a/src/coreclr/src/jit/lsraxarch.cpp +++ b/src/coreclr/src/jit/lsraxarch.cpp @@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH #include "jit.h" #include "sideeffects.h" @@ -180,7 +180,7 @@ int LinearScan::BuildNode(GenTree* tree) } break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_LONG: assert(tree->IsUnusedValue()); // Contained nodes are already processed, only unused GT_LONG can reach here. @@ -194,7 +194,7 @@ int LinearScan::BuildNode(GenTree* tree) BuildUse(tree->gtGetOp2()); break; -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) case GT_BOX: case GT_COMMA: @@ -301,7 +301,7 @@ int LinearScan::BuildNode(GenTree* tree) srcCount = 0; break; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: @@ -340,7 +340,7 @@ int LinearScan::BuildNode(GenTree* tree) srcCount = BuildModDiv(tree->AsOp()); break; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) case GT_MUL_LONG: dstCount = 2; __fallthrough; @@ -421,7 +421,7 @@ int LinearScan::BuildNode(GenTree* tree) case GT_RSZ: case GT_ROL: case GT_ROR: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case GT_LSH_HI: case GT_RSH_LO: #endif @@ -668,14 +668,14 @@ int LinearScan::BuildNode(GenTree* tree) { assert(dstCount == 1); RefPosition* internalDef = nullptr; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // On 64-bit we always need a temporary register: // - if the index is `native int` then we need to load the array // length into a register to widen it to `native int` // - if the index is `int` (or smaller) then we need to widen // it to `long` to peform the address calculation internalDef = buildInternalIntRegisterDefForNode(tree); -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT assert(!varTypeIsLong(tree->AsIndexAddr()->Index()->TypeGet())); switch (tree->AsIndexAddr()->gtElemSize) { @@ -689,7 +689,7 @@ int LinearScan::BuildNode(GenTree* tree) internalDef = buildInternalIntRegisterDefForNode(tree); break; } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT srcCount = BuildBinaryUses(tree->AsOp()); if (internalDef != nullptr) { @@ -782,7 +782,7 @@ bool LinearScan::isRMWRegOper(GenTree* tree) case GT_STORE_OBJ: case GT_SWITCH_TABLE: case GT_LOCKADD: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case GT_LONG: #endif return false; @@ -823,7 +823,7 @@ int LinearScan::BuildRMWUses(GenTreeOp* node, regMaskTP candidates) regMaskTP op1Candidates = candidates; regMaskTP op2Candidates = candidates; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsByte(node)) { regMaskTP byteCandidates = (candidates == RBM_NONE) ? 
allByteRegs() : (candidates & allByteRegs()); @@ -838,7 +838,7 @@ int LinearScan::BuildRMWUses(GenTreeOp* node, regMaskTP candidates) op2Candidates = byteCandidates; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 bool prefOp1 = false; bool prefOp2 = false; @@ -961,7 +961,7 @@ int LinearScan::BuildShiftRotate(GenTree* tree) // operand size. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that // we can have a three operand form. if (tree->OperGet() == GT_LSH_HI || tree->OperGet() == GT_RSH_LO) @@ -1061,7 +1061,7 @@ int LinearScan::BuildCall(GenTreeCall* call) // Set destination candidates for return value of the call. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) { // The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with @@ -1070,7 +1070,7 @@ int LinearScan::BuildCall(GenTreeCall* call) dstCandidates = RBM_PINVOKE_TCB; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (hasMultiRegRetVal) { assert(retTypeDesc != nullptr); @@ -1079,12 +1079,12 @@ int LinearScan::BuildCall(GenTreeCall* call) } else if (varTypeIsFloating(registerType)) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // The return value will be on the X87 stack, and we will need to move it. dstCandidates = allRegs(registerType); -#else // !_TARGET_X86_ +#else // !TARGET_X86 dstCandidates = RBM_FLOATRET; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } else if (registerType == TYP_LONG) { @@ -1236,7 +1236,7 @@ int LinearScan::BuildCall(GenTreeCall* call) // so that epilog sequence can generate "jmp rax" to achieve fast tail call. ctrlExprCandidates = RBM_RAX; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT)) { // On x86, we need to generate a very specific pattern for indirect VSD calls: @@ -1249,7 +1249,7 @@ int LinearScan::BuildCall(GenTreeCall* call) assert(ctrlExpr->isIndir() && ctrlExpr->isContained()); ctrlExprCandidates = RBM_VIRTUAL_STUB_TARGET; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if FEATURE_VARARG // If it is a fast tail call, it is already preferenced to use RAX. @@ -1313,7 +1313,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) SetContainsAVXFlags(); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((size & 1) != 0) { // We'll need to store a byte so a byte register is needed on x86. @@ -1328,7 +1328,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) sizeRegMask = RBM_RCX; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case GenTreeBlk::BlkOpKindHelper: dstAddrRegMask = RBM_ARG_0; srcRegMask = RBM_ARG_1; @@ -1369,7 +1369,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) if ((size % XMM_REGSIZE_BYTES) != 0) { regMaskTP regMask = allRegs(TYP_INT); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((size & 1) != 0) { // We'll need to store a byte so a byte register is needed on x86. 
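The TARGET_X86 checks above request a byteable internal register whenever the size is odd, because the copy then ends with a one-byte store and, in 32-bit mode, ESI/EDI have no 8-bit form. A sketch of that candidate-mask adjustment; the bit assignments below are placeholders, only the shape of the computation matches the code above:

    #include <cassert>
    #include <cstdint>

    using RegMask = uint32_t;

    // Placeholder x86-style masks (not the JIT's actual RBM_* values).
    constexpr RegMask RBM_EAX = 1u << 0, RBM_ECX = 1u << 1, RBM_EDX = 1u << 2, RBM_EBX = 1u << 3;
    constexpr RegMask RBM_ESI = 1u << 4, RBM_EDI = 1u << 5;
    constexpr RegMask RBM_NON_BYTE_REGS = RBM_ESI | RBM_EDI; // no AL/CL/DL/BL style form

    // An odd-sized copy ends with a 1-byte store, so the temp must be byteable.
    RegMask internalRegCandidates(unsigned size, RegMask allInt)
    {
        return ((size & 1) != 0) ? (allInt & ~RBM_NON_BYTE_REGS) : allInt;
    }

    int main()
    {
        RegMask allInt = RBM_EAX | RBM_ECX | RBM_EDX | RBM_EBX | RBM_ESI | RBM_EDI;
        assert(internalRegCandidates(7, allInt) == (RBM_EAX | RBM_ECX | RBM_EDX | RBM_EBX));
        assert(internalRegCandidates(8, allInt) == allInt);
        return 0;
    }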
@@ -1392,7 +1392,7 @@ int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) sizeRegMask = RBM_RCX; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case GenTreeBlk::BlkOpKindHelper: dstAddrRegMask = RBM_ARG_0; srcRegMask = RBM_ARG_1; @@ -1484,9 +1484,9 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) const var_types fieldType = fieldNode->TypeGet(); const unsigned fieldOffset = use.GetOffset(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(fieldType != TYP_LONG); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if defined(FEATURE_SIMD) // Note that we need to check the GT_FIELD_LIST type, not 'fieldType'. This is because the @@ -1498,7 +1498,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) } #endif // defined(FEATURE_SIMD) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (putArgStk->gtPutArgStkKind == GenTreePutArgStk::Kind::Push) { // We can treat as a slot any field that is stored at a slot boundary, where the previous @@ -1517,7 +1517,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) intTemp->registerAssignment &= allByteRegs(); } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 prevOffset = fieldOffset; } @@ -1539,7 +1539,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) GenTree* src = putArgStk->gtOp1; var_types type = src->TypeGet(); -#if defined(FEATURE_SIMD) && defined(_TARGET_X86_) +#if defined(FEATURE_SIMD) && defined(TARGET_X86) // For PutArgStk of a TYP_SIMD12, we need an extra register. if (putArgStk->isSIMD12()) { @@ -1549,7 +1549,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) buildInternalRegisterUses(); return srcCount; } -#endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_) +#endif // defined(FEATURE_SIMD) && defined(TARGET_X86) if (type != TYP_STRUCT) { @@ -1573,7 +1573,7 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) { regMaskTP regMask = allRegs(TYP_INT); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((size % 2) != 0) { regMask &= ~RBM_NON_BYTE_REGS; @@ -1582,11 +1582,11 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* putArgStk) buildInternalIntRegisterDefForNode(putArgStk, regMask); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (size >= 8) -#else // !_TARGET_X86_ +#else // !TARGET_X86 if (size >= XMM_REGSIZE_BYTES) -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 { // If we have a buffer larger than or equal to XMM_REGSIZE_BYTES on x64/ux, // or larger than or equal to 8 bytes on x86, reserve an XMM register to use it for a @@ -1670,7 +1670,7 @@ int LinearScan::BuildLclHeap(GenTree* tree) // No need to initialize allocated stack space. if (sizeVal < compiler->eeGetPageSize()) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // x86 needs a register here to avoid generating "sub" on ESP. buildInternalIntRegisterDefForNode(tree); #endif @@ -1738,7 +1738,7 @@ int LinearScan::BuildModDiv(GenTree* tree) dstCandidates = RBM_RAX; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (op1->OperGet() == GT_LONG) { assert(op1->isContained()); @@ -1811,12 +1811,12 @@ int LinearScan::BuildIntrinsic(GenTree* tree) internalFloatDef = buildInternalFloatRegisterDefForNode(tree, internalFloatRegCandidates()); break; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case CORINFO_INTRINSIC_Cos: case CORINFO_INTRINSIC_Sin: NYI_X86("Math intrinsics Cos and Sin"); break; -#endif // _TARGET_X86_ +#endif // TARGET_X86 case CORINFO_INTRINSIC_Sqrt: case CORINFO_INTRINSIC_Round: @@ -1893,7 +1893,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) // we can use the full int value. 
CLANG_FORMAT_COMMENT_ANCHOR; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (op1->OperGet() == GT_LONG) { assert(op1->isContained()); @@ -1921,7 +1921,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) } buildUses = false; } -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) } break; @@ -2109,7 +2109,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) buildInternalFloatRegisterDefForNode(simdTree); } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // This logic is duplicated from genSIMDIntrinsicGetItem(). // When we generate code for a SIMDIntrinsicGetItem, under certain circumstances we need to // generate a movzx/movsx. On x86, these require byteable registers. So figure out which @@ -2137,7 +2137,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) dstCandidates = allByteRegs(); } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } } break; @@ -2198,7 +2198,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) // We need an internal register different from targetReg. setInternalRegsDelayFree = true; buildInternalFloatRegisterDefForNode(simdTree); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (simdTree->gtSIMDBaseType == TYP_LONG) { buildInternalFloatRegisterDefForNode(simdTree); @@ -2463,7 +2463,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) { buildInternalIntRegisterDefForNode(intrinsicTree); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (varTypeIsByte(baseType)) { dstCandidates = allByteRegs(); @@ -2472,7 +2472,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) break; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case NI_SSE42_Crc32: case NI_SSE42_X64_Crc32: { @@ -2489,7 +2489,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) buildUses = false; break; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 case NI_BMI2_MultiplyNoFlags: case NI_BMI2_X64_MultiplyNoFlags: @@ -2711,7 +2711,7 @@ int LinearScan::BuildCast(GenTreeCast* cast) const var_types castType = cast->gtCastType; regMaskTP candidates = RBM_NONE; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsByte(castType)) { candidates = allByteRegs(); @@ -2793,7 +2793,7 @@ int LinearScan::BuildIndir(GenTreeIndir* indirTree) { regMaskTP srcCandidates = RBM_NONE; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Determine if we need byte regs for the non-mem source, if any. // Note that BuildShiftRotate (above) will handle the byte requirement as needed, // but STOREIND isn't itself an RMW op, so we have to explicitly set it for that case. @@ -2829,14 +2829,14 @@ int LinearScan::BuildIndir(GenTreeIndir* indirTree) GenTree* dstIndex = indirTree->Index(); CheckAndMoveRMWLastUse(index, dstIndex); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 srcCount += BuildBinaryUses(source->AsOp(), srcCandidates); } } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (varTypeIsByte(indirTree) && !source->isContained()) { BuildUse(source, allByteRegs()); @@ -2899,7 +2899,7 @@ int LinearScan::BuildMul(GenTree* tree) // This special widening 32x32->64 MUL is not used on x64 CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (tree->OperGet() != GT_MUL_LONG) #endif { @@ -2924,7 +2924,7 @@ int LinearScan::BuildMul(GenTree* tree) // upper 32 bits of the result set the destination candidate to REG_RDX. 
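BuildMul above routes the upper 32 bits of a widening multiply to REG_RDX because the x86 wide multiply leaves its 64-bit product split across two registers, which is why the node defines two destinations. A standalone arithmetic sketch of that hi/lo split (plain C++, no JIT types):

    #include <cassert>
    #include <cstdint>

    // A 32x32->64 widening multiply produces a product that x86 'mul' leaves
    // in an EDX:EAX style register pair; this just reproduces the arithmetic.
    void widenMul32(uint32_t a, uint32_t b, uint32_t* hi, uint32_t* lo)
    {
        uint64_t product = static_cast<uint64_t>(a) * b;
        *lo = static_cast<uint32_t>(product);       // low half (EAX-like)
        *hi = static_cast<uint32_t>(product >> 32); // high half (EDX-like)
    }

    int main()
    {
        uint32_t hi = 0, lo = 0;
        widenMul32(0xFFFFFFFFu, 2u, &hi, &lo); // product = 0x1FFFFFFFE
        assert(hi == 1u && lo == 0xFFFFFFFEu);
        return 0;
    }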
dstCandidates = RBM_RDX; } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) else if (tree->OperGet() == GT_MUL_LONG) { // have to use the encoding:RDX:RAX = RAX * rm @@ -2967,4 +2967,4 @@ void LinearScan::SetContainsAVXFlags(unsigned sizeOfSIMDVector /* = 0*/) } } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp index fae4b25ef09e5..aed99029b4917 100644 --- a/src/coreclr/src/jit/morph.cpp +++ b/src/coreclr/src/jit/morph.cpp @@ -85,7 +85,7 @@ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall: tree->AsCall()->gtEntryPoint.accessType = IAT_VALUE; #endif -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (varTypeIsLong(tree)) { GenTreeCall* callNode = tree->AsCall(); @@ -94,7 +94,7 @@ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall: retTypeDesc->InitializeLongReturnType(this); callNode->ClearOtherRegs(); } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT if (tree->OperMayThrow(this)) { @@ -156,15 +156,15 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { if (srcType == TYP_FLOAT -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // Arm64: src = float, dst is overflow conversion. // This goes through helper and hence src needs to be converted to double. && tree->gtOverflow() -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. // This goes through helper and hence src needs to be converted to double. && (tree->gtOverflow() || (dstType == TYP_ULONG)) -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else @@ -179,7 +179,7 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) // do we need to do it in two steps R -> I, '-> smallType CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_ARM64_) || defined(_TARGET_AMD64_) +#if defined(TARGET_ARM64) || defined(TARGET_AMD64) if (dstSize < genTypeSize(TYP_INT)) { oper = gtNewCastNodeL(TYP_INT, oper, tree->IsUnsigned(), TYP_INT); @@ -198,7 +198,7 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) /* Note that if we need to use a helper call then we can not morph oper */ if (!tree->gtOverflow()) { -#ifdef _TARGET_ARM64_ // On ARM64 All non-overflow checking conversions can be optimized +#ifdef TARGET_ARM64 // On ARM64 All non-overflow checking conversions can be optimized goto OPTIMIZECAST; #else switch (dstType) @@ -207,26 +207,26 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) goto OPTIMIZECAST; case TYP_UINT: -#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_) +#if defined(TARGET_ARM) || defined(TARGET_AMD64) goto OPTIMIZECAST; -#else // _TARGET_X86_ +#else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); -#endif // _TARGET_X86_ +#endif // TARGET_X86 case TYP_LONG: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long goto OPTIMIZECAST; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: break; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } else { @@ -247,7 +247,7 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) noway_assert(!"Unexpected dstType"); } } -#ifndef 
_TARGET_64BIT_ +#ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. @@ -257,9 +257,9 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; } -#endif //!_TARGET_64BIT_ +#endif //! TARGET_64BIT -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && (oper->gtOper == GT_CAST) && !varTypeIsLong(oper->AsCast()->CastOp())) { @@ -290,9 +290,9 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } -#endif //_TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 @@ -327,9 +327,9 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) tree->gtFlags &= ~GTF_UNSIGNED; } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { @@ -351,7 +351,7 @@ GenTree* Compiler::fgMorphCast(GenTree* tree) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } -#endif //_TARGET_X86_ +#endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. we would like to just @@ -1398,7 +1398,7 @@ void fgArgInfo::ArgsComplete() { prevArgTabEntry->needPlace = true; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #endif } } @@ -1408,7 +1408,7 @@ void fgArgInfo::ArgsComplete() // with multiple indirections, so here we consider spilling it into a tmp LclVar. // CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM bool isMultiRegArg = (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->numSlots > 1); #else bool isMultiRegArg = (curArgTabEntry->numRegs > 1); @@ -1433,7 +1433,7 @@ void fgArgInfo::ArgsComplete() curArgTabEntry->needTmp = true; needsTemps = true; } -#if defined(FEATURE_SIMD) && defined(_TARGET_ARM64_) +#if defined(FEATURE_SIMD) && defined(TARGET_ARM64) else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet())) { // SIMD types do not need the optimization below due to their sizes @@ -1446,7 +1446,7 @@ void fgArgInfo::ArgsComplete() } } #endif -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM // TODO-Arm: This optimization is not implemented for ARM32 // so we skip this for ARM32 until it is ported to use RyuJIT backend // @@ -1492,7 +1492,7 @@ void fgArgInfo::ArgsComplete() break; } } -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM } } #endif // FEATURE_MULTIREG_ARGS @@ -1925,7 +1925,7 @@ GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) if (varTypeIsStruct(type)) { -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. 
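The comment above asks whether a struct can be passed as a primitive type. The idea, sketched below under simplifying assumptions, is that a struct whose size matches a machine word can be retyped and handed over in a single register slot (the JIT's actual lookup also weighs field layout, HFAs, and the target ABI):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct TwoInts
    {
        int32_t a;
        int32_t b;
    };

    // View an 8-byte struct as one 64-bit primitive, the way a register-sized
    // struct argument can be retyped instead of being passed by reference.
    uint64_t asPrimitive(const TwoInts& s)
    {
        static_assert(sizeof(TwoInts) == sizeof(uint64_t), "fits one 64-bit slot");
        uint64_t bits = 0;
        std::memcpy(&bits, &s, sizeof(bits));
        return bits;
    }

    int main()
    {
        TwoInts s{1, 2};
        uint64_t bits = asPrimitive(s);
        TwoInts roundTrip{};
        std::memcpy(&roundTrip, &bits, sizeof(roundTrip));
        assert(roundTrip.a == 1 && roundTrip.b == 2);
        return 0;
    }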
@@ -1977,7 +1977,7 @@ GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) addrNode = arg; #if FEATURE_MULTIREG_ARGS -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { @@ -1993,11 +1993,11 @@ GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } -#else // not (_TARGET_AMD64_ or _TARGET_ARM64_ or _TARGET_ARM_) +#else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); @@ -2008,7 +2008,7 @@ GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode); -#endif // not (_TARGET_AMD64_ or _TARGET_ARM64_ or _TARGET_ARM_) +#endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) } // (varTypeIsStruct(type)) @@ -2082,7 +2082,7 @@ void fgArgInfo::EvalArgsToTemps() } #endif -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) noway_assert(argx->gtType != TYP_STRUCT); #endif @@ -2137,7 +2137,7 @@ void fgArgInfo::EvalArgsToTemps() if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); -#if defined(_TARGET_ARMARCH_) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM architectures. @@ -2148,7 +2148,7 @@ void fgArgInfo::EvalArgsToTemps() scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } -#endif // _TARGET_ARMARCH_ || defined (UNIX_AMD64_ABI) +#endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => @@ -2167,7 +2167,7 @@ void fgArgInfo::EvalArgsToTemps() curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). @@ -2182,7 +2182,7 @@ void fgArgInfo::EvalArgsToTemps() allUsedRegs |= genRegMask(argReg); } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } /* mark the assignment as a late argument */ @@ -2225,13 +2225,13 @@ void fgArgInfo::EvalArgsToTemps() // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference. 
noway_assert(argx->gtType != TYP_STRUCT); -#else // !defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI) +#else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { @@ -2239,7 +2239,7 @@ void fgArgInfo::EvalArgsToTemps() noway_assert(clsHnd != NO_CLASS_HANDLE); } -#endif // !(defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) +#endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); @@ -2422,12 +2422,12 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number @@ -2443,7 +2443,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); -#ifdef _TARGET_UNIX_ +#ifdef TARGET_UNIX if (callIsVararg) { // Currently native varargs is not implemented on non windows targets. @@ -2453,7 +2453,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing @@ -2584,7 +2584,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers has a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) @@ -2594,8 +2594,8 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME); } -#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_) -#if defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_ARM) +#if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs @@ -2631,8 +2631,8 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg()); } -#endif // defined(_TARGET_ARM_) -#if defined(_TARGET_X86_) +#endif // defined(TARGET_ARM) +#if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. 
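The x86 block above places the lo half of a long in EAX and the hi half in EDX before calling the shift helpers. As a worked illustration of what such a helper conceptually computes (this is a stand-in, not the runtime's helper), here is a 64-bit left shift assembled from 32-bit halves:

    #include <cassert>
    #include <cstdint>

    // 64-bit left shift on a 32-bit target: the value arrives as lo/hi words and
    // leaves the same way; 'count' is masked to 0..63 to keep the sketch well defined.
    void shiftLeft64(uint32_t* lo, uint32_t* hi, unsigned count)
    {
        uint64_t value = (static_cast<uint64_t>(*hi) << 32) | *lo;
        value <<= (count & 63u);
        *lo = static_cast<uint32_t>(value);
        *hi = static_cast<uint32_t>(value >> 32);
    }

    int main()
    {
        uint32_t lo = 0x80000000u, hi = 0;
        shiftLeft64(&lo, &hi, 1); // the set bit crosses from the lo word into the hi word
        assert(lo == 0 && hi == 1);
        return 0;
    }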
else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || @@ -2648,7 +2648,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. @@ -2707,7 +2707,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) } } else -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); @@ -2716,17 +2716,17 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) noway_assert(arg != nullptr); call->gtCallCookie = nullptr; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // x86 passes the cookie on the stack as the final argument to the call. GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(arg); -#else // !defined(_TARGET_X86_) +#else // !defined(TARGET_X86) // All other architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM); numArgs++; @@ -2742,7 +2742,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } -#if defined(FEATURE_READYTORUN_COMPILER) && defined(_TARGET_ARMARCH_) +#if defined(FEATURE_READYTORUN_COMPILER) && defined(TARGET_ARMARCH) // For arm, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. if (call->IsR2RRelativeIndir()) @@ -2760,7 +2760,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum()); } -#endif // FEATURE_READYTORUN_COMPILER && _TARGET_ARMARCH_ +#endif // FEATURE_READYTORUN_COMPILER && TARGET_ARMARCH // Allocate the fgArgInfo for the call node; // @@ -2789,7 +2789,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) argSlots++; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions @@ -2825,12 +2825,12 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) if (callHasRetBuffArg) maxRegArgs++; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially @@ -2867,7 +2867,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) bool anyFloatStackArgs = false; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef UNIX_AMD64_ABI SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; @@ -2906,10 +2906,10 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); -#if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) // Make sure for vararg methods isHfaArg is not true. 
isHfaArg = callIsVararg ? false : isHfaArg; -#endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) +#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) if (isHfaArg) { @@ -2925,7 +2925,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) } #endif // FEATURE_HFA -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeIsFloating(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); @@ -2960,16 +2960,16 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) } } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) assert(!callIsVararg || !isHfaArg); passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeIsFloating(argx)); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) passUsingFloatRegs = varTypeIsFloating(argx); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) passUsingFloatRegs = false; @@ -3022,7 +3022,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) assert(structSize == info.compCompHnd->getClassSize(objClass)); } } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI if (!isStructArg) { @@ -3036,7 +3036,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) #else // !UNIX_AMD64_ABI size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot' #endif // UNIX_AMD64_ABI -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (isStructArg) { if (isHfaArg) @@ -3066,7 +3066,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) { size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot' } -#elif defined(_TARGET_ARM_) || defined(_TARGET_X86_) +#elif defined(TARGET_ARM) || defined(TARGET_X86) if (isStructArg) { size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE; @@ -3104,13 +3104,13 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) { // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register. // For ARM or AMD64/Windows only power-of-2 structs are passed in registers. -#if !defined(_TARGET_ARM64_) && !defined(UNIX_AMD64_ABI) +#if !defined(TARGET_ARM64) && !defined(UNIX_AMD64_ABI) if (!isPow2(originalSize)) -#endif // !_TARGET_ARM64_ && !UNIX_AMD64_ABI +#endif // !TARGET_ARM64 && !UNIX_AMD64_ABI { passedInRegisters = true; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct, // or for a struct of two floats. This causes the struct to be address-taken. if (structBaseType == TYP_DOUBLE) @@ -3118,7 +3118,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) size = 2; } else -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { size = 1; } @@ -3142,7 +3142,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) #endif ) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (passUsingFloatRegs) { // First, see if it can be back-filled @@ -3172,7 +3172,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) { isRegArg = intArgRegNum < MAX_REG_ARG; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (passUsingFloatRegs) { // Check if the last register needed is still in the fp argument register range. @@ -3205,7 +3205,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // if (!isRegArg && (size > 1)) { -#if defined(_TARGET_WINDOWS_) +#if defined(TARGET_WINDOWS) // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. 
if (callIsVararg) @@ -3214,7 +3214,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) isRegArg = isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else -#endif // defined(_TARGET_WINDOWS_) +#endif // defined(TARGET_WINDOWS) { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args @@ -3223,7 +3223,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) } } } -#else // not _TARGET_ARM_ or _TARGET_ARM64_ +#else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) @@ -3262,7 +3262,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) #else // !defined(UNIX_AMD64_ABI) isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; #endif // !defined(UNIX_AMD64_ABI) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } else { @@ -3279,7 +3279,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) { isRegArg = (nonStdRegNum != REG_STK); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) else if (call->IsTailCallViaHelper()) { // We have already (before calling fgMorphArgs()) appended the 4 special args @@ -3291,12 +3291,12 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) isRegArg = false; } } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) // Now we know if the argument goes in registers or not and how big it is. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all // subsequent HFA/float/double arguments go on the stack. if (!isRegArg && passUsingFloatRegs) @@ -3322,7 +3322,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; @@ -3368,7 +3368,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) : genMapIntRegArgNumToRegNum(intArgRegNum); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif @@ -3668,7 +3668,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) else // This is passed by value. { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items assert(size == roundupSize / TARGET_POINTER_SIZE); @@ -3692,7 +3692,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } -#if defined(_TARGET_ARM64_) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 @@ -3702,12 +3702,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } -#endif // _TARGET_ARM64_ || UNIX_AMD64_ABI +#endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. 
@@ -3751,14 +3751,14 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) } } #endif // UNIX_AMD64_ABI -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || @@ -3771,7 +3771,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) { copyBlkClass = objClass; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } else { @@ -3780,7 +3780,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // Change our argument, as needed, into a value of the appropriate type. CLANG_FORMAT_COMMENT_ANCHOR; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM assert((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else assert((size == 1) || @@ -3872,7 +3872,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #ifndef UNIX_AMD64_ABI // We still have a struct unless we converted the GT_OBJ into a GT_IND above... @@ -3927,7 +3927,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); @@ -3935,7 +3935,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); -#else // !_TARGET_X86_ +#else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany @@ -3961,7 +3961,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS @@ -3972,7 +3972,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) hasMultiregStructArgs = true; } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); @@ -3986,7 +3986,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) } #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? 
argx : fgIsIndirOfAddrOfLocal(argx); @@ -4024,7 +4024,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) } } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; @@ -4130,10 +4130,10 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) unsigned flagsSummary = 0; fgArgInfo* allArgInfo = call->fgArgInfo; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert(!"Logic error: no MultiregStructArgs for X86"); #endif -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI"); #endif @@ -4260,11 +4260,11 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry { assert(varTypeIsStruct(arg->TypeGet())); -#if !defined(_TARGET_ARMARCH_) && !defined(UNIX_AMD64_ABI) +#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI) NYI("fgMorphMultiregStructArg requires implementation for this target"); #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if ((fgEntryPtr->IsSplit() && fgEntryPtr->numSlots + fgEntryPtr->numRegs > 4) || (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK)) #else @@ -4365,9 +4365,9 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) -#if !defined(_HOST_UNIX_) && defined(_TARGET_ARM64_) +#if !defined(HOST_UNIX) && defined(TARGET_ARM64) && !fgEntryPtr->IsVararg() -#endif // !defined(_HOST_UNIX_) && defined(_TARGET_ARM64_) +#endif // !defined(HOST_UNIX) && defined(TARGET_ARM64) ) { elemType = hfaType; @@ -4431,11 +4431,11 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry case 2: type[lastElem] = TYP_SHORT; break; -#if defined(_TARGET_ARM64_) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) case 4: type[lastElem] = TYP_INT; break; -#endif // (_TARGET_ARM64_) || (UNIX_AMD64_ABI) +#endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) default: noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg"); break; @@ -4477,17 +4477,17 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry #ifndef UNIX_AMD64_ABI // This local variable must match the layout of the 'objClass' type exactly if (varDsc->lvIsHfa() -#if !defined(_HOST_UNIX_) && defined(_TARGET_ARM64_) +#if !defined(HOST_UNIX) && defined(TARGET_ARM64) && !fgEntryPtr->IsVararg() -#endif // !defined(_HOST_UNIX_) && defined(_TARGET_ARM64_) +#endif // !defined(HOST_UNIX) && defined(TARGET_ARM64) ) { // We have a HFA struct. // Note that GetHfaType may not be the same as elemType, since TYP_SIMD8 is handled the same as TYP_DOUBLE. var_types useElemType = elemType; -#if defined(_TARGET_ARM64_) & defined(FEATURE_SIMD) +#if defined(TARGET_ARM64) & defined(FEATURE_SIMD) useElemType = (elemType == TYP_SIMD8) ? 
TYP_DOUBLE : useElemType; -#endif // _TARGET_ARM64_ && FEATURE_SIMD +#endif // TARGET_ARM64 && FEATURE_SIMD noway_assert(useElemType == varDsc->GetHfaType()); noway_assert(elemSize == genTypeSize(elemType)); noway_assert(elemCount == (varDsc->lvExactSize / elemSize)); @@ -4500,10 +4500,10 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry } else { -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // We must have a 16-byte struct (non-HFA) noway_assert(elemCount == 2); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) noway_assert(elemCount <= 4); #endif @@ -4528,13 +4528,13 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry } #endif // !UNIX_AMD64_ABI -#if defined(_TARGET_ARM64_) || defined(UNIX_AMD64_ABI) +#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // Is this LclVar a promoted struct with exactly 2 fields? // TODO-ARM64-CQ: Support struct promoted HFA types here if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && (!varDsc->lvIsHfa() -#if !defined(_HOST_UNIX_) && defined(_TARGET_ARM64_) +#if !defined(HOST_UNIX) && defined(TARGET_ARM64) && !fgEntryPtr->IsVararg() -#endif // !defined(_HOST_UNIX_) && defined(_TARGET_ARM64_) +#endif // !defined(HOST_UNIX) && defined(TARGET_ARM64) )) { // See if we have two promoted fields that start at offset 0 and 8? @@ -4583,7 +4583,7 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DNER_LocalField)); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Is this LclVar a promoted struct with exactly same size? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { @@ -4640,7 +4640,7 @@ GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntry // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DNER_LocalField)); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } // If we didn't set newarg to a new List Node tree @@ -4979,7 +4979,7 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, return; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, @@ -5019,7 +5019,7 @@ void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //**************************************************************************** // fgFixupStructReturn: @@ -5494,7 +5494,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) // Next introduce a GT_ARR_BOUNDS_CHECK node var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check. -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case, // the comparison will have to be widen to 64 bits. 
@@ -5502,7 +5502,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) { bndsChkType = TYP_I_IMPL; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB); @@ -5527,7 +5527,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) GenTree* addr; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Widen 'index' on 64-bit targets if (index->TypeGet() != TYP_I_IMPL) { @@ -5540,7 +5540,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) index = gtNewCastNode(TYP_I_IMPL, index, false, TYP_I_IMPL); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT /* Scale the index value if necessary */ if (elemSize > 1) @@ -5735,7 +5735,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) return tree; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /***************************************************************************** * * Wrap fixed stack arguments for varargs functions to go through varargs @@ -5803,7 +5803,7 @@ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) tree->gtFlags |= GTF_GLOB_REF; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0); @@ -5816,7 +5816,7 @@ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) return newTree; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* If not during the global morphing phase bail */ @@ -6320,7 +6320,7 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) if (pFldAddr == nullptr) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr)) { // The address is not directly addressible, so force it into a @@ -6344,7 +6344,7 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) return fgMorphSmpOp(tree); } else -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { // Only volatile or classinit could be set, and they map over noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0); @@ -6839,13 +6839,13 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. -#if (defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM_)) || (defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_)) +#if (defined(TARGET_WINDOWS) && defined(TARGET_ARM)) || (defined(TARGET_WINDOWS) && defined(TARGET_ARM64)) if (info.compIsVarArgs || callee->IsVarargs()) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } -#endif // (defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM_)) || defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_)) +#endif // (defined(TARGET_WINDOWS) && defined(TARGET_ARM)) || defined(TARGET_WINDOWS) && defined(TARGET_ARM64)) if (callee->HasRetBufArg()) // RetBuf { @@ -6963,7 +6963,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) return nullptr; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Needed for Jit64 compat. // In future, enabling tail calls from methods that need GS cookie check // would require codegen side work to emit GS cookie check before a tail @@ -7120,12 +7120,12 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) return nullptr; } -#if defined(_TARGET_ARM64_) || defined(_TARGET_UNIX_) +#if defined(TARGET_ARM64) || defined(TARGET_UNIX) // NYI - TAILCALL_RECURSIVE/TAILCALL_HELPER. 
// So, bail out if we can't make fast tail call. failTailCall(failReason); return nullptr; -#elif !defined(_TARGET_X86_) +#elif !defined(TARGET_X86) // Ok, now we are _almost_ there. Since this needs helper make sure we // can get the required copy thunk. CorInfoHelperTailCallSpecialHandling handling = CORINFO_TAILCALL_NORMAL; @@ -7401,7 +7401,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) */ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) noway_assert(!"Slow tail calls not supported on non-Windows platforms."); #endif @@ -7416,7 +7416,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) { call->gtFlags |= GTF_CALL_NULLCHECK; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // If we already inited arg info here then we will have added the VSD // arg on AMD64. So we remove it here as we will handle this case // specially below. @@ -7453,7 +7453,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) #endif } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning @@ -7591,7 +7591,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) call->gtCallMoreFlags |= GTF_CALL_M_VARARGS | GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER; call->gtFlags &= ~GTF_CALL_POP_ARGS; -#elif defined(_TARGET_XARCH_) +#elif defined(TARGET_XARCH) // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. @@ -7674,7 +7674,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->IsLocal()) { // tmp = "this" @@ -7688,7 +7688,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) objp = thisPtr; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (call->NeedsNullCheck()) { @@ -7735,7 +7735,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Add the extra VSD parameter to arg list in case of VSD calls. // Tail call arg copying thunk will move this extra VSD parameter @@ -7763,7 +7763,7 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) arg = gtNewIconHandleNode(ssize_t(pfnCopyArgs), GTF_ICON_FTN_ADDR); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. @@ -7798,15 +7798,15 @@ void Compiler::fgMorphTailCallViaHelper(GenTreeCall* call, void* pfnCopyArgs) GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg0); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 // It is now a varargs tail call dispatched via helper. 
call->gtCallMoreFlags |= GTF_CALL_M_VARARGS | GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER; call->gtFlags &= ~GTF_CALL_POP_ARGS; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) NYI_ARM64("Tail calls via stub are unsupported on this platform."); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 // The function is responsible for doing explicit null check when it is necessary. assert(!call->NeedsNullCheck()); @@ -8563,7 +8563,7 @@ GenTree* Compiler::fgMorphLeaf(GenTree* tree) tree->gtFlags |= GTF_GLOB_REF; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(), @@ -8577,7 +8577,7 @@ GenTree* Compiler::fgMorphLeaf(GenTree* tree) return newTree; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } else if (tree->gtOper == GT_FTN_ADDR) { @@ -8776,11 +8776,11 @@ GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) asgType = TYP_SHORT; break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case 4: asgType = TYP_INT; break; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT } } } @@ -9959,7 +9959,7 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree) requiresCopyBlock = true; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if ((rhs->OperIsIndir()) && (rhs->gtFlags & GTF_IND_UNALIGNED)) { JITDUMP(" rhs is unaligned"); @@ -9971,7 +9971,7 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree) JITDUMP(" asg is unaligned"); requiresCopyBlock = true; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // Can't use field by field assignment if the src is a call. if (rhs->OperGet() == GT_CALL) @@ -10974,7 +10974,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) case GT_MUL: -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (typ == TYP_LONG) { /* For (long)int1 * (long)int2, we dont actually do the @@ -11091,7 +11091,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) assert(tree->gtIsValid64RsltMul()); } } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT break; case GT_DIV: @@ -11109,7 +11109,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) op2->AsDblCon()->gtDconVal = 1.0 / divisor; } } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = CORINFO_HELP_LDIV; @@ -11123,7 +11123,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) goto USE_HELPER_FOR_ARITH; } #endif -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT if (op2->gtOper == GT_CAST && op2->AsOp()->gtOp1->IsCnsIntOrI()) { @@ -11133,7 +11133,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) case GT_UDIV: -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (typ == TYP_LONG) { helper = CORINFO_HELP_ULDIV; @@ -11146,7 +11146,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) goto USE_HELPER_FOR_ARITH; } #endif -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT break; case GT_MOD: @@ -11181,9 +11181,9 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) case GT_UMOD: -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // -// Note for _TARGET_ARMARCH_ we don't have a remainder instruction, so we don't do this optimization +// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization // #else // _TARGET_XARCH /* If this is an unsigned long mod with op2 which is a cast to long from a @@ -11245,7 +11245,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) } } -#ifndef _TARGET_64BIT_ +#ifndef 
TARGET_64BIT if (typ == TYP_LONG) { helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD; @@ -11267,14 +11267,14 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) } } #endif -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT if (op2->gtOper == GT_CAST && op2->AsOp()->gtOp1->IsCnsIntOrI()) { op2 = gtFoldExprConst(op2); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // For ARM64 we don't have a remainder instruction, // The architecture manual suggests the following transformation to // generate code for such operator: @@ -11303,7 +11303,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) op2 = tree->AsOp()->gtOp2; } } -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 // If b is not a power of 2 constant then lowering replaces a % b // with a - (a / b) * b and applies magic division optimization to // a / b. The code may already contain an a / b expression (e.g. @@ -11325,7 +11325,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) op2 = tree->AsOp()->gtOp2; } } -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 break; USE_HELPER_FOR_ARITH: @@ -11410,7 +11410,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) case GT_RUNTIMELOOKUP: return fgMorphTree(op1); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicId == CORINFO_INTRINSIC_Round) { @@ -11984,10 +11984,10 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) } cns2->AsIntCon()->gtIconVal = ival2; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // we need to properly re-sign-extend or truncate as needed. cns2->AsIntCon()->TruncateOrSignExtend32(); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT op1 = tree->AsOp()->gtOp1 = op1->AsOp()->gtOp1; } @@ -12416,14 +12416,14 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) case GT_MUL: -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (typ == TYP_LONG) { // This must be GTF_MUL_64RSLT assert(tree->gtIsValid64RsltMul()); return tree; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT goto CM_OVF_OP; case GT_SUB: @@ -12474,7 +12474,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) } break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case GT_DIV: if (!varTypeIsFloating(tree->gtType)) { @@ -12549,13 +12549,13 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) cns1 = op1->AsOp()->gtOp2; cns2 = op2->AsOp()->gtOp2; cns1->AsIntCon()->gtIconVal += cns2->AsIntCon()->gtIconVal; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (cns1->TypeGet() == TYP_INT) { // we need to properly re-sign-extend or truncate after adding two int constants above cns1->AsIntCon()->TruncateOrSignExtend32(); } -#endif //_TARGET_64BIT_ +#endif // TARGET_64BIT tree->AsOp()->gtOp2 = cns1; DEBUG_DESTROY_NODE(cns2); @@ -12585,13 +12585,13 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) cns1->AsIntConCommon()->SetIconValue(cns1->AsIntConCommon()->IconValue() + op2->AsIntConCommon()->IconValue()); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (cns1->TypeGet() == TYP_INT) { // we need to properly re-sign-extend or truncate after adding two int constants above cns1->AsIntCon()->TruncateOrSignExtend32(); } -#endif //_TARGET_64BIT_ +#endif // TARGET_64BIT if (cns1->OperGet() == GT_CNS_INT) { @@ -12655,9 +12655,9 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) /* See if we can fold GT_MUL by const nodes */ else if (oper == GT_MUL && op2->IsCnsIntOrI() && 
!optValnumCSE_phase) { -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT noway_assert(typ <= TYP_UINT); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT noway_assert(!tree->gtOverflow()); ssize_t mult = op2->AsIntConCommon()->IconValue(); @@ -12974,7 +12974,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) break; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Check for a LclVar TYP_STRUCT with misalignment on a Floating Point field // if (varTypeIsFloating(typ)) @@ -13756,13 +13756,13 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) op1->ChangeOper(GT_MUL); add->AsIntCon()->gtIconVal = imul; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (add->gtType == TYP_INT) { // we need to properly re-sign-extend or truncate after multiplying two int constants above add->AsIntCon()->TruncateOrSignExtend32(); } -#endif //_TARGET_64BIT_ +#endif // TARGET_64BIT } } @@ -13800,12 +13800,12 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) tree->ChangeOper(GT_ADD); ssize_t result = iadd << ishf; op2->AsIntConCommon()->SetIconValue(result); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (op1->gtType == TYP_INT) { op2->AsIntCon()->TruncateOrSignExtend32(); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; @@ -14124,7 +14124,7 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree) // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. @@ -15680,13 +15680,13 @@ void Compiler::fgMorphBlocks() /* We'll jump to the genReturnBB */ CLANG_FORMAT_COMMENT_ANCHOR; -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; @@ -15806,12 +15806,12 @@ void Compiler::fgSetOptions() codeGen->setFramePointerRequired(true); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (!opts.genFPopt) { @@ -15825,7 +15825,7 @@ void Compiler::fgSetOptions() // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. 
This would require a few more changes for X86 to use @@ -15841,14 +15841,14 @@ void Compiler::fgSetOptions() codeGen->setFramePointerRequiredEH(true); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 if (compHndBBtabCount > 0) { codeGen->setFramePointerRequiredEH(true); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifdef UNIX_X86_ABI if (info.compXcptnsCount > 0) @@ -16638,7 +16638,7 @@ void Compiler::fgPromoteStructs() } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (structPromotionHelper->GetRequiresScratchVar()) { // Ensure that the scratch variable is allocated, in case we @@ -16650,7 +16650,7 @@ void Compiler::fgPromoteStructs() lvaTable[lvaPromotedStructAssemblyScratchVar].lvType = TYP_I_IMPL; } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef DEBUG if (verbose) @@ -16846,7 +16846,7 @@ void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) } if (fldOffset != BAD_VAR_NUM && genTypeSize(fldVarDsc->TypeGet()) == genTypeSize(tree->gtType) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 && varTypeIsFloating(fldVarDsc->TypeGet()) == varTypeIsFloating(tree->gtType) #endif ) @@ -16898,7 +16898,7 @@ void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) void Compiler::fgResetImplicitByRefRefCount() { -#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_) +#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { @@ -16920,7 +16920,7 @@ void Compiler::fgResetImplicitByRefRefCount() } } -#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_ +#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ @@ -16934,7 +16934,7 @@ void Compiler::fgResetImplicitByRefRefCount() void Compiler::fgRetypeImplicitByRefArgs() { -#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_) +#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { @@ -17111,7 +17111,7 @@ void Compiler::fgRetypeImplicitByRefArgs() } } -#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_ +#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ @@ -17122,7 +17122,7 @@ void Compiler::fgRetypeImplicitByRefArgs() void Compiler::fgMarkDemotedImplicitByRefArgs() { -#if (defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_) +#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { @@ -17186,7 +17186,7 @@ void Compiler::fgMarkDemotedImplicitByRefArgs() } } -#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_ +#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** @@ -17196,11 +17196,11 @@ void Compiler::fgMarkDemotedImplicitByRefArgs() */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { -#if (!defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI)) && !defined(_TARGET_ARM64_) +#if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; -#else // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_ +#else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; @@ -17234,7 +17234,7 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) } return changed; -#endif // (_TARGET_AMD64_ && !UNIX_AMD64_ABI) || _TARGET_ARM64_ 
+#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) @@ -17645,7 +17645,7 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* #endif // FEATURE_SIMD -#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) +#if !defined(FEATURE_CORECLR) && defined(TARGET_AMD64) Statement* SkipNopStmts(Statement* stmt) { while ((stmt != nullptr) && !stmt->IsNothingNode()) @@ -17655,7 +17655,7 @@ Statement* SkipNopStmts(Statement* stmt) return stmt; } -#endif // !FEATURE_CORECLR && _TARGET_AMD64_ +#endif // !FEATURE_CORECLR && TARGET_AMD64 //------------------------------------------------------------------------ // fgCheckStmtAfterTailCall: check that statements after the tail call stmt @@ -17677,7 +17677,7 @@ bool Compiler::fgCheckStmtAfterTailCall() Statement* nextMorphStmt = callStmt->GetNextStmt(); -#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) +#if !defined(FEATURE_CORECLR) && defined(TARGET_AMD64) // Legacy Jit64 Compat: // There could be any number of GT_NOPs between tail call and GT_RETURN. // That is tail call pattern could be one of the following: @@ -17723,7 +17723,7 @@ bool Compiler::fgCheckStmtAfterTailCall() // Next skip any GT_NOP nodes after the pop nextMorphStmt = SkipNopStmts(nextMorphStmt); } -#endif // !FEATURE_CORECLR && _TARGET_AMD64_ +#endif // !FEATURE_CORECLR && TARGET_AMD64 // Check that the rest stmts in the block are in one of the following pattern: // 1) ret(void) diff --git a/src/coreclr/src/jit/namedintrinsiclist.h b/src/coreclr/src/jit/namedintrinsiclist.h index 77af04583715b..0caf2fb0e1d60 100644 --- a/src/coreclr/src/jit/namedintrinsiclist.h +++ b/src/coreclr/src/jit/namedintrinsiclist.h @@ -27,15 +27,15 @@ enum NamedIntrinsic : unsigned short NI_Throw_PlatformNotSupportedException, NI_HW_INTRINSIC_START, -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #define HARDWARE_INTRINSIC(id, name, isa, ival, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ NI_##id, #include "hwintrinsiclistxarch.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define HARDWARE_INTRINSIC(isa, name, ival, size, numarg, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \ NI_##isa##_##name, #include "hwintrinsiclistarm64.h" -#endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_XARCH) && !defined(TARGET_ARM64) NI_HW_INTRINSIC_END, #endif // FEATURE_HW_INTRINSICS diff --git a/src/coreclr/src/jit/optcse.cpp b/src/coreclr/src/jit/optcse.cpp index ff4343f76f446..cb106f4467f8b 100644 --- a/src/coreclr/src/jit/optcse.cpp +++ b/src/coreclr/src/jit/optcse.cpp @@ -1617,7 +1617,7 @@ class CSE_Heuristic onStack = true; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Treat floating point and 64 bit integers as always on the stack if (varTypeIsFloating(varDsc->TypeGet()) || varTypeIsLong(varDsc->TypeGet())) onStack = true; @@ -1653,7 +1653,7 @@ class CSE_Heuristic } } } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (frameSize > 0x080) { // We likely have a large stack frame. @@ -1679,7 +1679,7 @@ class CSE_Heuristic hugeFrame = true; break; // early out, we don't need to keep increasing frameSize } -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 if (frameSize > 0x1000) { // We likely have a large stack frame. 
@@ -1730,7 +1730,7 @@ class CSE_Heuristic { enregCount++; // The primitive types, including TYP_SIMD types use one register -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT if (varTyp == TYP_LONG) { enregCount++; // on 32-bit targets longs use two registers @@ -2242,11 +2242,11 @@ class CSE_Heuristic printf("Codesize CSE Promotion (%s frame)\n", hugeFrame ? "huge" : "large"); } #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /* The following formula is good choice when optimizing CSE for SMALL_CODE */ cse_def_cost = 6; // mov [EBP-0x00001FC],reg cse_use_cost = 5; // [EBP-0x00001FC] -#else // _TARGET_ARM_ +#else // TARGET_ARM if (hugeFrame) { cse_def_cost = 10 + 2; // movw/movt r10 and str reg,[sp+r10] @@ -2267,26 +2267,26 @@ class CSE_Heuristic printf("Codesize CSE Promotion (small frame)\n"); } #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH /* The following formula is good choice when optimizing CSE for SMALL_CODE */ cse_def_cost = 3; // mov [EBP-1C],reg cse_use_cost = 2; // [EBP-1C] -#else // _TARGET_ARM_ +#else // TARGET_ARM cse_def_cost = 2; // str reg,[sp+0x9c] cse_use_cost = 2; // ldr reg,[sp+0x9c] #endif } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (varTypeIsFloating(candidate->Expr()->TypeGet())) { // floating point loads/store encode larger cse_def_cost += 2; cse_use_cost += 1; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } else // not SMALL_CODE ... { @@ -3142,7 +3142,7 @@ bool Compiler::optIsCSEcandidate(GenTree* tree) return false; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (type == TYP_FLOAT) { // TODO-X86-CQ: Revisit this diff --git a/src/coreclr/src/jit/optimizer.cpp b/src/coreclr/src/jit/optimizer.cpp index 53702e0dcc0e3..bbee1db484bcf 100644 --- a/src/coreclr/src/jit/optimizer.cpp +++ b/src/coreclr/src/jit/optimizer.cpp @@ -1752,7 +1752,7 @@ class LoopSearch return false; } -#if defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Disqualify loops where the first block of the loop is a finally target. // The main problem is when multiple loops share a 'first' block that is a finally // target and we canonicalize the loops by adding a new loop head. In that case, we @@ -1767,7 +1767,7 @@ class LoopSearch JITDUMP("Loop 'first' " FMT_BB " is a finally target. Rejecting loop.\n", first->bbNum); return false; } -#endif // defined(FEATURE_EH_FUNCLETS) && defined(_TARGET_ARM_) +#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Compact the loop (sweep through it and move out any blocks that aren't part of the // flow cycle), and find the exits. 
@@ -5562,7 +5562,7 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu /* Constants can usually be narrowed by changing their value */ CLANG_FORMAT_COMMENT_ANCHOR; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT __int64 lval; __int64 lmask; @@ -5635,14 +5635,14 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu case TYP_USHORT: imask = 0x0000FFFF; break; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case TYP_INT: imask = 0x7FFFFFFF; break; case TYP_UINT: imask = 0xFFFFFFFF; break; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT default: return false; } @@ -5652,7 +5652,7 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu return false; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (doit) { tree->gtType = TYP_INT; @@ -5662,7 +5662,7 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu fgValueNumberTreeConst(tree); } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT return true; @@ -6549,7 +6549,7 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt) pLoopDsc->lpLoopVarCount = VarSetOps::Count(this, loopVars); pLoopDsc->lpHoistedExprCount = 0; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT unsigned longVarsCount = VarSetOps::Count(this, lvaLongVars); if (longVarsCount > 0) @@ -6570,7 +6570,7 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt) pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars); pLoopDsc->lpVarInOutCount += VarSetOps::Count(this, inOutLongVars); } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT #ifdef DEBUG if (verbose) @@ -6672,7 +6672,7 @@ bool Compiler::optIsProfitableToHoistableTree(GenTree* tree, unsigned lnum) { availRegCount += CNT_CALLEE_TRASH_FLOAT - 1; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // For ARM each double takes two FP registers // For now on ARM we won't track singles/doubles // and instead just assume that we always have doubles. @@ -6691,7 +6691,7 @@ bool Compiler::optIsProfitableToHoistableTree(GenTree* tree, unsigned lnum) { availRegCount += CNT_CALLEE_TRASH - 1; } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT // For our 32-bit targets Long types take two registers. if (varTypeIsLong(tree->TypeGet())) { @@ -7221,7 +7221,7 @@ void Compiler::optHoistCandidate(GenTree* tree, unsigned lnum, LoopHoistContext* if (!varTypeIsFloating(tree->TypeGet())) { optLoopTable[lnum].lpHoistedExprCount++; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT // For our 32-bit targets Long types take two registers. if (varTypeIsLong(tree->TypeGet())) { @@ -7630,7 +7630,7 @@ void Compiler::optComputeLoopSideEffects() } VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this)); -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this)); #endif @@ -7643,7 +7643,7 @@ void Compiler::optComputeLoopSideEffects() { VarSetOps::AddElemD(this, lvaFloatVars, varDsc->lvVarIndex); } -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT else if (varTypeIsLong(varDsc->lvType)) { VarSetOps::AddElemD(this, lvaLongVars, varDsc->lvVarIndex); @@ -8344,7 +8344,7 @@ bool Compiler::optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsN { return false; } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (index->gtOper != GT_CAST) { return false; @@ -8944,7 +8944,7 @@ void Compiler::optOptimizeBools() { continue; } -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Skip the small operand which we cannot encode. 
if (varTypeIsSmall(c1->TypeGet())) continue; diff --git a/src/coreclr/src/jit/protononjit/CMakeLists.txt b/src/coreclr/src/jit/protononjit/CMakeLists.txt index 101ff259fa242..82492ed6a0edf 100644 --- a/src/coreclr/src/jit/protononjit/CMakeLists.txt +++ b/src/coreclr/src/jit/protononjit/CMakeLists.txt @@ -10,16 +10,16 @@ if(FEATURE_READYTORUN) endif(FEATURE_READYTORUN) if (CLR_CMAKE_HOST_ARCH_I386) - remove_definitions(-D_TARGET_X86_) + remove_definitions(-DTARGET_X86) remove_definitions(-DFEATURE_SIMD) remove_definitions(-DFEATURE_HW_INTRINSICS) - add_definitions(-D_TARGET_ARM_) + add_definitions(-DTARGET_ARM) add_definitions(-DFEATURE_EH_FUNCLETS) set(JIT_ARCH_SOURCES ${JIT_ARM_SOURCES}) set(JIT_ARCH_LINK_LIBRARIES gcinfo_arm) elseif(CLR_CMAKE_HOST_ARCH_AMD64) - remove_definitions(-D_TARGET_AMD64_) - add_definitions(-D_TARGET_ARM64_) + remove_definitions(-DTARGET_AMD64) + add_definitions(-DTARGET_ARM64) set(JIT_ARCH_SOURCES ${JIT_ARM64_SOURCES}) set(JIT_ARCH_LINK_LIBRARIES gcinfo_arm64) else() diff --git a/src/coreclr/src/jit/protononjit/protononjit.nativeproj b/src/coreclr/src/jit/protononjit/protononjit.nativeproj index c0dafe6808f6d..2e2fe0ae9a576 100644 --- a/src/coreclr/src/jit/protononjit/protononjit.nativeproj +++ b/src/coreclr/src/jit/protononjit/protononjit.nativeproj @@ -44,7 +44,7 @@ $(OutputName).def - $(ClDefines);_TARGET_ARM_=1 + $(ClDefines);TARGET_ARM=1 $(ClDefines);ALT_JIT $(SdkLibPath)\kernel32.lib;$(SdkLibPath)\user32.lib;$(SdkLibPath)\advapi32.lib;$(SdkLibPath)\oleaut32.lib;$(SdkLibPath)\uuid.lib diff --git a/src/coreclr/src/jit/rationalize.cpp b/src/coreclr/src/jit/rationalize.cpp index 2c3efd4e60c39..bfad0bf345ee9 100644 --- a/src/coreclr/src/jit/rationalize.cpp +++ b/src/coreclr/src/jit/rationalize.cpp @@ -709,7 +709,7 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge BlockRange().Remove(node); break; -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM) case GT_CLS_VAR: { // Class vars that are the target of an assignment will get rewritten into @@ -732,7 +732,7 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge } } break; -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH case GT_INTRINSIC: // Non-target intrinsics should have already been rewritten back into user calls. diff --git a/src/coreclr/src/jit/regalloc.cpp b/src/coreclr/src/jit/regalloc.cpp index 3e9691cfc8cc8..73c2816dc320b 100644 --- a/src/coreclr/src/jit/regalloc.cpp +++ b/src/coreclr/src/jit/regalloc.cpp @@ -137,7 +137,7 @@ regNumber Compiler::raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc regState->rsCalleeRegArgMaskLiveIn |= inArgMask; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (argDsc->lvType == TYP_DOUBLE) { if (info.compIsVarArgs || opts.compUseSoftFP) @@ -158,7 +158,7 @@ regNumber Compiler::raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc assert(!regState->rsIsFloat); regState->rsCalleeRegArgMaskLiveIn |= genRegMask((regNumber)(inArgReg + 1)); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #if FEATURE_MULTIREG_ARGS if (varTypeIsStruct(argDsc->lvType)) @@ -247,7 +247,7 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG(const char** wbReason)) result = true; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // TODO-ARM64-NYI: This is temporary: force a frame pointer-based frame until genFnProlog can handle non-frame // pointer frames. 
if (!result) @@ -255,7 +255,7 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG(const char** wbReason)) INDEBUG(reason = "Temporary ARM64 force frame pointer"); result = true; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #ifdef DEBUG if ((result == true) && (wbReason != nullptr)) diff --git a/src/coreclr/src/jit/register.h b/src/coreclr/src/jit/register.h index fafafb470957b..f1fb9e2afba19 100644 --- a/src/coreclr/src/jit/register.h +++ b/src/coreclr/src/jit/register.h @@ -13,9 +13,9 @@ #define REGALIAS(alias, realname) #endif -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) /* REGDEF(name, rnum, mask, sname) */ REGDEF(EAX, 0, 0x01, "eax" ) @@ -35,7 +35,7 @@ REGALIAS(RBP, EBP) REGALIAS(RSI, ESI) REGALIAS(RDI, EDI) -#else // !defined(_TARGET_X86_) +#else // !defined(TARGET_X86) /* REGDEF(name, rnum, mask, sname) */ @@ -65,15 +65,15 @@ REGALIAS(EBP, RBP) REGALIAS(ESI, RSI) REGALIAS(EDI, RDI) -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define XMMBASE 16 #define XMMMASK(x) (__int64(1) << ((x)+XMMBASE)) -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 #define XMMBASE 8 #define XMMMASK(x) (__int32(1) << ((x)+XMMBASE)) -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 REGDEF(XMM0, 0+XMMBASE, XMMMASK(0), "mm0" ) REGDEF(XMM1, 1+XMMBASE, XMMMASK(1), "mm1" ) @@ -84,9 +84,9 @@ REGDEF(XMM5, 5+XMMBASE, XMMMASK(5), "mm5" ) REGDEF(XMM6, 6+XMMBASE, XMMMASK(6), "mm6" ) REGDEF(XMM7, 7+XMMBASE, XMMMASK(7), "mm7" ) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 REGDEF(STK, 8+XMMBASE, 0x0000, "STK" ) -#else // !_TARGET_X86_ +#else // !TARGET_X86 REGDEF(XMM8, 8+XMMBASE, XMMMASK(8), "mm8" ) REGDEF(XMM9, 9+XMMBASE, XMMMASK(9), "mm9" ) REGDEF(XMM10, 10+XMMBASE, XMMMASK(10), "mm10" ) @@ -96,12 +96,12 @@ REGDEF(XMM13, 13+XMMBASE, XMMMASK(13), "mm13" ) REGDEF(XMM14, 14+XMMBASE, XMMMASK(14), "mm14" ) REGDEF(XMM15, 15+XMMBASE, XMMMASK(15), "mm15" ) REGDEF(STK, 16+XMMBASE, 0x0000, "STK" ) -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #include "registerarm.h" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #include "registerarm64.h" #else diff --git a/src/coreclr/src/jit/register_arg_convention.cpp b/src/coreclr/src/jit/register_arg_convention.cpp index 93c3b0bef885e..8bbce59cbb99a 100644 --- a/src/coreclr/src/jit/register_arg_convention.cpp +++ b/src/coreclr/src/jit/register_arg_convention.cpp @@ -16,7 +16,7 @@ unsigned InitVarDscInfo::allocRegArg(var_types type, unsigned numRegs /* = 1 */) unsigned resultArgNum = regArgNum(type); bool isBackFilled = false; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Check for back-filling if (varTypeIsFloating(type) && // We only back-fill the float registers !anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet) @@ -35,11 +35,11 @@ unsigned InitVarDscInfo::allocRegArg(var_types type, unsigned numRegs /* = 1 */) assert(resultArgNum < MAX_FLOAT_REG_ARG); isBackFilled = true; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (!isBackFilled) { -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // For System V the reg type counters should be independent. 
nextReg(TYP_INT, numRegs); nextReg(TYP_FLOAT, numRegs); @@ -58,7 +58,7 @@ bool InitVarDscInfo::enoughAvailRegs(var_types type, unsigned numRegs /* = 1 */) unsigned backFillCount = 0; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Check for back-filling if (varTypeIsFloating(type) && // We only back-fill the float registers !anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet) @@ -67,12 +67,12 @@ bool InitVarDscInfo::enoughAvailRegs(var_types type, unsigned numRegs /* = 1 */) { backFillCount = 1; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM return regArgNum(type) + numRegs - backFillCount <= maxRegArgNum(type); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM unsigned InitVarDscInfo::alignReg(var_types type, unsigned requiredRegAlignment) { assert(requiredRegAlignment > 0); @@ -103,7 +103,7 @@ unsigned InitVarDscInfo::alignReg(var_types type, unsigned requiredRegAlignment) return cAlignSkipped; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM bool InitVarDscInfo::canEnreg(var_types type, unsigned numRegs /* = 1 */) { diff --git a/src/coreclr/src/jit/register_arg_convention.h b/src/coreclr/src/jit/register_arg_convention.h index ad20b4a0f543c..3fcce3706a3f8 100644 --- a/src/coreclr/src/jit/register_arg_convention.h +++ b/src/coreclr/src/jit/register_arg_convention.h @@ -19,12 +19,12 @@ struct InitVarDscInfo bool hasRetBufArg; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Support back-filling of FP parameters. This is similar to code in gtMorphArgs() that // handles arguments. regMaskTP fltArgSkippedRegMask; bool anyFloatStackArgs; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #if FEATURE_FASTTAILCALL // It is used to calculate argument stack size information in byte @@ -44,10 +44,10 @@ struct InitVarDscInfo maxIntRegArgNum = MAX_REG_ARG; maxFloatRegArgNum = MAX_FLOAT_REG_ARG; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM fltArgSkippedRegMask = RBM_NONE; anyFloatStackArgs = false; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #if FEATURE_FASTTAILCALL stackArgSize = 0; @@ -71,14 +71,14 @@ struct InitVarDscInfo // Returns the first argument register of the allocated set. unsigned allocRegArg(var_types type, unsigned numRegs = 1); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // We are aligning the register to an ABI-required boundary, such as putting // double-precision floats in even-numbered registers, by skipping one register. // "requiredRegAlignment" is the amount to align to: 1 for no alignment (everything // is 1-aligned), 2 for "double" alignment. // Returns the number of registers skipped. unsigned alignReg(var_types type, unsigned requiredRegAlignment); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM // Return true if it is an enregisterable type and there is room. // Note that for "type", we only care if it is float or not. In particular, @@ -92,7 +92,7 @@ struct InitVarDscInfo regArgNum(type) = maxRegArgNum(type); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void setAnyFloatStackArgs() { @@ -104,7 +104,7 @@ struct InitVarDscInfo return anyFloatStackArgs; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM private: // return max register arg for this type diff --git a/src/coreclr/src/jit/reglist.h b/src/coreclr/src/jit/reglist.h index 7b706110a8ab6..58b02b011cac3 100644 --- a/src/coreclr/src/jit/reglist.h +++ b/src/coreclr/src/jit/reglist.h @@ -9,7 +9,7 @@ #include "tinyarray.h" // The "regList" type is a small set of registerse -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 typedef TinyArray regList; #else // The regList is unused for all other targets. 
diff --git a/src/coreclr/src/jit/regset.cpp b/src/coreclr/src/jit/regset.cpp index 3fd505c3fbe8d..a550df415c030 100644 --- a/src/coreclr/src/jit/regset.cpp +++ b/src/coreclr/src/jit/regset.cpp @@ -24,12 +24,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX /*****************************************************************************/ -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 const regMaskSmall regMasks[] = { #define REGDEF(name, rnum, mask, xname, wname) mask, #include "register.h" }; -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 const regMaskSmall regMasks[] = { #define REGDEF(name, rnum, mask, sname) mask, #include "register.h" @@ -229,11 +229,11 @@ RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) : m_rsCompiler(compiler), m_r rsMaskResvd = RBM_NONE; -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH rsMaskCalleeSaved = RBM_NONE; -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM rsMaskPreSpillRegArg = RBM_NONE; rsMaskPreSpillAlign = RBM_NONE; #endif @@ -297,7 +297,7 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) GenTreeCall* call = nullptr; var_types treeType; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) GenTreePutArgSplit* splitArg = nullptr; GenTreeMultiRegOp* multiReg = nullptr; #endif @@ -308,7 +308,7 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); treeType = retTypeDesc->GetReturnRegType(regIdx); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (tree->OperIsPutArgSplit()) { splitArg = tree->AsPutArgSplit(); @@ -319,7 +319,7 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) multiReg = tree->AsMultiRegOp(); treeType = multiReg->GetRegType(regIdx); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM else { treeType = tree->TypeGet(); @@ -357,7 +357,7 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (splitArg != nullptr) { regFlags = splitArg->GetRegSpillFlagByIdx(regIdx); @@ -370,20 +370,20 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM else { assert(!varTypeIsMultiReg(tree)); tree->gtFlags &= ~GTF_SPILL; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) assert(tree->GetRegNum() == reg || (call != nullptr && call->GetRegNumByIdx(regIdx) == reg) || (splitArg != nullptr && splitArg->GetRegNumByIdx(regIdx) == reg) || (multiReg != nullptr && multiReg->GetRegNumByIdx(regIdx) == reg)); #else assert(tree->GetRegNum() == reg || (call != nullptr && call->GetRegNumByIdx(regIdx) == reg)); -#endif // !_TARGET_ARM_ +#endif // !TARGET_ARM // Are any registers free for spillage? 
SpillDsc* spill = SpillDsc::alloc(m_rsCompiler, this, tempType); @@ -434,7 +434,7 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) regFlags |= GTF_SPILLED; call->SetRegSpillFlagByIdx(regFlags, regIdx); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM else if (splitArg != nullptr) { regFlags |= GTF_SPILLED; @@ -445,10 +445,10 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) regFlags |= GTF_SPILLED; multiReg->SetRegSpillFlagByIdx(regFlags, regIdx); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) /***************************************************************************** * * Spill the top of the FP x87 stack. @@ -485,7 +485,7 @@ void RegSet::rsSpillFPStack(GenTreeCall* call) rsMarkSpill(call, reg); } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) /***************************************************************************** * @@ -549,7 +549,7 @@ TempDsc* RegSet::rsUnspillInPlace(GenTree* tree, regNumber oldReg, unsigned regI flags &= ~GTF_SPILLED; call->SetRegSpillFlagByIdx(flags, regIdx); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) else if (tree->OperIsPutArgSplit()) { GenTreePutArgSplit* splitArg = tree->AsPutArgSplit(); @@ -564,7 +564,7 @@ TempDsc* RegSet::rsUnspillInPlace(GenTree* tree, regNumber oldReg, unsigned regI flags &= ~GTF_SPILLED; multiReg->SetRegSpillFlagByIdx(flags, regIdx); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM else { tree->gtFlags &= ~GTF_SPILLED; @@ -627,7 +627,7 @@ var_types RegSet::tmpNormalizeType(var_types type) { type = TYP_SIMD16; } -#endif // defined(FEATURE_SIMD) && !defined(_TARGET_64BIT_) +#endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT) return type; } @@ -719,14 +719,14 @@ void RegSet::tmpPreAllocateTemps(var_types type, unsigned count) tmpCount++; tmpSize += size; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Adjust tmpSize to accommodate possible alignment padding. // Note that at this point the offsets aren't yet finalized, so we don't yet know if it will be required. tmpSize += TARGET_POINTER_SIZE; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM TempDsc* temp = new (m_rsCompiler, CMK_Unknown) TempDsc(-((int)tmpCount), size, type); @@ -940,7 +940,7 @@ regNumber genRegArgNext(regNumber argReg) switch (argReg) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI // Linux x64 ABI: REG_RDI, REG_RSI, REG_RDX, REG_RCX, REG_R8, REG_R9 @@ -960,7 +960,7 @@ regNumber genRegArgNext(regNumber argReg) return REG_ARG_2; // REG_R8 #endif // !UNIX_AMD64_ABI -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: return REG_NEXT(argReg); diff --git a/src/coreclr/src/jit/regset.h b/src/coreclr/src/jit/regset.h index ac324e7086784..a354c12bc5896 100644 --- a/src/coreclr/src/jit/regset.h +++ b/src/coreclr/src/jit/regset.h @@ -43,12 +43,12 @@ class RegSet public: RegSet(Compiler* compiler, GCInfo& gcInfo); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM regMaskTP rsMaskPreSpillRegs(bool includeAlignment) const { return includeAlignment ? (rsMaskPreSpillRegArg | rsMaskPreSpillAlign) : rsMaskPreSpillRegArg; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM private: // The same descriptor is also used for 'multi-use' register tracking, BTW. 
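The genRegArgNext hunk above distinguishes the two AMD64 calling conventions, and its comments spell out the integer argument-register orders (RCX, RDX, R8, R9 on Windows; RDI, RSI, RDX, RCX, R8, R9 on System V). A small self-contained sketch, using only those documented orders, of why "the next argument register" has to be ABI-specific:

#include <cstdio>
#include <cstring>

static const char* const winX64IntArgs[]  = { "rcx", "rdx", "r8", "r9" };
static const char* const sysvX64IntArgs[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };

// Return the register that follows 'reg' in the given order, or nullptr at the end.
static const char* nextArgReg(const char* const* order, size_t count, const char* reg)
{
    for (size_t i = 0; i + 1 < count; i++)
    {
        if (strcmp(order[i], reg) == 0)
        {
            return order[i + 1];
        }
    }
    return nullptr;
}

int main()
{
    // After rdx the next integer argument register differs between the ABIs:
    // r8 on Windows x64, rcx on System V (Linux) x64.
    printf("windows: rdx -> %s\n", nextArgReg(winX64IntArgs, 4, "rdx"));
    printf("sysv:    rdx -> %s\n", nextArgReg(sysvX64IntArgs, 6, "rdx"));
    return 0;
}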
@@ -124,21 +124,21 @@ class RegSet private: regMaskTP _rsMaskVars; // backing store for rsMaskVars property -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH regMaskTP rsMaskCalleeSaved; // mask of the registers pushed/popped in the prolog/epilog -#endif // _TARGET_ARM_ +#endif // TARGET_ARM public: // TODO-Cleanup: Should be private, but Compiler uses it regMaskTP rsMaskResvd; // mask of the registers that are reserved for special purposes (typically empty) public: // The PreSpill masks are used in LclVars.cpp -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM regMaskTP rsMaskPreSpillAlign; // Mask of alignment padding added to prespill to keep double aligned args // at aligned stack addresses. regMaskTP rsMaskPreSpillRegArg; // mask of incoming registers that are spilled at the start of the prolog // This includes registers used to pass a struct (or part of a struct) // and all enregistered user arguments in a varargs call -#endif // _TARGET_ARM_ +#endif // TARGET_ARM private: //------------------------------------------------------------------------- @@ -158,9 +158,9 @@ class RegSet void rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx = 0); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) void rsSpillFPStack(GenTreeCall* call); -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) SpillDsc* rsGetSpillInfo(GenTree* tree, regNumber reg, SpillDsc** pPrevDsc = nullptr); @@ -212,11 +212,11 @@ class RegSet enum TEMP_CONSTANTS : unsigned { #if defined(FEATURE_SIMD) -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) TEMP_MAX_SIZE = YMM_REGSIZE_BYTES, -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) TEMP_MAX_SIZE = FP_REGSIZE_BYTES, -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) #else // !FEATURE_SIMD TEMP_MAX_SIZE = sizeof(double), #endif // !FEATURE_SIMD diff --git a/src/coreclr/src/jit/scopeinfo.cpp b/src/coreclr/src/jit/scopeinfo.cpp index 2d91597239763..13afb64db3d7c 100644 --- a/src/coreclr/src/jit/scopeinfo.cpp +++ b/src/coreclr/src/jit/scopeinfo.cpp @@ -292,11 +292,11 @@ void CodeGenInterface::siVarLoc::siFillStackVarLoc( case TYP_SIMD16: case TYP_SIMD32: #endif -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case TYP_LONG: case TYP_DOUBLE: -#endif // _TARGET_64BIT_ -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // TARGET_64BIT +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // In the AMD64 ABI we are supposed to pass a struct by reference when its // size is not 1, 2, 4 or 8 bytes in size. 
During fgMorph, the compiler modifies // the IR to comply with the ABI and therefore changes the type of the lclVar @@ -315,7 +315,7 @@ void CodeGenInterface::siVarLoc::siFillStackVarLoc( this->vlType = VLT_STK_BYREF; } else -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) { this->vlType = VLT_STK; } @@ -327,7 +327,7 @@ void CodeGenInterface::siVarLoc::siFillStackVarLoc( } break; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT case TYP_LONG: case TYP_DOUBLE: this->vlType = VLT_STK2; @@ -338,7 +338,7 @@ void CodeGenInterface::siVarLoc::siFillStackVarLoc( this->vlStk2.vls2BaseReg = (regNumber)ICorDebugInfo::REGNUM_AMBIENT_SP; } break; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT default: noway_assert(!"Invalid type"); @@ -369,14 +369,14 @@ void CodeGenInterface::siVarLoc::siFillRegisterVarLoc( case TYP_INT: case TYP_REF: case TYP_BYREF: -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case TYP_LONG: -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT this->vlType = VLT_REG; this->vlReg.vlrReg = varDsc->GetRegNum(); break; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT case TYP_LONG: #if !CPU_HAS_FP_SUPPORT case TYP_DOUBLE: @@ -399,9 +399,9 @@ void CodeGenInterface::siVarLoc::siFillRegisterVarLoc( this->vlRegStk.vlrsStk.vlrssOffset = offset + sizeof(int); } break; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case TYP_FLOAT: case TYP_DOUBLE: // TODO-AMD64-Bug: ndp\clr\src\inc\corinfo.h has a definition of RegNum that only goes up to R15, @@ -410,7 +410,7 @@ void CodeGenInterface::siVarLoc::siFillRegisterVarLoc( this->vlReg.vlrReg = varDsc->GetRegNum(); break; -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT #if CPU_HAS_FP_SUPPORT case TYP_FLOAT: @@ -423,7 +423,7 @@ void CodeGenInterface::siVarLoc::siFillRegisterVarLoc( break; #endif // CPU_HAS_FP_SUPPORT -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT #ifdef FEATURE_SIMD case TYP_SIMD8: @@ -537,7 +537,7 @@ void CodeGenInterface::dumpSiVarLoc(const siVarLoc* varLoc) const } break; -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 case VLT_REG_REG: printf("%s-%s", getRegName(varLoc->vlRegReg.vlrrReg1), getRegName(varLoc->vlRegReg.vlrrReg2)); break; @@ -576,7 +576,7 @@ void CodeGenInterface::dumpSiVarLoc(const siVarLoc* varLoc) const case VLT_FIXED_VA: printf("fxd_va[%d]", varLoc->vlFixedVarArg.vlfvOffset); break; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 default: unreached(); @@ -855,7 +855,7 @@ bool CodeGen::siVerifyLocalVarTab() // are what ICodeDebugInfo is expetecting. void CodeGen::checkICodeDebugInfo() { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 assert((unsigned)ICorDebugInfo::REGNUM_EAX == REG_EAX); assert((unsigned)ICorDebugInfo::REGNUM_ECX == REG_ECX); assert((unsigned)ICorDebugInfo::REGNUM_EDX == REG_EDX); @@ -1485,12 +1485,12 @@ NATIVE_OFFSET CodeGen::psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const NATIVE_OFFSET stackOffset = 0; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // scOffset = offset from caller SP - REGSIZE_BYTES // TODO-Cleanup - scOffset needs to be understood. For now just matching with the existing definition. 
stackOffset = compiler->lvaToCallerSPRelativeOffset(lclVarDsc->lvStkOffs, lclVarDsc->lvFramePointerBased) + REGSIZE_BYTES; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 if (doubleAlignOrFramePointerUsed()) { // REGSIZE_BYTES - for the pushed value of EBP @@ -1500,7 +1500,7 @@ NATIVE_OFFSET CodeGen::psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const { stackOffset = lclVarDsc->lvStkOffs - genTotalFrameSize(); } -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 return stackOffset; } diff --git a/src/coreclr/src/jit/simd.cpp b/src/coreclr/src/jit/simd.cpp index 13e1d3c88fac8..6a25c9ef35586 100644 --- a/src/coreclr/src/jit/simd.cpp +++ b/src/coreclr/src/jit/simd.cpp @@ -75,7 +75,7 @@ int Compiler::getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd) // int Compiler::getSIMDTypeAlignment(var_types simdType) { -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // Fixed length vectors have the following alignment preference // Vector2 = 8 byte alignment // Vector3/4 = 16-byte alignment @@ -96,7 +96,7 @@ int Compiler::getSIMDTypeAlignment(var_types simdType) assert(size == 32); return 32; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return 16; #else assert(!"getSIMDTypeAlignment() unimplemented on target arch"); @@ -373,7 +373,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u const size_t Vector128SizeBytes = 128 / 8; const size_t Vector256SizeBytes = 256 / 8; -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) static_assert_no_msg(YMM_REGSIZE_BYTES == Vector256SizeBytes); static_assert_no_msg(XMM_REGSIZE_BYTES == Vector128SizeBytes); @@ -500,7 +500,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u JITDUMP(" Known type Vector128\n"); } else -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) if (typeHnd == m_simdHandleCache->Vector64FloatHandle) { simdBaseType = TYP_FLOAT; @@ -561,7 +561,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u size = Vector64SizeBytes; JITDUMP(" Known type Vector64\n"); } -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) // slow path search if (simdBaseType == TYP_UNKNOWN) @@ -577,7 +577,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u JITDUMP("HW Intrinsic SIMD Candidate Type %s with Base Type %s\n", className, getClassNameFromMetadata(baseTypeHnd, nullptr)); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if (strcmp(className, "Vector256`1") == 0) { size = Vector256SizeBytes; @@ -639,7 +639,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u } } else -#endif // defined(_TARGET_XARCH_) +#endif // defined(TARGET_XARCH) if (strcmp(className, "Vector128`1") == 0) { size = Vector128SizeBytes; @@ -700,7 +700,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector128\n"); } } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) else if (strcmp(className, "Vector64`1") == 0) { size = Vector64SizeBytes; @@ -761,7 +761,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u JITDUMP(" Unknown Hardware Intrinsic SIMD Type Vector64\n"); } } -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) } } @@ -1170,7 +1170,7 @@ GenTreeSIMD* Compiler::impSIMDGetFixed(var_types simdType, var_types baseType, u return simdTree; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // impSIMDLongRelOpEqual: transforms operands and returns the SIMD 
intrinsic to be applied on // transformed operands to obtain == comparison result. // @@ -1426,7 +1426,7 @@ SIMDIntrinsicID Compiler::impSIMDIntegralRelOpGreaterThanOrEqual( return SIMDIntrinsicBitwiseOr; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. @@ -1455,7 +1455,7 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, assert(isRelOpSIMDIntrinsic(relOpIntrinsicId)); SIMDIntrinsicID intrinsicID = relOpIntrinsicId; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH var_types baseType = *inOutBaseType; if (varTypeIsFloating(baseType)) @@ -1590,10 +1590,10 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, return impSIMDRelOp(intrinsicID, typeHnd, size, inOutBaseType, pOp1, pOp2); } } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // TODO-ARM64-CQ handle comparisons against zero - // _TARGET_ARM64_ doesn't support < and <= on register register comparisons + // TARGET_ARM64 doesn't support < and <= on register register comparisons // Therefore, we need to use > and >= with swapped operands. if (intrinsicID == SIMDIntrinsicLessThan || intrinsicID == SIMDIntrinsicLessThanOrEqual) { @@ -1604,10 +1604,10 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, intrinsicID = (intrinsicID == SIMDIntrinsicLessThan) ? SIMDIntrinsicGreaterThan : SIMDIntrinsicGreaterThanOrEqual; } -#else // !_TARGET_XARCH_ +#else // !TARGET_XARCH assert(!"impSIMDRelOp() unimplemented on target arch"); unreached(); -#endif // !_TARGET_XARCH_ +#endif // !TARGET_XARCH return intrinsicID; } @@ -1628,7 +1628,7 @@ GenTree* Compiler::impSIMDAbs(CORINFO_CLASS_HANDLE typeHnd, var_types baseType, var_types simdType = op1->TypeGet(); GenTree* retVal = nullptr; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // When there is no direct support, Abs(v) could be computed // on integer vectors as follows: // BitVector = v < vector.Zero @@ -1753,7 +1753,7 @@ GenTree* Compiler::impSIMDAbs(CORINFO_CLASS_HANDLE typeHnd, var_types baseType, retVal = gtNewSIMDNode(simdType, op1, SIMDIntrinsicAbs, baseType, size); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (varTypeIsUnsigned(baseType)) { // Abs is a no-op on unsigned integer type vectors @@ -1763,9 +1763,9 @@ GenTree* Compiler::impSIMDAbs(CORINFO_CLASS_HANDLE typeHnd, var_types baseType, { retVal = gtNewSIMDNode(simdType, op1, SIMDIntrinsicAbs, baseType, size); } -#else // !defined(_TARGET_XARCH)_ && !defined(_TARGET_ARM64_) +#else // !defined(_TARGET_XARCH)_ && !defined(TARGET_ARM64) assert(!"Abs intrinsic on non-xarch target not implemented"); -#endif // !_TARGET_XARCH_ +#endif // !TARGET_XARCH return retVal; } @@ -1812,7 +1812,7 @@ GenTree* Compiler::impSIMDSelect( GenTree* andExpr = gtNewSIMDNode(simdType, op2, tmp, SIMDIntrinsicBitwiseAnd, baseType, size); GenTree* dupOp1 = gtCloneExpr(tmp); assert(dupOp1 != nullptr); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // ARM64 implements SIMDIntrinsicBitwiseAndNot as Left & ~Right GenTree* andNotExpr = gtNewSIMDNode(simdType, op3, dupOp1, SIMDIntrinsicBitwiseAndNot, baseType, size); #else @@ -1855,10 +1855,10 @@ GenTree* Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId, var_types simdType = op1->TypeGet(); assert(op2->TypeGet() == simdType); -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) GenTree* simdTree = nullptr; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // 
SSE2 has direct support for float/double/signed word/unsigned byte. // SSE4.1 has direct support for int32/uint32/signed byte/unsigned word. // For other integer types we compute min/max as follows @@ -1932,7 +1932,7 @@ GenTree* Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId, tmp = gtNewLclvNode(tmp->AsLclVarCommon()->GetLclNum(), tmp->TypeGet()); simdTree = gtNewSIMDNode(simdType, simdTree, tmp, adjustIntrinsic, baseType, size); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Arm64 has direct support for all types except int64/uint64 // For which we compute min/max as follows // @@ -2011,10 +2011,10 @@ GenTree* Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId, assert(simdTree != nullptr); return simdTree; -#else // !(defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)) +#else // !(defined(TARGET_XARCH) || defined(TARGET_ARM64)) assert(!"impSIMDMinMax() unimplemented on target arch"); unreached(); -#endif // !(defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)) +#endif // !(defined(TARGET_XARCH) || defined(TARGET_ARM64)) } //------------------------------------------------------------------------ @@ -2922,7 +2922,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, { if (simdIntrinsicID == SIMDIntrinsicMul) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) if ((baseType != TYP_INT) && (baseType != TYP_SHORT)) { // TODO-CQ: implement mul on these integer vectors. @@ -2930,8 +2930,8 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, assert(!"Mul not supported on long/ulong/uint/small int vectors\n"); return nullptr; } -#endif // _TARGET_XARCH_ -#if defined(_TARGET_ARM64_) +#endif // TARGET_XARCH +#if defined(TARGET_ARM64) if ((baseType == TYP_ULONG) && (baseType == TYP_LONG)) { // TODO-CQ: implement mul on these integer vectors. @@ -2939,9 +2939,9 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, assert(!"Mul not supported on long/ulong vectors\n"); return nullptr; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) // common to all integer type vectors if (simdIntrinsicID == SIMDIntrinsicDiv) { @@ -2949,7 +2949,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, assert(!"Div not supported on integer type vectors\n"); return nullptr; } -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) } #endif // DEBUG @@ -2958,7 +2958,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, op2 = impSIMDPopStack(simdType); op1 = impSIMDPopStack(simdType, instMethod); -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH if (simdIntrinsicID == SIMDIntrinsicBitwiseAndNot) { // XARCH implements SIMDIntrinsicBitwiseAndNot as ~op1 & op2, while the @@ -2968,7 +2968,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, op2 = op1; op1 = tmp; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, baseType, size); retVal = simdTree; @@ -3043,14 +3043,14 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, case SIMDIntrinsicDotProduct: { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Right now dot product is supported only for float/double vectors and // int vectors on SSE4/AVX. if (!varTypeIsFloating(baseType) && !(baseType == TYP_INT && getSIMDSupportLevel() >= SIMD_SSE4_Supported)) { return nullptr; } -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH // op1 is a SIMD variable that is the first source and also "this" arg. 
// op2 is a SIMD variable which is the second source. @@ -3068,7 +3068,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, case SIMDIntrinsicSqrt: { -#if (defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)) && defined(DEBUG) +#if (defined(TARGET_XARCH) || defined(TARGET_ARM64)) && defined(DEBUG) // SSE/AVX/ARM64 doesn't support sqrt on integer type vectors and hence // should never be seen as an intrinsic here. See SIMDIntrinsicList.h // for supported base types for this intrinsic. @@ -3077,7 +3077,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, assert(!"Sqrt not supported on integer vectors\n"); return nullptr; } -#endif // (defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)) && defined(DEBUG) +#endif // (defined(TARGET_XARCH) || defined(TARGET_ARM64)) && defined(DEBUG) op1 = impSIMDPopStack(simdType); @@ -3152,7 +3152,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, case SIMDIntrinsicConvertToInt64: { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT op1 = impSIMDPopStack(simdType, instMethod); simdTree = gtNewSIMDNode(simdType, op1, nullptr, simdIntrinsicID, baseType, size); @@ -3220,7 +3220,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, return nullptr; } -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) // XArch/Arm64: also indicate that we use floating point registers. // The need for setting this here is that a method may not have SIMD // type lclvars, but might be exercising SIMD intrinsics on fields of @@ -3228,7 +3228,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode, // // e.g. public Vector ComplexVecFloat::sqabs() { return this.r * this.r + this.i * this.i; } compFloatingPointUsed = true; -#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) // At this point, we have a tree that we are going to store into a destination. // TODO-1stClassStructs: This should be a simple store or assignment, and should not require diff --git a/src/coreclr/src/jit/simd.h b/src/coreclr/src/jit/simd.h index 8874f733da99a..70510dc317c57 100644 --- a/src/coreclr/src/jit/simd.h +++ b/src/coreclr/src/jit/simd.h @@ -19,7 +19,7 @@ enum SIMDLevel { SIMD_Not_Supported = 0, -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // SSE2 - The min bar of SIMD ISA on x86/x64. // Vector length is 128-bit. // Floating-point instructions are legacy SSE encoded. @@ -61,7 +61,7 @@ struct SIMDIntrinsicInfo var_types supportedBaseTypes[SIMD_INTRINSIC_MAX_BASETYPE_COUNT]; }; -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // SSE2 Shuffle control byte to shuffle vector // These correspond to shuffle immediate byte in shufps SSE2 instruction. 
#define SHUFFLE_XXXX 0x00 // 00 00 00 00 diff --git a/src/coreclr/src/jit/simdcodegenxarch.cpp b/src/coreclr/src/jit/simdcodegenxarch.cpp index 83d1093d9f382..ecb5ba856dab4 100644 --- a/src/coreclr/src/jit/simdcodegenxarch.cpp +++ b/src/coreclr/src/jit/simdcodegenxarch.cpp @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma warning(disable : 4310) // cast truncates constant value - happens for (int8_t)SHUFFLE_ZXXX #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH #ifdef FEATURE_SIMD #include "emit.h" @@ -765,7 +765,7 @@ void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode) instruction ins = INS_invalid; -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (op1->OperGet() == GT_LONG) { assert(varTypeIsLong(baseType)); @@ -825,7 +825,7 @@ void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode) } } else -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) if (op1->isContained()) { if (op1->IsIntegralConst(0) || op1->IsFPZero()) @@ -1108,7 +1108,7 @@ void CodeGen::genSIMDIntrinsic32BitConvert(GenTreeSIMD* simdNode) GetEmitter()->emitIns_R_I(INS_psrld, emitActualTypeSize(targetType), tmpReg2, 16); // prepare mask -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 GetEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X5300000053000000); inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_ULONG); #else @@ -1213,7 +1213,7 @@ void CodeGen::genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode) regNumber tmpReg3; SIMDLevel level = compiler->getSIMDSupportLevel(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (baseType == TYP_LONG) { tmpReg = simdNode->ExtractTempReg(RBM_ALLFLOAT); @@ -1271,7 +1271,7 @@ void CodeGen::genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode) GetEmitter()->emitIns_R_I(INS_psrlq, emitActualTypeSize(simdType), tmpReg2, 32); // prepare mask for converting upper 32 bits -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 GetEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X4530000000000000); inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_ULONG); #else @@ -1293,7 +1293,7 @@ void CodeGen::genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode) inst_RV_RV(INS_subpd, targetReg, tmpReg, simdType, emitActualTypeSize(simdType)); // prepare mask for converting lower 32 bits -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 GetEmitter()->emitIns_R_I(INS_mov, EA_8BYTE, tmpIntReg, (ssize_t)0X4330000000000000); inst_RV_RV(INS_mov_i2xmm, tmpReg, tmpIntReg, TYP_ULONG); #else @@ -1319,7 +1319,7 @@ void CodeGen::genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode) } else if ((intrinsicID == SIMDIntrinsicConvertToDouble) && (baseType == TYP_LONG)) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 instruction rightShiftIns = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, TYP_SIMD16); instruction leftShiftIns = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, TYP_SIMD16); @@ -2977,7 +2977,7 @@ void CodeGen::genLoadLclTypeSIMD12(GenTree* treeNode) genProduceReg(treeNode); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //----------------------------------------------------------------------------- // genStoreSIMD12ToStack: store a TYP_SIMD12 (i.e. Vector3) type field to the stack. 
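The simd.h hunk above guards the SSE2 shuffle control bytes (SHUFFLE_XXXX and friends), which the comment ties to the shufps immediate byte. As an editor's sketch under the usual SSE convention (four 2-bit lane selectors packed from bit 0 upward), this shows why SHUFFLE_XXXX, where every destination element picks lane 0, is 0x00:

#include <cstdio>

// Build a shufps/pshufd-style immediate: four 2-bit lane selectors,
// one per destination element, element 0's selector in the low bits.
constexpr unsigned shuffleImm(unsigned e3, unsigned e2, unsigned e1, unsigned e0)
{
    return (e3 << 6) | (e2 << 4) | (e1 << 2) | e0;
}

int main()
{
    printf("SHUFFLE_XXXX     -> 0x%02X\n", shuffleImm(0, 0, 0, 0)); // 0x00: every element takes lane 0
    printf("broadcast lane 3 -> 0x%02X\n", shuffleImm(3, 3, 3, 3)); // 0xFF
    return 0;
}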
@@ -3033,7 +3033,7 @@ void CodeGen::genPutArgStkSIMD12(GenTree* treeNode) genStoreSIMD12ToStack(operandReg, tmpReg); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 //----------------------------------------------------------------------------- // genSIMDIntrinsicUpperSave: save the upper half of a TYP_SIMD32 vector to @@ -3238,4 +3238,4 @@ void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode) } #endif // FEATURE_SIMD -#endif //_TARGET_XARCH_ +#endif // TARGET_XARCH diff --git a/src/coreclr/src/jit/simdintrinsiclist.h b/src/coreclr/src/jit/simdintrinsiclist.h index ccec75ae79e22..9015abbedefe2 100644 --- a/src/coreclr/src/jit/simdintrinsiclist.h +++ b/src/coreclr/src/jit/simdintrinsiclist.h @@ -20,7 +20,7 @@ e) TODO-Cleanup: when we plumb TYP_SIMD through front-end, replace TYP_STRUCT with TYP_SIMD. */ -#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) // Max number of parameters that we model in the table for SIMD intrinsic methods. #define SIMD_INTRINSIC_MAX_MODELED_PARAM_COUNT 3 @@ -87,9 +87,9 @@ SIMD_INTRINSIC("op_Inequality", false, OpInEquality, SIMD_INTRINSIC("op_Addition", false, Add, "+", TYP_STRUCT, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_LONG, TYP_USHORT, TYP_UBYTE, TYP_BYTE, TYP_SHORT, TYP_UINT, TYP_ULONG}) SIMD_INTRINSIC("op_Subtraction", false, Sub, "-", TYP_STRUCT, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_LONG, TYP_USHORT, TYP_UBYTE, TYP_BYTE, TYP_SHORT, TYP_UINT, TYP_ULONG}) -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) SIMD_INTRINSIC("op_Multiply", false, Mul, "*", TYP_STRUCT, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_SHORT,TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // TODO-ARM64-CQ Investigate code sequence to accelerate LONG/ULONG vector multiply SIMD_INTRINSIC("op_Multiply", false, Mul, "*", TYP_STRUCT, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_USHORT, TYP_UBYTE, TYP_BYTE, TYP_SHORT, TYP_UINT, TYP_UNDEF, TYP_UNDEF}) #endif @@ -117,10 +117,10 @@ SIMD_INTRINSIC("op_BitwiseOr", false, BitwiseOr, SIMD_INTRINSIC("op_ExclusiveOr", false, BitwiseXor, "^", TYP_STRUCT, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_LONG, TYP_USHORT, TYP_UBYTE, TYP_BYTE, TYP_SHORT, TYP_UINT, TYP_ULONG}) // Dot Product -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // Is supported only on Vector on AVX. 
SIMD_INTRINSIC("Dot", false, DotProduct, "Dot", TYP_UNKNOWN, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Dot Product does not support LONG/ULONG due to lack of multiply support (see TODO-ARM64-CQ above) SIMD_INTRINSIC("Dot", false, DotProduct, "Dot", TYP_UNKNOWN, 2, {TYP_STRUCT, TYP_STRUCT, TYP_UNDEF}, {TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_USHORT, TYP_UBYTE, TYP_BYTE, TYP_SHORT, TYP_UINT, TYP_UNDEF, TYP_UNDEF}) #endif @@ -147,7 +147,7 @@ SIMD_INTRINSIC("Widen", false, Widen, // Miscellaneous SIMD_INTRINSIC("get_IsHardwareAccelerated", false, HWAccel, "HWAccel", TYP_BOOL, 0, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // Shuffle and Shift operations - these are internal intrinsics as there is no corresponding managed method. // To prevent this being accidentally recognized as an intrinsic, all of the arg types and supported base types is made TYP_UNDEF SIMD_INTRINSIC("ShuffleSSE2", false, ShuffleSSE2, "ShuffleSSE2", TYP_STRUCT, 2, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) @@ -155,7 +155,7 @@ SIMD_INTRINSIC("ShuffleSSE2", false, ShuffleSSE2, // Internal, logical shift operations that shift the entire vector register instead of individual elements of the vector. SIMD_INTRINSIC("ShiftLeftInternal", false, ShiftLeftInternal, "<< Internal", TYP_STRUCT, 2, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) SIMD_INTRINSIC("ShiftRightInternal", false, ShiftRightInternal, ">> Internal", TYP_STRUCT, 2, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH // Internal intrinsics for saving & restoring the upper half of a vector register SIMD_INTRINSIC("UpperSave", false, UpperSave, "UpperSave Internal", TYP_STRUCT, 2, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) @@ -167,9 +167,9 @@ SIMD_INTRINSIC("WidenLo", false, WidenLo, SIMD_INTRINSIC(nullptr, false, Invalid, "Invalid", TYP_UNDEF, 0, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}, {TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF, TYP_UNDEF}) #undef SIMD_INTRINSIC -#else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_) +#else // !defined(TARGET_XARCH) && !defined(TARGET_ARM64) #error SIMD intrinsics not defined for target arch -#endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_) +#endif // !defined(TARGET_XARCH) && !defined(TARGET_ARM64) #endif //FEATURE_SIMD diff --git a/src/coreclr/src/jit/stacklevelsetter.cpp b/src/coreclr/src/jit/stacklevelsetter.cpp index 147cf04eb95d7..4de25359367a0 100644 --- a/src/coreclr/src/jit/stacklevelsetter.cpp +++ b/src/coreclr/src/jit/stacklevelsetter.cpp @@ -339,7 +339,7 @@ void StackLevelSetter::CheckArgCnt() // void StackLevelSetter::CheckAdditionalArgs() { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (comp->compIsProfilerHookNeeded()) { if (maxStackLevel == 0) @@ -348,5 +348,5 @@ void 
StackLevelSetter::CheckAdditionalArgs() maxStackLevel = 1; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } diff --git a/src/coreclr/src/jit/target.h b/src/coreclr/src/jit/target.h index 838fc817461be..cab9272052a9b 100644 --- a/src/coreclr/src/jit/target.h +++ b/src/coreclr/src/jit/target.h @@ -6,21 +6,21 @@ #ifndef _TARGET_H_ #define _TARGET_H_ -#if defined(FEATURE_CORECLR) && defined(_TARGET_UNIX_) +#if defined(FEATURE_CORECLR) && defined(TARGET_UNIX) #define FEATURE_VARARG 0 -#else // !(defined(FEATURE_CORECLR) && defined(_TARGET_UNIX_)) +#else // !(defined(FEATURE_CORECLR) && defined(TARGET_UNIX)) #define FEATURE_VARARG 1 -#endif // !(defined(FEATURE_CORECLR) && defined(_TARGET_UNIX_)) +#endif // !(defined(FEATURE_CORECLR) && defined(TARGET_UNIX)) /*****************************************************************************/ // The following are human readable names for the target architectures -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define TARGET_READABLE_NAME "X86" -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #define TARGET_READABLE_NAME "AMD64" -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define TARGET_READABLE_NAME "ARM" -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define TARGET_READABLE_NAME "ARM64" #else #error Unsupported or unset target architecture @@ -29,13 +29,13 @@ /*****************************************************************************/ // The following are intended to capture only those #defines that cannot be replaced // with static const members of Target -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #define REGMASK_BITS 32 -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #define REGMASK_BITS 64 -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define REGMASK_BITS 64 #else @@ -53,7 +53,7 @@ // be assigned during register allocation. // REG_NA - Used to indicate that a register is either not yet assigned or not required. // -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) enum _regNumber_enum : unsigned { #define REGDEF(name, rnum, mask, sname) REG_##name = rnum, @@ -73,7 +73,7 @@ enum _regMask_enum : unsigned __int64 #include "register.h" }; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) enum _regNumber_enum : unsigned { @@ -94,7 +94,7 @@ enum _regMask_enum : unsigned __int64 #include "register.h" }; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) enum _regNumber_enum : unsigned { @@ -116,7 +116,7 @@ enum _regMask_enum : unsigned #include "register.h" }; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) enum _regNumber_enum : unsigned { @@ -153,7 +153,7 @@ enum _regMask_enum : unsigned // In any case, we believe that is OK to freely cast between these types; no information will // be lost. 
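The target.h hunks here and just below pair REGMASK_BITS (32 on XARCH, 64 on ARM and ARM64, per this diff) with the width of regMaskTP. A hedged sketch of that invariant only, with *_SKETCH names that are illustrative rather than the JIT's:

#include <cstdint>

// The mask type must carry at least one bit per register, i.e. REGMASK_BITS bits.
#if defined(TARGET_ARM) || defined(TARGET_ARM64)
typedef uint64_t regMaskTP_sketch;
const unsigned REGMASK_BITS_SKETCH = 64;
#else
typedef uint32_t regMaskTP_sketch;
const unsigned REGMASK_BITS_SKETCH = 32;
#endif

static_assert(sizeof(regMaskTP_sketch) * 8 >= REGMASK_BITS_SKETCH,
              "regMaskTP must provide one bit per register");

int main()
{
    return 0;
}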
-#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH typedef unsigned __int64 regMaskTP; #else typedef unsigned regMaskTP; @@ -197,14 +197,14 @@ typedef unsigned char regNumberSmall; /*****************************************************************************/ // The pseudorandom nop insertion is not necessary for current CoreCLR scenarios -// #if defined(FEATURE_CORECLR) && !defined(_TARGET_ARM_) +// #if defined(FEATURE_CORECLR) && !defined(TARGET_ARM) // #define PSEUDORANDOM_NOP_INSERTION // #endif /*****************************************************************************/ // clang-format off -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define CPU_LOAD_STORE_ARCH 0 #define CPU_HAS_FP_SUPPORT 1 @@ -482,7 +482,7 @@ typedef unsigned char regNumberSmall; #define RBM_STACK_PROBE_HELPER_TRASH RBM_NONE -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // TODO-AMD64-CQ: Fine tune the following xxBlk threshold values: #define CPU_LOAD_STORE_ARCH 0 @@ -872,13 +872,13 @@ typedef unsigned char regNumberSmall; #define REG_STACK_PROBE_HELPER_ARG REG_R11 #define RBM_STACK_PROBE_HELPER_ARG RBM_R11 -#ifdef _TARGET_UNIX_ +#ifdef TARGET_UNIX #define RBM_STACK_PROBE_HELPER_TRASH RBM_NONE -#else // !_TARGET_UNIX_ +#else // !TARGET_UNIX #define RBM_STACK_PROBE_HELPER_TRASH RBM_RAX -#endif // !_TARGET_UNIX_ +#endif // !TARGET_UNIX -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // TODO-ARM-CQ: Use shift for division by power of 2 // TODO-ARM-CQ: Check for sdiv/udiv at runtime and generate it if available @@ -1187,7 +1187,7 @@ typedef unsigned char regNumberSmall; #define RBM_STACK_PROBE_HELPER_CALL_TARGET RBM_R5 #define RBM_STACK_PROBE_HELPER_TRASH (RBM_R5 | RBM_LR) -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) #define CPU_LOAD_STORE_ARCH 1 #define CPU_HAS_FP_SUPPORT 1 @@ -1535,7 +1535,7 @@ typedef unsigned char regNumberSmall; #error Unsupported or unset target architecture #endif -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH #define JMP_DIST_SMALL_MAX_NEG (-128) #define JMP_DIST_SMALL_MAX_POS (+127) @@ -1552,7 +1552,7 @@ typedef unsigned char regNumberSmall; #define PUSH_INST_SIZE (5) #define CALL_INST_SIZE (5) -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH C_ASSERT(REG_FIRST == 0); C_ASSERT(REG_INT_FIRST < REG_INT_LAST); @@ -1647,7 +1647,7 @@ inline bool genIsValidFloatReg(regNumber reg) return reg >= REG_FP_FIRST && reg <= REG_FP_LAST; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM /***************************************************************************** * Return true if the register is a valid floating point double register @@ -1657,7 +1657,7 @@ inline bool genIsValidDoubleReg(regNumber reg) return genIsValidFloatReg(reg) && (((reg - REG_FP_FIRST) & 0x1) == 0); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM //------------------------------------------------------------------------------------------- // hasFixedRetBuffReg: @@ -1665,7 +1665,7 @@ inline bool genIsValidDoubleReg(regNumber reg) // inline bool hasFixedRetBuffReg() { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return true; #else return false; @@ -1679,7 +1679,7 @@ inline bool hasFixedRetBuffReg() inline regNumber theFixedRetBuffReg() { assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return REG_ARG_RET_BUFF; #else return REG_NA; @@ -1693,7 +1693,7 @@ inline regNumber theFixedRetBuffReg() inline regMaskTP theFixedRetBuffMask() { assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this 
method -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return RBM_ARG_RET_BUFF; #else return 0; @@ -1707,7 +1707,7 @@ inline regMaskTP theFixedRetBuffMask() inline unsigned theFixedRetBuffArgNum() { assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 return RET_BUFF_ARGNUM; #else return BAD_VAR_NUM; @@ -1769,7 +1769,7 @@ inline bool isValidFloatArgReg(regNumber reg) * Can the register hold the argument type? */ -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM inline bool floatRegCanHoldType(regNumber reg, var_types type) { assert(genIsValidFloatReg(reg)); @@ -1807,7 +1807,7 @@ extern const regMaskSmall regMasks[REG_COUNT]; inline regMaskTP genRegMask(regNumber reg) { assert((unsigned)reg < ArrLen(regMasks)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // shift is faster than a L1 hit on modern x86 // (L1 latency on sandy bridge is 4 cycles for [base] and 5 for [base + index*c] ) // the reason this is AMD-only is because the x86 BE will try to get reg masks for REG_STK @@ -1827,11 +1827,11 @@ inline regMaskTP genRegMask(regNumber reg) inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBLE */) { -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_X86_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_X86) assert(genIsValidFloatReg(reg)); assert((unsigned)reg < ArrLen(regMasks)); return regMasks[reg]; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) assert(floatRegCanHoldType(reg, type)); assert(reg >= REG_F0 && reg <= REG_F31); @@ -1869,7 +1869,7 @@ inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBLE * // inline regMaskTP genRegMask(regNumber regNum, var_types type) { -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM return genRegMask(regNum); #else regMaskTP regMask = RBM_NONE; @@ -1907,7 +1907,7 @@ inline regNumber regNextOfType(regNumber reg, var_types type) { regNumber regReturn; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Skip odd FP registers for double-precision types @@ -1918,7 +1918,7 @@ inline regNumber regNextOfType(regNumber reg, var_types type) { regReturn = REG_NEXT(reg); } -#else // _TARGET_ARM_ +#else // TARGET_ARM regReturn = REG_NEXT(reg); #endif @@ -1954,10 +1954,10 @@ inline bool isFloatRegType(var_types type) #endif } -// If the WINDOWS_AMD64_ABI is defined make sure that _TARGET_AMD64_ is also defined. +// If the WINDOWS_AMD64_ABI is defined make sure that TARGET_AMD64 is also defined. #if defined(WINDOWS_AMD64_ABI) -#if !defined(_TARGET_AMD64_) -#error When WINDOWS_AMD64_ABI is defined you must define _TARGET_AMD64_ defined as well. +#if !defined(TARGET_AMD64) +#error When WINDOWS_AMD64_ABI is defined you must define TARGET_AMD64 defined as well. 
#endif #endif @@ -1974,13 +1974,13 @@ C_ASSERT((RBM_INT_CALLEE_SAVED & RBM_FPBASE) == RBM_NONE); #endif /*****************************************************************************/ -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT typedef unsigned __int64 target_size_t; typedef __int64 target_ssize_t; -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT typedef unsigned int target_size_t; typedef int target_ssize_t; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT C_ASSERT(sizeof(target_size_t) == TARGET_POINTER_SIZE); C_ASSERT(sizeof(target_ssize_t) == TARGET_POINTER_SIZE); diff --git a/src/coreclr/src/jit/targetamd64.cpp b/src/coreclr/src/jit/targetamd64.cpp index 0cb302ae34984..78d77fea89361 100644 --- a/src/coreclr/src/jit/targetamd64.cpp +++ b/src/coreclr/src/jit/targetamd64.cpp @@ -9,11 +9,11 @@ #pragma hdrstop #endif -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #include "target.h" const char* Target::g_tgtCPUName = "x64"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 diff --git a/src/coreclr/src/jit/targetarm.cpp b/src/coreclr/src/jit/targetarm.cpp index f0ea5ca5341f2..64e0c92d21c9a 100644 --- a/src/coreclr/src/jit/targetarm.cpp +++ b/src/coreclr/src/jit/targetarm.cpp @@ -9,11 +9,11 @@ #pragma hdrstop #endif -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) #include "target.h" const char* Target::g_tgtCPUName = "arm"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM diff --git a/src/coreclr/src/jit/targetarm64.cpp b/src/coreclr/src/jit/targetarm64.cpp index 2acbe1a050adf..cba0916158f14 100644 --- a/src/coreclr/src/jit/targetarm64.cpp +++ b/src/coreclr/src/jit/targetarm64.cpp @@ -9,11 +9,11 @@ #pragma hdrstop #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) #include "target.h" const char* Target::g_tgtCPUName = "arm64"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 diff --git a/src/coreclr/src/jit/targetx86.cpp b/src/coreclr/src/jit/targetx86.cpp index 500f4e06512a1..63128689f27ff 100644 --- a/src/coreclr/src/jit/targetx86.cpp +++ b/src/coreclr/src/jit/targetx86.cpp @@ -9,11 +9,11 @@ #pragma hdrstop #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #include "target.h" const char* Target::g_tgtCPUName = "x86"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_L2R; -#endif // _TARGET_X86_ +#endif // TARGET_X86 diff --git a/src/coreclr/src/jit/treelifeupdater.cpp b/src/coreclr/src/jit/treelifeupdater.cpp index 3e6f345f6a1c1..3396948705aa9 100644 --- a/src/coreclr/src/jit/treelifeupdater.cpp +++ b/src/coreclr/src/jit/treelifeupdater.cpp @@ -41,7 +41,7 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree) LclVarDsc* varDsc = compiler->lvaTable + lclNum; #ifdef DEBUG -#if !defined(_TARGET_AMD64_) +#if !defined(TARGET_AMD64) // There are no addr nodes on ARM and we are experimenting with encountering vars in 'random' order. // Struct fields are not traversed in a consistent order, so ignore them when // verifying that we see the var nodes in execution order @@ -59,7 +59,7 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree) // dereferenced, so we can't say that this is not a use or def. 
} } -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 #endif // DEBUG compiler->compCurLifeTree = tree; diff --git a/src/coreclr/src/jit/typeinfo.cpp b/src/coreclr/src/jit/typeinfo.cpp index 51429cca3867e..6dbafd9ec20c8 100644 --- a/src/coreclr/src/jit/typeinfo.cpp +++ b/src/coreclr/src/jit/typeinfo.cpp @@ -219,7 +219,7 @@ BOOL typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd, { return tiCompatibleWithByRef(CompHnd, child, parent); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // On 64-bit targets we have precise representation for native int, so these rules // represent the fact that the ECMA spec permits the implicit conversion // between an int32 and a native int. @@ -231,7 +231,7 @@ BOOL typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd, { return TRUE; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT return FALSE; } @@ -362,7 +362,7 @@ BOOL typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const { return tiCompatibleWithByRef(CompHnd, *pSrc, *pDest); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // On 64-bit targets we have precise representation for native int, so these rules // represent the fact that the ECMA spec permits the implicit conversion // between an int32 and a native int. @@ -376,7 +376,7 @@ BOOL typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const *changed = true; return TRUE; } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT FAIL: *pDest = typeInfo(); diff --git a/src/coreclr/src/jit/typelist.h b/src/coreclr/src/jit/typelist.h index 34ea543608c4e..a700d0fe6d8e8 100644 --- a/src/coreclr/src/jit/typelist.h +++ b/src/coreclr/src/jit/typelist.h @@ -7,7 +7,7 @@ #define PS EA_PTRSIZE #define PST (TARGET_POINTER_SIZE / sizeof(int)) -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define VTF_I32 0 #define VTF_I64 VTF_I #else diff --git a/src/coreclr/src/jit/unwind.cpp b/src/coreclr/src/jit/unwind.cpp index 5c9ada213b9d1..96c8a98fd5da4 100644 --- a/src/coreclr/src/jit/unwind.cpp +++ b/src/coreclr/src/jit/unwind.cpp @@ -118,7 +118,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, #endif // FEATURE_EH_FUNCLETS -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) void Compiler::createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR cfiOpcode, short dwarfReg, INT offset) { @@ -142,14 +142,14 @@ void Compiler::unwindPushPopCFI(regNumber reg) // since it is pushed as a frame register. 
| RBM_FPBASE #endif -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) | RBM_R11 | RBM_LR | RBM_PC #endif ; if (relOffsetMask & genRegMask(reg)) { -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM createCfiCode(func, cbProlog, CFI_ADJUST_CFA_OFFSET, DWARF_REG_ILLEGAL, REGSIZE_BYTES); #endif createCfiCode(func, cbProlog, CFI_REL_OFFSET, mapRegNumToDwarfReg(reg)); @@ -377,7 +377,7 @@ void Compiler::DumpCfiInfo(bool isHotCode, } #endif // DEBUG -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX //------------------------------------------------------------------------ // Compiler::unwindGetCurrentOffset: Calculate the current byte offset of the @@ -399,7 +399,7 @@ UNATIVE_OFFSET Compiler::unwindGetCurrentOffset(FuncInfoDsc* func) } else { -#if defined(_TARGET_AMD64_) || (defined(_TARGET_UNIX_) && (defined(_TARGET_ARMARCH_) || defined(_TARGET_X86_))) +#if defined(TARGET_AMD64) || (defined(TARGET_UNIX) && (defined(TARGET_ARMARCH) || defined(TARGET_X86))) assert(func->startLoc != nullptr); offset = func->startLoc->GetFuncletPrologOffset(GetEmitter()); #else @@ -410,19 +410,19 @@ UNATIVE_OFFSET Compiler::unwindGetCurrentOffset(FuncInfoDsc* func) return offset; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // See unwindAmd64.cpp -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // See unwindArm64.cpp -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // See unwindArm.cpp -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // See unwindX86.cpp diff --git a/src/coreclr/src/jit/unwind.h b/src/coreclr/src/jit/unwind.h index 06396f2a9af77..53105dd759f07 100644 --- a/src/coreclr/src/jit/unwind.h +++ b/src/coreclr/src/jit/unwind.h @@ -11,7 +11,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // Windows no longer imposes a maximum prolog size. However, we still have an // assert here just to inform us if we increase the size of the prolog @@ -19,7 +19,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // OS unwinder to having as few unwind codes as possible. // You can increase this "max" number if necessary. -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) const unsigned MAX_PROLOG_SIZE_BYTES = 44; const unsigned MAX_EPILOG_SIZE_BYTES = 44; #define UWC_END 0xFF // "end" unwind code @@ -27,7 +27,7 @@ const unsigned MAX_EPILOG_SIZE_BYTES = 44; #define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record #define UW_MAX_EPILOG_START_INDEX 0xFFU // Max number that can be encoded in the "Epilog Start Index" field // of the .pdata record -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) const unsigned MAX_PROLOG_SIZE_BYTES = 100; const unsigned MAX_EPILOG_SIZE_BYTES = 100; #define UWC_END 0xE4 // "end" unwind code @@ -35,7 +35,7 @@ const unsigned MAX_EPILOG_SIZE_BYTES = 100; #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) #define UW_MAX_CODE_WORDS_COUNT 31 #define UW_MAX_EPILOG_START_INDEX 0x3FFU -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #define UW_MAX_EPILOG_COUNT 31 // Max number that can be encoded in the "Epilog count" field // of the .pdata record @@ -128,11 +128,11 @@ class UnwindCodesBase bool IsEndCode(BYTE b) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) return b >= 0xFD; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return (b == UWC_END); // TODO-ARM64-Bug?: what about the "end_c" code? 
-#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } #ifdef DEBUG @@ -795,9 +795,9 @@ class UnwindInfo : public UnwindBase return uwiCurLoc; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) unsigned GetInstructionSize(); -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) void CaptureLocation(); @@ -810,15 +810,15 @@ class UnwindInfo : public UnwindBase #ifdef DEBUG -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Given the first byte of the unwind code, check that its opsize matches // the last instruction added in the emitter. void CheckOpsize(BYTE b1); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) void CheckOpsize(BYTE b1) { } // nothing to do; all instructions are 4 bytes -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) void Dump(bool isHotCode, int indent = 0); @@ -865,4 +865,4 @@ void DumpUnwindInfo(Compiler* comp, #endif // DEBUG -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH diff --git a/src/coreclr/src/jit/unwindamd64.cpp b/src/coreclr/src/jit/unwindamd64.cpp index 6f62b777e9ba5..7ff069e1da9a4 100644 --- a/src/coreclr/src/jit/unwindamd64.cpp +++ b/src/coreclr/src/jit/unwindamd64.cpp @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI short Compiler::mapRegNumToDwarfReg(regNumber reg) { @@ -889,4 +889,4 @@ void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 diff --git a/src/coreclr/src/jit/unwindarm.cpp b/src/coreclr/src/jit/unwindarm.cpp index ad980f9e5ab1e..843083efb90a1 100644 --- a/src/coreclr/src/jit/unwindarm.cpp +++ b/src/coreclr/src/jit/unwindarm.cpp @@ -16,7 +16,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(_TARGET_ARM_) && defined(_TARGET_UNIX_) +#if defined(TARGET_ARM) && defined(TARGET_UNIX) short Compiler::mapRegNumToDwarfReg(regNumber reg) { short dwarfReg = DWARF_REG_ILLEGAL; @@ -173,9 +173,9 @@ short Compiler::mapRegNumToDwarfReg(regNumber reg) return dwarfReg; } -#endif // _TARGET_ARM_ && _TARGET_UNIX_ +#endif // TARGET_ARM && TARGET_UNIX -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -190,13 +190,13 @@ void Compiler::unwindBegProlog() { assert(compGeneratingProlog); -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { unwindBegPrologCFI(); return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX FuncInfoDsc* func = funCurrentFunc(); @@ -222,12 +222,12 @@ void Compiler::unwindBegEpilog() { assert(compGeneratingEpilog); -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX funCurrentFunc()->uwi.AddEpilog(); } @@ -237,7 +237,7 @@ void Compiler::unwindEndEpilog() assert(compGeneratingEpilog); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16) { @@ -368,7 +368,7 @@ void Compiler::unwindPushMaskInt(regMaskTP maskInt) ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR)) == 0); -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { // If we are 
pushing LR, we should give unwind codes in terms of caller's PC @@ -379,7 +379,7 @@ void Compiler::unwindPushMaskInt(regMaskTP maskInt) unwindPushPopMaskCFI(maskInt, false); return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_LR)) == maskInt); // Can PUSH use the 16-bit encoding? unwindPushPopMaskInt(maskInt, useOpsize16); @@ -390,25 +390,25 @@ void Compiler::unwindPushMaskFloat(regMaskTP maskFloat) // Only floating point registers should be in maskFloat assert((maskFloat & RBM_ALLFLOAT) == maskFloat); -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { unwindPushPopMaskCFI(maskFloat, true); return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX unwindPushPopMaskFloat(maskFloat); } void Compiler::unwindPopMaskInt(regMaskTP maskInt) { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX // Only r0-r12 and lr and pc are supported (pc is mapped to lr when encoding) assert((maskInt & @@ -431,12 +431,12 @@ void Compiler::unwindPopMaskInt(regMaskTP maskInt) void Compiler::unwindPopMaskFloat(regMaskTP maskFloat) { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX // Only floating point registers should be in maskFloat assert((maskFloat & RBM_ALLFLOAT) == maskFloat); @@ -445,7 +445,7 @@ void Compiler::unwindPopMaskFloat(regMaskTP maskFloat) void Compiler::unwindAllocStack(unsigned size) { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -454,7 +454,7 @@ void Compiler::unwindAllocStack(unsigned size) } return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -499,7 +499,7 @@ void Compiler::unwindAllocStack(unsigned size) void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -508,7 +508,7 @@ void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) } return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -527,12 +527,12 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset) void Compiler::unwindBranch16() { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -543,12 +543,12 @@ void Compiler::unwindBranch16() void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or 4 bytes for Thumb2 instruction { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -577,19 +577,19 @@ void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or 4 INDEBUG(pu->uwiAddingNOP = false); } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) // The instructions between the last captured "current state" and the current instruction // are in the prolog but have no effect for unwinding. Emit the appropriate NOP unwind codes // for them. 
void Compiler::unwindPadding() { -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX UnwindInfo* pu = &funCurrentFunc()->uwi; GetEmitter()->emitUnwindNopPadding(pu->GetCurrentEmitterLocation(), this); @@ -614,7 +614,7 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) BOOL isFunclet = (func->funKind == FUNC_ROOT) ? FALSE : TRUE; bool funcHasColdSection = false; -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { DWORD unwindCodeBytes = 0; @@ -627,7 +627,7 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX // If there is cold code, split the unwind data between the hot section and the // cold section. This needs to be done before we split into fragments, as each @@ -687,13 +687,13 @@ void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER); static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER); -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) if (generateCFIUnwindCodes()) { unwindEmitFuncCFI(func, pHotCode, pColdCode); return; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX func->uwi.Allocate((CorJitFuncKind)func->funKind, pHotCode, pColdCode, true); @@ -703,7 +703,7 @@ void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode } } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -828,7 +828,7 @@ unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog) #endif // DEBUG -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) /////////////////////////////////////////////////////////////////////////////// // @@ -937,10 +937,10 @@ void UnwindPrologCodes::GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ U // // This is similar to UnwindEpilogInfo::Match(). // -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Note that if we wanted to handle 0xFD and 0xFE codes, by converting // an existing 0xFF code to one of those, we might do that here. -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) int UnwindPrologCodes::Match(UnwindEpilogInfo* pEpi) { @@ -1266,7 +1266,7 @@ void UnwindFragmentInfo::AddEpilog() void UnwindFragmentInfo::CopyPrologCodes(UnwindFragmentInfo* pCopyFrom) { ufiPrologCodes.CopyFrom(&pCopyFrom->ufiPrologCodes); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 ufiPrologCodes.AddCode(UWC_END_C); #endif } @@ -1478,22 +1478,22 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) // Compute the header -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) noway_assert((functionLength & 1) == 0); DWORD headerFunctionLength = functionLength / 2; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) noway_assert((functionLength & 3) == 0); DWORD headerFunctionLength = functionLength / 4; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 DWORD headerVers = 0; // Version of the unwind info is zero. No other version number is currently defined. DWORD headerXBit = 0; // We never generate "exception data", but the VM might add some. DWORD headerEBit; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) DWORD headerFBit = ufiHasPhantomProlog ? 1 : 0; // Is this data a fragment in the sense of the unwind data // specification? 
That is, do the prolog codes represent a real // prolog or not? -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) DWORD headerEpilogCount; // This depends on how we set headerEBit. DWORD headerCodeWords; DWORD headerExtendedEpilogCount = 0; // This depends on how we set headerEBit. @@ -1533,13 +1533,13 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) IMPL_LIMITATION("unwind data too large"); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | (headerFBit << 22) | (headerEpilogCount << 23) | (headerCodeWords << 28); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) | (headerEpilogCount << 22) | (headerCodeWords << 27); -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) ufiPrologCodes.AddHeaderWord(header); @@ -1570,9 +1570,9 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) { for (UnwindEpilogInfo* pEpi = ufiEpilogList; pEpi != NULL; pEpi = pEpi->epiNext) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) DWORD headerCondition = 0xE; // The epilog is unconditional. We don't have epilogs under the IT instruction. -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) // The epilog must strictly follow the prolog. The prolog is in the first fragment of // the hot section. If this epilog is at the start of a fragment, it can't be the @@ -1585,15 +1585,15 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) // NOT the offset from the beginning of the main function. DWORD headerEpilogStartOffset = pEpi->GetStartOffset() - GetStartOffset(); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) noway_assert((headerEpilogStartOffset & 1) == 0); headerEpilogStartOffset /= 2; // The unwind data stores the actual offset divided by 2 (since the low bit of // the actual offset is always zero) -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) noway_assert((headerEpilogStartOffset & 3) == 0); headerEpilogStartOffset /= 4; // The unwind data stores the actual offset divided by 4 (since the low 2 bits // of the actual offset is always zero) -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) DWORD headerEpilogStartIndex = pEpi->GetStartIndex(); @@ -1603,11 +1603,11 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) IMPL_LIMITATION("unwind data too large"); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) DWORD epilogScopeWord = headerEpilogStartOffset | (headerCondition << 20) | (headerEpilogStartIndex << 24); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DWORD epilogScopeWord = headerEpilogStartOffset | (headerEpilogStartIndex << 22); -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) ufiPrologCodes.AddHeaderWord(epilogScopeWord); } @@ -2012,7 +2012,7 @@ void UnwindInfo::AddEpilog() CaptureLocation(); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) unsigned UnwindInfo::GetInstructionSize() { @@ -2020,7 +2020,7 @@ unsigned UnwindInfo::GetInstructionSize() return uwiComp->GetEmitter()->emitGetInstructionSize(uwiCurLoc); } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) void UnwindInfo::CaptureLocation() { @@ -2050,7 +2050,7 @@ void UnwindInfo::AddFragment(emitLocation* emitLoc) #ifdef DEBUG -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Given the first byte of the unwind code, check that its opsize 
matches // the last instruction added in the emitter. @@ -2068,7 +2068,7 @@ void UnwindInfo::CheckOpsize(BYTE b1) assert(opsizeInBytes == instrSizeInBytes); } -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) void UnwindInfo::Dump(bool isHotCode, int indent) { @@ -2096,7 +2096,7 @@ void UnwindInfo::Dump(bool isHotCode, int indent) #endif // DEBUG -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX @@ -2595,6 +2595,6 @@ void DumpUnwindInfo(Compiler* comp, #endif // DEBUG -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH diff --git a/src/coreclr/src/jit/unwindarm64.cpp b/src/coreclr/src/jit/unwindarm64.cpp index 81f98e2286206..9ae4eee3d5128 100644 --- a/src/coreclr/src/jit/unwindarm64.cpp +++ b/src/coreclr/src/jit/unwindarm64.cpp @@ -16,9 +16,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) short Compiler::mapRegNumToDwarfReg(regNumber reg) { short dwarfReg = DWARF_REG_ILLEGAL; @@ -27,7 +27,7 @@ short Compiler::mapRegNumToDwarfReg(regNumber reg) return dwarfReg; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM void Compiler::unwindPush(regNumber reg) { @@ -810,4 +810,4 @@ void DumpUnwindInfo(Compiler* comp, #endif // DEBUG -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 diff --git a/src/coreclr/src/jit/unwindx86.cpp b/src/coreclr/src/jit/unwindx86.cpp index f3273312235a6..997f94b8d01f8 100644 --- a/src/coreclr/src/jit/unwindx86.cpp +++ b/src/coreclr/src/jit/unwindx86.cpp @@ -16,11 +16,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 #error "This should be included only for x86" -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_UNIX_) +#if defined(TARGET_UNIX) short Compiler::mapRegNumToDwarfReg(regNumber reg) { short dwarfReg = DWARF_REG_ILLEGAL; @@ -29,7 +29,7 @@ short Compiler::mapRegNumToDwarfReg(regNumber reg) return dwarfReg; } -#endif // _TARGET_UNIX_ +#endif // TARGET_UNIX void Compiler::unwindBegProlog() { diff --git a/src/coreclr/src/jit/utils.cpp b/src/coreclr/src/jit/utils.cpp index b0249a48b7637..7ac1513528074 100644 --- a/src/coreclr/src/jit/utils.cpp +++ b/src/coreclr/src/jit/utils.cpp @@ -29,13 +29,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // same code for all platforms, hence it is here instead of in the targetXXX.cpp // files. -#ifdef _TARGET_UNIX_ +#ifdef TARGET_UNIX // Should we distinguish Mac? Can we? // Should we distinguish flavors of Unix? Can we? 
const char* Target::g_tgtPlatformName = "Unix"; -#else // !_TARGET_UNIX_ +#else // !TARGET_UNIX const char* Target::g_tgtPlatformName = "Windows"; -#endif // !_TARGET_UNIX_ +#endif // !TARGET_UNIX /*****************************************************************************/ @@ -143,7 +143,7 @@ const char* getRegName(regNumber reg, bool isFloat) return "NA"; } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) static const char* const regNames[] = { #define REGDEF(name, rnum, mask, xname, wname) xname, #include "register.h" @@ -171,7 +171,7 @@ const char* getRegName(unsigned reg, const char* getRegNameFloat(regNumber reg, var_types type) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM assert(genIsValidFloatReg(reg)); if (type == TYP_FLOAT) return getRegName(reg); @@ -237,7 +237,7 @@ const char* getRegNameFloat(regNumber reg, var_types type) return regName; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) static const char* regNamesFloat[] = { #define REGDEF(name, rnum, mask, xname, wname) xname, @@ -311,7 +311,7 @@ void dspRegMask(regMaskTP regMask, size_t minSiz) // What kind of separator should we use for this range (if it is indeed going to be a range)? CLANG_FORMAT_COMMENT_ANCHOR; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // For AMD64, create ranges for int registers R8 through R15, but not the "old" registers. if (regNum >= REG_R8) { @@ -319,7 +319,7 @@ void dspRegMask(regMaskTP regMask, size_t minSiz) inRegRange = true; sep = "-"; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // R17 and R28 can't be the start of a range, since the range would include TEB or FP if ((regNum < REG_R17) || ((REG_R19 <= regNum) && (regNum < REG_R28))) { @@ -327,28 +327,28 @@ void dspRegMask(regMaskTP regMask, size_t minSiz) inRegRange = true; sep = "-"; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (regNum < REG_R12) { regHead = regNum; inRegRange = true; sep = "-"; } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) // No register ranges #else // _TARGET_* #error Unsupported or unset target architecture #endif // _TARGET_* } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) // We've already printed a register. Is this the end of a range? else if ((regNum == REG_INT_LAST) || (regNum == REG_R17) // last register before TEB || (regNum == REG_R28)) // last register before FP -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 // We've already printed a register. Is this the end of a range? else if (regNum == REG_INT_LAST) -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 { const char* nam = getRegName(regNum); printf("%s%s", sep, nam); @@ -1849,7 +1849,7 @@ double FloatingPointUtils::convertUInt64ToDouble(unsigned __int64 uIntVal) double d; if (s64 < 0) { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) // RyuJIT codegen and clang (or gcc) may produce different results for casting uint64 to // double, and the clang result is more accurate. 
For example, // 1) (double)0x84595161401484A0UL --> 43e08b2a2c280290 (RyuJIT codegen or VC++) @@ -1899,7 +1899,7 @@ unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) return u64; } -#ifdef _TARGET_XARCH_ +#ifdef TARGET_XARCH // While the Ecma spec does not specifically call this out, // the case of conversion from negative double to unsigned integer is @@ -1913,7 +1913,7 @@ unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) u64 = UINT64(INT64(d)); #else u64 = UINT64(d); -#endif // _TARGET_XARCH_ +#endif // TARGET_XARCH return u64; } @@ -1992,7 +1992,7 @@ double FloatingPointUtils::round(double x) // We will redirect the macro to this other functions if the macro is not defined for the platform. // This has the side effect of a possible implicit upcasting for arguments passed in and an explicit // downcasting for the _copysign() call. -#if (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL) +#if (defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_ARM64)) && !defined(TARGET_UNIX) #if !defined(_copysignf) #define _copysignf (float)_copysign @@ -2301,7 +2301,7 @@ uint32_t GetUnsigned32Magic(uint32_t d, bool* add /*out*/, int* shift /*out*/) return GetUnsignedMagic(d, add, shift); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT uint64_t GetUnsigned64Magic(uint64_t d, bool* add /*out*/, int* shift /*out*/) { return GetUnsignedMagic(d, add, shift); @@ -2457,7 +2457,7 @@ int32_t GetSigned32Magic(int32_t d, int* shift /*out*/) return GetSignedMagic(d, shift); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT int64_t GetSigned64Magic(int64_t d, int* shift /*out*/) { return GetSignedMagic(d, shift); diff --git a/src/coreclr/src/jit/utils.h b/src/coreclr/src/jit/utils.h index b34e233ad3d12..7201022baffd6 100644 --- a/src/coreclr/src/jit/utils.h +++ b/src/coreclr/src/jit/utils.h @@ -23,7 +23,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Needed for unreached() #include "error.h" -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define BitScanForwardPtr BitScanForward64 #else #define BitScanForwardPtr BitScanForward @@ -752,11 +752,11 @@ class CritSecHolder namespace MagicDivide { uint32_t GetUnsigned32Magic(uint32_t d, bool* add /*out*/, int* shift /*out*/); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT uint64_t GetUnsigned64Magic(uint64_t d, bool* add /*out*/, int* shift /*out*/); #endif int32_t GetSigned32Magic(int32_t d, int* shift /*out*/); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT int64_t GetSigned64Magic(int64_t d, int* shift /*out*/); #endif } diff --git a/src/coreclr/src/jit/valuenum.cpp b/src/coreclr/src/jit/valuenum.cpp index 59d8d9a197a68..d8038937c1b6f 100644 --- a/src/coreclr/src/jit/valuenum.cpp +++ b/src/coreclr/src/jit/valuenum.cpp @@ -22,13 +22,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Windows x86 and Windows ARM/ARM64 may not define _isnanf() but they do define _isnan(). // We will redirect the macros to these other functions if the macro is not defined for the // platform. This has the side effect of a possible implicit upcasting for arguments passed. 
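
An aside on the MagicDivide helpers whose declarations are renamed just above (GetUnsigned32Magic, plus GetUnsigned64Magic and GetSigned64Magic under TARGET_64BIT): the sketch below shows the kind of strength reduction those magic numbers enable. It is illustrative only; the divide-by-10 constant is the textbook reciprocal, not a value taken from this change.

    #include <cassert>
    #include <cstdint>

    // Hypothetical sketch: unsigned divide-by-10 without a division instruction.
    // In the JIT, magic/shift would come from MagicDivide::GetUnsigned32Magic(10, &add, &shift).
    static uint32_t DivideBy10(uint32_t n)
    {
        const uint64_t magic = 0xCCCCCCCDull; // ceil(2^35 / 10)
        const int      shift = 35;
        return (uint32_t)((n * magic) >> shift);
    }

    int main()
    {
        for (uint32_t n = 0; n < 1000000; n++)
        {
            assert(DivideBy10(n) == n / 10);
        }
        return 0;
    }

The same idea carries over to the 64-bit variants, which simply need a wider intermediate product.
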
-#if (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL) +#if (defined(HOST_X86) || defined(HOST_ARM) || defined(HOST_ARM64)) && !defined(HOST_UNIX) #if !defined(_isnanf) #define _isnanf _isnan #endif -#endif // (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL) +#endif // (defined(HOST_X86) || defined(HOST_ARM) || defined(HOST_ARM64)) && !defined(HOST_UNIX) // We need to use target-specific NaN values when statically compute expressions. // Otherwise, cross crossgen (e.g. x86_arm) would have different binary outputs @@ -54,9 +54,9 @@ struct FloatTraits static float NaN() { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) unsigned bits = 0xFFC00000u; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) unsigned bits = 0x7FC00000u; #else #error Unsupported or unset target architecture @@ -80,9 +80,9 @@ struct DoubleTraits static double NaN() { -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) unsigned long long bits = 0xFFF8000000000000ull; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) unsigned long long bits = 0x7FF8000000000000ull; #else #error Unsupported or unset target architecture @@ -107,7 +107,7 @@ struct DoubleTraits template TFp FpAdd(TFp value1, TFp value2) { -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // If [value1] is negative infinity and [value2] is positive infinity // the result is NaN. // If [value1] is positive infinity and [value2] is negative infinity @@ -125,7 +125,7 @@ TFp FpAdd(TFp value1, TFp value2) return TFpTraits::NaN(); } } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH return value1 + value2; } @@ -143,7 +143,7 @@ TFp FpAdd(TFp value1, TFp value2) template TFp FpSub(TFp value1, TFp value2) { -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // If [value1] is positive infinity and [value2] is positive infinity // the result is NaN. 
// If [value1] is negative infinity and [value2] is negative infinity @@ -161,7 +161,7 @@ TFp FpSub(TFp value1, TFp value2) return TFpTraits::NaN(); } } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH return value1 - value2; } @@ -179,7 +179,7 @@ TFp FpSub(TFp value1, TFp value2) template TFp FpMul(TFp value1, TFp value2) { -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // From the ECMA standard: // // If [value1] is zero and [value2] is infinity @@ -195,7 +195,7 @@ TFp FpMul(TFp value1, TFp value2) { return TFpTraits::NaN(); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH return value1 * value2; } @@ -213,7 +213,7 @@ TFp FpMul(TFp value1, TFp value2) template TFp FpDiv(TFp dividend, TFp divisor) { -#ifdef _TARGET_ARMARCH_ +#ifdef TARGET_ARMARCH // From the ECMA standard: // // If [dividend] is zero and [divisor] is zero @@ -229,7 +229,7 @@ TFp FpDiv(TFp dividend, TFp divisor) { return TFpTraits::NaN(); } -#endif // _TARGET_ARMARCH_ +#endif // TARGET_ARMARCH return dividend / divisor; } @@ -2494,7 +2494,7 @@ int ValueNumStore::GetConstantInt32(ValueNum argVN) case TYP_INT: result = ConstantValue(argVN); break; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT case TYP_REF: case TYP_BYREF: result = (int)ConstantValue(argVN); @@ -2806,7 +2806,7 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu switch (castFromType) // GT_CAST source type { -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT case TYP_REF: case TYP_BYREF: #endif @@ -2836,7 +2836,7 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu case TYP_LONG: case TYP_ULONG: assert(!IsVNHandle(arg0VN)); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (typ == TYP_LONG) { if (srcIsUnsigned) @@ -2889,7 +2889,7 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu break; } { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case TYP_REF: case TYP_BYREF: #endif @@ -5735,7 +5735,7 @@ void Compiler::fgValueNumber() } break; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 bool isVarargParam = (lclNum == lvaVarargsBaseOfStkArgs || lclNum == lvaVarargsHandleArg); if (isVarargParam) initVal = vnStore->VNForExpr(fgFirstBB); // a new, unique VN. @@ -8463,7 +8463,7 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN vnpUniq.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet())); } -#if defined(FEATURE_READYTORUN_COMPILER) && defined(_TARGET_ARMARCH_) +#if defined(FEATURE_READYTORUN_COMPILER) && defined(TARGET_ARMARCH) if (call->IsR2RRelativeIndir()) { #ifdef DEBUG @@ -8478,7 +8478,7 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN // in morph. So we do not need to use EntryPointAddrAsArg0, because arg0 is already an entry point addr. 
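
To make the TARGET_ARMARCH special cases in FpAdd, FpSub, FpMul and FpDiv above concrete, here is a small host-side sketch of the FpAdd rule; it is illustrative and not the JIT's code. When the operands are infinities of opposite sign, constant folding must hand back the target's canonical NaN rather than whatever the host arithmetic would produce.

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Stand-in for DoubleTraits::NaN() on an ARM target: a quiet NaN with the sign bit clear.
    static double ArmTargetNaN()
    {
        return std::numeric_limits<double>::quiet_NaN();
    }

    // Mirrors the TARGET_ARMARCH guard in FpAdd: +inf plus -inf, in either order, folds to NaN.
    static double FpAddSketch(double value1, double value2)
    {
        if (std::isinf(value1) && std::isinf(value2) && (std::signbit(value1) != std::signbit(value2)))
        {
            return ArmTargetNaN();
        }
        return value1 + value2;
    }

    int main()
    {
        const double inf = std::numeric_limits<double>::infinity();
        printf("%f\n", FpAddSketch(inf, -inf)); // nan
        return 0;
    }
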
useEntryPointAddrAsArg0 = false; } -#endif // FEATURE_READYTORUN_COMPILER && _TARGET_ARMARCH_ +#endif // FEATURE_READYTORUN_COMPILER && TARGET_ARMARCH if (nArgs == 0) { diff --git a/src/coreclr/src/jit/valuenum.h b/src/coreclr/src/jit/valuenum.h index 2522ebcca845c..fdb6268892193 100644 --- a/src/coreclr/src/jit/valuenum.h +++ b/src/coreclr/src/jit/valuenum.h @@ -266,7 +266,7 @@ class ValueNumStore ValueNum VNForDoubleCon(double cnsVal); ValueNum VNForByrefCon(size_t byrefVal); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT ValueNum VNForPtrSizeIntCon(INT64 cnsVal) { return VNForLongCon(cnsVal); diff --git a/src/coreclr/src/jit/vartype.h b/src/coreclr/src/jit/vartype.h index 83824ac13576b..c6177a82707f7 100644 --- a/src/coreclr/src/jit/vartype.h +++ b/src/coreclr/src/jit/vartype.h @@ -36,7 +36,7 @@ enum var_types : BYTE * platform */ -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define TYP_I_IMPL TYP_LONG #define TYP_U_IMPL TYP_ULONG #define TYPE_REF_IIM TYPE_REF_LNG @@ -207,9 +207,9 @@ template inline bool varTypeIsIntOrI(T vt) { return ((TypeGet(vt) == TYP_INT) -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT || (TypeGet(vt) == TYP_I_IMPL) -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT ); } @@ -228,7 +228,7 @@ inline bool varTypeIsLong(T vt) template inline bool varTypeIsMultiReg(T vt) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return false; #else return (TypeGet(vt) == TYP_LONG); @@ -258,9 +258,9 @@ template inline bool varTypeIsPromotable(T vt) { return (varTypeIsStruct(vt) || (TypeGet(vt) == TYP_BLK) -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) || varTypeIsLong(vt) -#endif // !defined(_TARGET_64BIT_) +#endif // !defined(TARGET_64BIT) ); } @@ -281,7 +281,7 @@ inline bool varTypeUsesFloatReg(T vt) template inline bool varTypeUsesFloatArgReg(T vt) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Arm64 passes SIMD types in floating point registers. 
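
A standalone illustration (not the JIT's headers) of why varTypeIsIntOrI in vartype.h above needs a TARGET_64BIT arm: on a 64-bit target TYP_I_IMPL aliases TYP_LONG, so a pointer-sized value is no longer covered by the TYP_INT check and has to be accepted explicitly. The enum below is a reduced, hypothetical stand-in for var_types.

    #include <cstdio>

    // Reduced, hypothetical stand-in for the JIT's var_types; only what the example needs.
    enum var_types { TYP_INT, TYP_LONG };

    #ifdef TARGET_64BIT
    #define TYP_I_IMPL TYP_LONG   // pointer-sized values are 64-bit
    #else
    #define TYP_I_IMPL TYP_INT    // pointer-sized values are 32-bit
    #endif

    static bool varTypeIsIntOrI(var_types vt)
    {
        return (vt == TYP_INT)
    #ifdef TARGET_64BIT
               || (vt == TYP_I_IMPL)
    #endif
            ;
    }

    int main()
    {
        printf("varTypeIsIntOrI(TYP_I_IMPL) = %d\n", (int)varTypeIsIntOrI(TYP_I_IMPL));
        return 0;
    }
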
return varTypeUsesFloatReg(vt); #else @@ -311,11 +311,11 @@ inline bool varTypeIsValidHfaType(T vt) bool isValid = (TypeGet(vt) != TYP_UNDEF); if (isValid) { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 assert(varTypeUsesFloatReg(vt)); -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 assert(varTypeIsFloating(vt)); -#endif // !_TARGET_ARM64_ +#endif // !TARGET_ARM64 } return isValid; #else // !FEATURE_HFA diff --git a/src/coreclr/src/md/ceefilegen/pesectionman.cpp b/src/coreclr/src/md/ceefilegen/pesectionman.cpp index 9bae7a0cbbd22..e4d96312b076d 100644 --- a/src/coreclr/src/md/ceefilegen/pesectionman.cpp +++ b/src/coreclr/src/md/ceefilegen/pesectionman.cpp @@ -308,7 +308,7 @@ HRESULT PESection::addBaseReloc(unsigned offset, CeeSectionRelocType reloc, switch (reloc) { -#ifdef BIT64 +#ifdef HOST_64BIT case srRelocDir64Ptr: #endif case srRelocAbsolutePtr: @@ -317,7 +317,7 @@ HRESULT PESection::addBaseReloc(unsigned offset, CeeSectionRelocType reloc, hr = S_OK; break; -#if defined (_TARGET_X86_) || defined (_TARGET_AMD64_) +#if defined (TARGET_X86) || defined (TARGET_AMD64) case srRelocRelativePtr: case srRelocRelative: hr = addSectReloc(offset, NULL, reloc, extra); diff --git a/src/coreclr/src/md/datablob.inl b/src/coreclr/src/md/datablob.inl index da9ffa632461a..5c71c59443b9f 100644 --- a/src/coreclr/src/md/datablob.inl +++ b/src/coreclr/src/md/datablob.inl @@ -56,11 +56,11 @@ DataBlob::DataBlob( m_cbSize = source.m_cbSize; } // DataBlob::DataBlob -#ifdef BIT64 +#ifdef HOST_64BIT #define const_pbBadFood (((BYTE *)NULL) + 0xbaadf00dbaadf00d) -#else //!BIT64 +#else //!HOST_64BIT #define const_pbBadFood (((BYTE *)NULL) + 0xbaadf00d) -#endif //!BIT64 +#endif //!HOST_64BIT // -------------------------------------------------------------------------------------- // diff --git a/src/coreclr/src/md/databuffer.inl b/src/coreclr/src/md/databuffer.inl index ae8c139a2f57c..bebedb06492e1 100644 --- a/src/coreclr/src/md/databuffer.inl +++ b/src/coreclr/src/md/databuffer.inl @@ -49,11 +49,11 @@ DataBuffer::DataBuffer( m_cbSize = source.m_cbSize; } // DataBuffer::DataBuffer -#ifdef BIT64 +#ifdef HOST_64BIT #define const_pbBadFood (((BYTE *)NULL) + 0xbaadf00dbaadf00d) -#else //!BIT64 +#else //!HOST_64BIT #define const_pbBadFood (((BYTE *)NULL) + 0xbaadf00d) -#endif //!BIT64 +#endif //!HOST_64BIT // -------------------------------------------------------------------------------------- // diff --git a/src/coreclr/src/md/enc/stgio.cpp b/src/coreclr/src/md/enc/stgio.cpp index 1a60975222324..bb9a917f85b36 100644 --- a/src/coreclr/src/md/enc/stgio.cpp +++ b/src/coreclr/src/md/enc/stgio.cpp @@ -225,7 +225,7 @@ HRESULT StgIO::Open( // Return code. { dwFileSharingFlags |= FILE_SHARE_READ; -#if !defined(DACCESS_COMPILE) && !defined(FEATURE_PAL) +#if !defined(DACCESS_COMPILE) && !defined(TARGET_UNIX) // PEDecoder is not defined in DAC // We prefer to use LoadLibrary if we can because it will share already loaded images (used for execution) @@ -265,7 +265,7 @@ HRESULT StgIO::Open( // Return code. 
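
The const_pbBadFood definitions renamed in datablob.inl and databuffer.inl above select a pointer-width poison pattern with an #ifdef on HOST_64BIT. As a hedged side note, the same value can be obtained without the preprocessor by letting an unsigned conversion narrow on a 32-bit host:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical alternative: the wide pattern narrows to 0xbaadf00d when uintptr_t
    // is 32 bits, giving the same result as the HOST_64BIT #ifdef pair.
    static const uintptr_t kBadFood = (uintptr_t)0xbaadf00dbaadf00dULL;

    int main()
    {
        printf("%#zx\n", (size_t)kBadFood);
        return 0;
    }
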
} } } -#endif //!DACCESS_COMPILE && !FEATURE_PAL +#endif //!DACCESS_COMPILE && !TARGET_UNIX } if (m_hModule == NULL) diff --git a/src/coreclr/src/pal/inc/pal.h b/src/coreclr/src/pal/inc/pal.h index 6d4b792f8c01e..86cbc413b0cfa 100644 --- a/src/coreclr/src/pal/inc/pal.h +++ b/src/coreclr/src/pal/inc/pal.h @@ -95,14 +95,14 @@ typedef PVOID NATIVE_LIBRARY_HANDLE; #define _M_ARM64 1 #endif -#if defined(_M_IX86) && !defined(_X86_) -#define _X86_ -#elif defined(_M_AMD64) && !defined(_AMD64_) -#define _AMD64_ -#elif defined(_M_ARM) && !defined(_ARM_) -#define _ARM_ -#elif defined(_M_ARM64) && !defined(_ARM64_) -#define _ARM64_ +#if defined(_M_IX86) && !defined(HOST_X86) +#define HOST_X86 +#elif defined(_M_AMD64) && !defined(HOST_AMD64) +#define HOST_AMD64 +#elif defined(_M_ARM) && !defined(HOST_ARM) +#define HOST_ARM +#elif defined(_M_ARM64) && !defined(HOST_ARM64) +#define HOST_ARM64 #endif #endif // !_MSC_VER @@ -302,7 +302,7 @@ PAL_IsDebuggerPresent(); #ifndef PAL_STDCPP_COMPAT -#if BIT64 || _MSC_VER >= 1400 +#if HOST_64BIT || _MSC_VER >= 1400 typedef __int64 time_t; #else typedef long time_t; @@ -1484,7 +1484,7 @@ QueueUserAPC( IN HANDLE hThread, IN ULONG_PTR dwData); -#ifdef _X86_ +#ifdef HOST_X86 // // *********************************************************************************** @@ -1610,7 +1610,7 @@ typedef struct _KNONVOLATILE_CONTEXT_POINTERS { } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) // copied from winnt.h #define CONTEXT_AMD64 0x100000 @@ -1861,7 +1861,7 @@ typedef struct _KNONVOLATILE_CONTEXT_POINTERS { } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; -#elif defined(_ARM_) +#elif defined(HOST_ARM) #define CONTEXT_ARM 0x00200000L @@ -2041,7 +2041,7 @@ typedef struct _IMAGE_ARM_RUNTIME_FUNCTION_ENTRY { }; } IMAGE_ARM_RUNTIME_FUNCTION_ENTRY, * PIMAGE_ARM_RUNTIME_FUNCTION_ENTRY; -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) #define CONTEXT_ARM64 0x00400000L @@ -2345,13 +2345,13 @@ PALIMPORT BOOL PALAPI PAL_VirtualUnwindOutOfProc(CONTEXT *context, KNONVOLATILE_ #define PAL_CS_NATIVE_DATA_SIZE 76 #elif defined(__APPLE__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 120 -#elif defined(__FreeBSD__) && defined(_X86_) +#elif defined(__FreeBSD__) && defined(HOST_X86) #define PAL_CS_NATIVE_DATA_SIZE 12 #elif defined(__FreeBSD__) && defined(__x86_64__) #define PAL_CS_NATIVE_DATA_SIZE 24 -#elif defined(__linux__) && defined(_ARM_) +#elif defined(__linux__) && defined(HOST_ARM) #define PAL_CS_NATIVE_DATA_SIZE 80 -#elif defined(__linux__) && defined(_ARM64_) +#elif defined(__linux__) && defined(HOST_ARM64) #define PAL_CS_NATIVE_DATA_SIZE 116 #elif defined(__linux__) && defined(__i386__) #define PAL_CS_NATIVE_DATA_SIZE 76 @@ -2934,7 +2934,7 @@ enum { // typedef struct _RUNTIME_FUNCTION { DWORD BeginAddress; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 DWORD EndAddress; #endif DWORD UnwindData; @@ -3219,7 +3219,7 @@ BitScanReverse64( FORCEINLINE void PAL_ArmInterlockedOperationBarrier() { -#ifdef _ARM64_ +#ifdef HOST_ARM64 // On arm64, most of the __sync* functions generate a code sequence like: // loop: // ldaxr (load acquire exclusive) @@ -3232,7 +3232,7 @@ FORCEINLINE void PAL_ArmInterlockedOperationBarrier() // require the load to occur after the store. This memory barrier should be used following a call to a __sync* function to // prevent that reordering. Code generated for arm32 includes a 'dmb' after 'cbnz', so no issue there at the moment. 
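
A usage sketch for PAL_ArmInterlockedOperationBarrier as explained above. The wrapper names below are hypothetical and simplified from the PAL's interlocked helpers; the point is only that the barrier is issued after the __sync builtin, so a later load cannot be reordered ahead of the store-release inside the builtin's ldaxr/stlxr loop.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical wrapper in the style of the PAL's interlocked helpers (GCC/Clang builtins).
    static inline void ArmInterlockedOperationBarrierSketch()
    {
    #if defined(__aarch64__) // plays the role of HOST_ARM64
        __sync_synchronize();
    #endif
    }

    static inline int32_t InterlockedCompareExchangeSketch(int32_t volatile* dest, int32_t exchange, int32_t comparand)
    {
        int32_t result = __sync_val_compare_and_swap(dest, comparand, exchange);
        ArmInterlockedOperationBarrierSketch(); // keep later loads behind the CAS on arm64
        return result;
    }

    int main()
    {
        volatile int32_t v = 1;
        printf("%d\n", InterlockedCompareExchangeSketch(&v, 2, 1)); // prints 1, v is now 2
        return 0;
    }
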
__sync_synchronize(); -#endif // _ARM64_ +#endif // HOST_ARM64 } /*++ @@ -3539,7 +3539,7 @@ InterlockedBitTestAndSet( return (InterlockedOr(Base, (1 << Bit)) & (1 << Bit)) != 0; } -#if defined(BIT64) +#if defined(HOST_64BIT) #define InterlockedExchangePointer(Target, Value) \ ((PVOID)InterlockedExchange64((PLONG64)(Target), (LONGLONG)(Value))) @@ -3577,11 +3577,11 @@ VOID PALAPI YieldProcessor() { -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) __asm__ __volatile__( "rep\n" "nop"); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) __asm__ __volatile__( "yield"); #else return; @@ -3928,10 +3928,10 @@ PAL_GetCurrentThreadAffinitySet(SIZE_T size, UINT_PTR* data); #define strnlen PAL_strnlen #define wcsnlen PAL_wcsnlen -#ifdef _AMD64_ +#ifdef HOST_AMD64 #define _mm_getcsr PAL__mm_getcsr #define _mm_setcsr PAL__mm_setcsr -#endif // _AMD64_ +#endif // HOST_AMD64 #endif // !PAL_STDCPP_COMPAT @@ -4119,9 +4119,9 @@ unsigned int __cdecl _rotl(unsigned int value, int shift) #endif // !HAS_ROTL // On 64 bit unix, make the long an int. -#ifdef BIT64 +#ifdef HOST_64BIT #define _lrotl _rotl -#endif // BIT64 +#endif // HOST_64BIT #if !HAS_ROTR diff --git a/src/coreclr/src/pal/inc/pal_endian.h b/src/coreclr/src/pal/inc/pal_endian.h index b119aba8f6d8d..92cef33a6ff92 100644 --- a/src/coreclr/src/pal/inc/pal_endian.h +++ b/src/coreclr/src/pal/inc/pal_endian.h @@ -92,13 +92,13 @@ inline void SwapGuid(GUID *pGuid) #endif // !BIGENDIAN -#ifdef BIT64 +#ifdef HOST_64BIT #define VALPTR(x) VAL64(x) #else #define VALPTR(x) VAL32(x) #endif -#ifdef _ARM_ +#ifdef HOST_ARM #define LOG2_PTRSIZE 2 #define ALIGN_ACCESS ((1<= 1300 +#if defined(HOST_X86) && _MSC_VER >= 1300 #define _W64 __w64 #else #define _W64 #endif -#ifdef BIT64 +#ifdef HOST_64BIT #define _atoi64 (__int64)atoll @@ -329,7 +329,7 @@ typedef unsigned __int64 DWORD_PTR, *PDWORD_PTR; #define __int3264 __int64 -#if !defined(BIT64) +#if !defined(HOST_64BIT) __inline unsigned long HandleToULong( @@ -482,7 +482,7 @@ UShortToPtr( return( (void *)(UINT_PTR)us ); } -#else // !defined(BIT64) +#else // !defined(HOST_64BIT) #define HandleToULong( h ) ((ULONG)(ULONG_PTR)(h) ) #define HandleToLong( h ) ((LONG)(LONG_PTR) (h) ) #define ULongToHandle( ul ) ((HANDLE)(ULONG_PTR) (ul) ) @@ -499,7 +499,7 @@ UShortToPtr( #define ULongToPtr( ul ) ((VOID *)(ULONG_PTR)((unsigned long)(ul))) #define ShortToPtr( s ) ((VOID *)(INT_PTR)((short)(s))) #define UShortToPtr( us ) ((VOID *)(UINT_PTR)((unsigned short)(s))) -#endif // !defined(BIT64) +#endif // !defined(HOST_64BIT) @@ -564,13 +564,13 @@ typedef LONG_PTR SSIZE_T, *PSSIZE_T; #ifndef PAL_STDCPP_COMPAT #if defined(__APPLE_CC__) || defined(__linux__) -#ifdef BIT64 +#ifdef HOST_64BIT typedef unsigned long size_t; typedef long ptrdiff_t; -#else // !BIT64 +#else // !HOST_64BIT typedef unsigned int size_t; typedef int ptrdiff_t; -#endif // !BIT64 +#endif // !HOST_64BIT #else typedef ULONG_PTR size_t; typedef LONG_PTR ptrdiff_t; @@ -592,13 +592,13 @@ typedef char16_t WCHAR; #ifndef PAL_STDCPP_COMPAT #if defined(__linux__) -#ifdef BIT64 +#ifdef HOST_64BIT typedef long int intptr_t; typedef unsigned long int uintptr_t; -#else // !BIT64 +#else // !HOST_64BIT typedef int intptr_t; typedef unsigned int uintptr_t; -#endif // !BIT64 +#endif // !HOST_64BIT #else typedef INT_PTR intptr_t; typedef UINT_PTR uintptr_t; diff --git a/src/coreclr/src/pal/inc/rt/intsafe.h b/src/coreclr/src/pal/inc/rt/intsafe.h index 33e4dc73b9593..1d18f914414ec 100644 --- a/src/coreclr/src/pal/inc/rt/intsafe.h +++ 
b/src/coreclr/src/pal/inc/rt/intsafe.h @@ -67,7 +67,7 @@ #define ULONGLONG_ERROR (0xffffffffffffffffULL) #define HIDWORD_MASK (0xffffffff00000000ULL) #endif // _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #define SIZET_ERROR ULONGLONG_ERROR #else #define SIZET_ERROR ULONG_ERROR @@ -620,7 +620,7 @@ ULongLongToULong( // UINT_PTR -> ULONG conversion // ULONG_PTR -> ULONG conversion // -#ifdef BIT64 +#ifdef HOST_64BIT #define UIntPtrToULong ULongLongToULong #define ULongPtrToULong ULongLongToULong @@ -674,7 +674,7 @@ ULongLongToUInt( // UINT_PTR -> UINT conversion // ULONG_PTR -> UINT conversion // -#ifdef BIT64 +#ifdef HOST_64BIT #define UIntPtrToUInt ULongLongToUInt #define ULongPtrToUInt ULongLongToUInt @@ -775,7 +775,7 @@ ULongPtrToUInt( // // * -> UINT_PTR conversion (UINT_PTR is UINT on Win32, ULONGLONG on Win64) // -#ifdef BIT64 +#ifdef HOST_64BIT #define CharToUIntPtr CharToULongLong #define SignedCharToUIntPtr SignedCharToULongLong #define ShortToUIntPtr ShortToULongLong @@ -811,7 +811,7 @@ ULongLongToUIntPtr( IN ULONGLONG ullOperand, OUT UINT_PTR* puResult) { -#ifdef BIT64 +#ifdef HOST_64BIT *puResult = ullOperand; return S_OK; #else @@ -823,7 +823,7 @@ ULongLongToUIntPtr( // // UINT_PTR -> * conversion (UINT_PTR is UINT on Win32, ULONGLONG on Win64) // -#ifdef BIT64 +#ifdef HOST_64BIT #define UIntPtrToUShort ULongLongToUShort #define UIntPtrToInt ULongLongToInt #define UIntPtrToLong ULongLongToLong @@ -868,7 +868,7 @@ UIntPtrToLong( // // * -> ULONG_PTR conversion (ULONG_PTR is ULONG on Win32, ULONGLONG on Win64) // -#ifdef BIT64 +#ifdef HOST_64BIT #define CharToULongPtr CharToULongLong #define SignedCharToULongPtr SignedCharToULongLong #define ShortToULongPtr ShortToULongLong @@ -904,7 +904,7 @@ ULongLongToULongPtr( IN ULONGLONG ullOperand, OUT ULONG_PTR* pulResult) { -#ifdef BIT64 +#ifdef HOST_64BIT *pulResult = ullOperand; return S_OK; #else @@ -916,7 +916,7 @@ ULongLongToULongPtr( // // ULONG_PTR -> * conversion (ULONG_PTR is ULONG on Win32, ULONGLONG on Win64) // -#ifdef BIT64 +#ifdef HOST_64BIT #define ULongPtrToUShort ULongLongToUShort #define ULongPtrToInt ULongLongToInt #define ULongPtrToLong ULongLongToLong @@ -1114,7 +1114,7 @@ ULongAdd( // // ULONG_PTR addition // -#ifdef BIT64 +#ifdef HOST_64BIT #define ULongPtrAdd ULongLongAdd #else __inline @@ -1126,7 +1126,7 @@ ULongPtrAdd( { return ULongAdd((ULONG)ulAugend, (ULONG)ulAddend, (ULONG*)pulResult); } -#endif // BIT64 +#endif // HOST_64BIT // // DWORD addition @@ -1267,7 +1267,7 @@ ULongSub( // // ULONG_PTR subtraction // -#ifdef BIT64 +#ifdef HOST_64BIT #define ULongPtrSub ULongLongSub #else __inline @@ -1279,7 +1279,7 @@ ULongPtrSub( { return ULongSub((ULONG)ulMinuend, (ULONG)ulSubtrahend, (ULONG*)pulResult); } -#endif // BIT64 +#endif // HOST_64BIT // diff --git a/src/coreclr/src/pal/inc/rt/ntimage.h b/src/coreclr/src/pal/inc/rt/ntimage.h index 48d82916bcfee..10686ef9d98b0 100644 --- a/src/coreclr/src/pal/inc/rt/ntimage.h +++ b/src/coreclr/src/pal/inc/rt/ntimage.h @@ -357,7 +357,7 @@ typedef struct _IMAGE_OPTIONAL_HEADER64 { #define IMAGE_NT_OPTIONAL_HDR64_MAGIC 0x20b #define IMAGE_ROM_OPTIONAL_HDR_MAGIC 0x107 -#ifdef BIT64 +#ifdef HOST_64BIT typedef IMAGE_OPTIONAL_HEADER64 IMAGE_OPTIONAL_HEADER; typedef PIMAGE_OPTIONAL_HEADER64 PIMAGE_OPTIONAL_HEADER; #define IMAGE_SIZEOF_NT_OPTIONAL_HEADER IMAGE_SIZEOF_NT_OPTIONAL64_HEADER @@ -386,7 +386,7 @@ typedef struct _IMAGE_ROM_HEADERS { IMAGE_ROM_OPTIONAL_HEADER OptionalHeader; } IMAGE_ROM_HEADERS, *PIMAGE_ROM_HEADERS; -#ifdef BIT64 +#ifdef HOST_64BIT typedef IMAGE_NT_HEADERS64 
IMAGE_NT_HEADERS; typedef PIMAGE_NT_HEADERS64 PIMAGE_NT_HEADERS; #else @@ -1288,7 +1288,7 @@ typedef struct _IMAGE_TLS_DIRECTORY32 { } IMAGE_TLS_DIRECTORY32; typedef IMAGE_TLS_DIRECTORY32 * PIMAGE_TLS_DIRECTORY32; -#ifdef BIT64 +#ifdef HOST_64BIT #define IMAGE_ORDINAL_FLAG IMAGE_ORDINAL_FLAG64 #define IMAGE_ORDINAL(Ordinal) IMAGE_ORDINAL64(Ordinal) typedef IMAGE_THUNK_DATA64 IMAGE_THUNK_DATA; @@ -1485,7 +1485,7 @@ typedef struct { ULONG Reserved[ 2 ]; } IMAGE_LOAD_CONFIG_DIRECTORY64, *PIMAGE_LOAD_CONFIG_DIRECTORY64; -#ifdef BIT64 +#ifdef HOST_64BIT typedef IMAGE_LOAD_CONFIG_DIRECTORY64 IMAGE_LOAD_CONFIG_DIRECTORY; typedef PIMAGE_LOAD_CONFIG_DIRECTORY64 PIMAGE_LOAD_CONFIG_DIRECTORY; #else diff --git a/src/coreclr/src/pal/inc/rt/palrt.h b/src/coreclr/src/pal/inc/rt/palrt.h index 012223626f613..d7245d934a5e7 100644 --- a/src/coreclr/src/pal/inc/rt/palrt.h +++ b/src/coreclr/src/pal/inc/rt/palrt.h @@ -180,7 +180,7 @@ inline void *__cdecl operator new(size_t, void *_P) #define ARGUMENT_PRESENT(ArgumentPointer) (\ (CHAR *)(ArgumentPointer) != (CHAR *)(NULL) ) -#if defined(BIT64) +#if defined(HOST_64BIT) #define MAX_NATURAL_ALIGNMENT sizeof(ULONGLONG) #else #define MAX_NATURAL_ALIGNMENT sizeof(ULONG) @@ -560,7 +560,7 @@ STDAPI_(HRESULT) VariantClear(VARIANT * pvarg); #define V_UINTREF(X) V_UNION(X, puintVal) #define V_ARRAY(X) V_UNION(X, parray) -#ifdef BIT64 +#ifdef HOST_64BIT #define V_INT_PTR(X) V_UNION(X, llVal) #define V_UINT_PTR(X) V_UNION(X, ullVal) #define V_INT_PTRREF(X) V_UNION(X, pllVal) @@ -892,11 +892,11 @@ typedef VOID (NTAPI *WAITORTIMERCALLBACK)(PVOID, BOOLEAN); // usage pattern is: // // int get_scratch_register() { -// #if defined(_TARGET_X86_) +// #if defined(TARGET_X86) // return eax; -// #elif defined(_TARGET_AMD64_) +// #elif defined(TARGET_AMD64) // return rax; -// #elif defined(_TARGET_ARM_) +// #elif defined(TARGET_ARM) // return r0; // #else // PORTABILITY_ASSERT("scratch register"); @@ -933,7 +933,7 @@ typedef VOID (NTAPI *WAITORTIMERCALLBACK)(PVOID, BOOLEAN); #define UNREFERENCED_PARAMETER(P) (void)(P) -#ifdef BIT64 +#ifdef HOST_64BIT #define VALPTR(x) VAL64(x) #define GET_UNALIGNED_PTR(x) GET_UNALIGNED_64(x) #define GET_UNALIGNED_VALPTR(x) GET_UNALIGNED_VAL64(x) @@ -947,7 +947,7 @@ typedef VOID (NTAPI *WAITORTIMERCALLBACK)(PVOID, BOOLEAN); #define SET_UNALIGNED_VALPTR(p,x) SET_UNALIGNED_VAL32(p,x) #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define RUNTIME_FUNCTION_INDIRECT 0x1 #endif @@ -1173,14 +1173,14 @@ typedef OUT_OF_PROCESS_FUNCTION_TABLE_CALLBACK *POUT_OF_PROCESS_FUNCTION_TABLE_C #define OUT_OF_PROCESS_FUNCTION_TABLE_CALLBACK_EXPORT_NAME \ "OutOfProcessFunctionTableCallback" -// #if !defined(_TARGET_MAC64) +// #if !defined(TARGET_OSX) // typedef LONG (*PEXCEPTION_ROUTINE)( // IN PEXCEPTION_POINTERS pExceptionPointers, // IN LPVOID lpvParam); // #define DISPATCHER_CONTEXT LPVOID -// #else // defined(_TARGET_MAC64) +// #else // defined(TARGET_OSX) // // Define unwind history table structure. 
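
Looping back to the intsafe.h conversions a little above: a hedged sketch of what the HOST_64BIT split buys. On a 64-bit host ULongLongToUIntPtr is a plain copy, while a 32-bit host must range-check, analogous to how SIZET_ERROR picks the right error sentinel. The helper below is a standalone illustration that returns bool instead of the header's HRESULT convention.

    #include <cstdint>
    #include <cstdio>

    // Standalone illustration; the real header reports failure through an HRESULT.
    static bool ULongLongToUIntPtrSketch(unsigned long long value, uintptr_t* result)
    {
    #if UINTPTR_MAX == UINT64_MAX     // plays the role of HOST_64BIT
        *result = (uintptr_t)value;   // always fits
        return true;
    #else                             // 32-bit host: the value may not fit
        if (value > UINTPTR_MAX)
        {
            *result = (uintptr_t)-1;  // error sentinel, in the spirit of SIZET_ERROR
            return false;
        }
        *result = (uintptr_t)value;
        return true;
    #endif
    }

    int main()
    {
        uintptr_t p = 0;
        bool ok = ULongLongToUIntPtrSketch(0x1234567890ULL, &p);
        printf("ok=%d value=%#zx\n", (int)ok, (size_t)p);
        return 0;
    }
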
@@ -1213,7 +1213,7 @@ EXCEPTION_DISPOSITION PVOID DispatcherContext ); -#if defined(_ARM_) +#if defined(HOST_ARM) typedef struct _DISPATCHER_CONTEXT { DWORD ControlPc; @@ -1231,7 +1231,7 @@ typedef struct _DISPATCHER_CONTEXT { DWORD Reserved; } DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT; -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) typedef struct _DISPATCHER_CONTEXT { ULONG64 ControlPc; @@ -1249,7 +1249,7 @@ typedef struct _DISPATCHER_CONTEXT { ULONG64 Reserved; } DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) typedef struct _DISPATCHER_CONTEXT { ULONG64 ControlPc; @@ -1263,7 +1263,7 @@ typedef struct _DISPATCHER_CONTEXT { PUNWIND_HISTORY_TABLE HistoryTable; } DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT; -#elif defined(_X86_) +#elif defined(HOST_X86) typedef struct _DISPATCHER_CONTEXT { DWORD ControlPc; @@ -1285,7 +1285,7 @@ typedef struct _DISPATCHER_CONTEXT { #endif -// #endif // !defined(_TARGET_MAC64) +// #endif // !defined(TARGET_OSX) typedef DISPATCHER_CONTEXT *PDISPATCHER_CONTEXT; diff --git a/src/coreclr/src/pal/inc/rt/safecrt.h b/src/coreclr/src/pal/inc/rt/safecrt.h index bbe421875b7a1..1f5e82650a65c 100644 --- a/src/coreclr/src/pal/inc/rt/safecrt.h +++ b/src/coreclr/src/pal/inc/rt/safecrt.h @@ -98,7 +98,7 @@ /* _W64 */ #if !defined(_W64) -#if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +#if !defined(__midl) && (defined(HOST_X86) || defined(_M_IX86)) && _MSC_VER >= 1300 #define _W64 __w64 #else #define _W64 @@ -107,7 +107,7 @@ /* uintptr_t */ #if !defined(_UINTPTR_T_DEFINED) -#if defined(BIT64) +#if defined(HOST_64BIT) typedef unsigned __int64 uintptr_t; #else typedef _W64 unsigned int uintptr_t; diff --git a/src/coreclr/src/pal/inc/strsafe.h b/src/coreclr/src/pal/inc/strsafe.h index a7c86d82796dd..b618a3bf81c79 100644 --- a/src/coreclr/src/pal/inc/strsafe.h +++ b/src/coreclr/src/pal/inc/strsafe.h @@ -29,11 +29,11 @@ #include // for va_start, etc. 
#ifndef _SIZE_T_DEFINED -#ifdef BIT64 +#ifdef HOST_64BIT typedef unsigned __int64 size_t; #else typedef __w64 unsigned int size_t; -#endif // !BIT64 +#endif // !HOST_64BIT #define _SIZE_T_DEFINED #endif // !_SIZE_T_DEFINED diff --git a/src/coreclr/src/pal/inc/unixasmmacros.inc b/src/coreclr/src/pal/inc/unixasmmacros.inc index 263d616041906..18ed49e202e35 100644 --- a/src/coreclr/src/pal/inc/unixasmmacros.inc +++ b/src/coreclr/src/pal/inc/unixasmmacros.inc @@ -10,7 +10,7 @@ #define LOCAL_LABEL(name) L##name #else #define C_FUNC(name) name -#if defined(_AMD64_) || defined(_X86_) +#if defined(HOST_AMD64) || defined(HOST_X86) #define EXTERNAL_C_FUNC(name) C_FUNC(name)@plt #else #define EXTERNAL_C_FUNC(name) C_FUNC(name) @@ -20,7 +20,7 @@ #if defined(__APPLE__) #define C_PLTFUNC(name) _##name -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) #define C_PLTFUNC(name) name #else #define C_PLTFUNC(name) name@PLT @@ -34,12 +34,12 @@ .equiv \New, \Old .endm -#if defined(_X86_) +#if defined(HOST_X86) #include "unixasmmacrosx86.inc" -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) #include "unixasmmacrosamd64.inc" -#elif defined(_ARM_) +#elif defined(HOST_ARM) #include "unixasmmacrosarm.inc" -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) #include "unixasmmacrosarm64.inc" #endif diff --git a/src/coreclr/src/pal/src/CMakeLists.txt b/src/coreclr/src/pal/src/CMakeLists.txt index d9e350d6ee84a..2e2685213287d 100644 --- a/src/coreclr/src/pal/src/CMakeLists.txt +++ b/src/coreclr/src/pal/src/CMakeLists.txt @@ -65,7 +65,7 @@ include_directories(include) # Compile options if(CMAKE_SYSTEM_NAME STREQUAL Darwin) - add_definitions(-D_TARGET_MAC64) + add_definitions(-DTARGET_OSX) add_definitions(-DXSTATE_SUPPORTED) set(PLATFORM_SOURCES arch/amd64/activationhandlerwrapper.S @@ -79,9 +79,7 @@ endif(CMAKE_SYSTEM_NAME STREQUAL Darwin) if (FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION) add_definitions(-DFEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION) endif(FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION) -add_definitions(-DPLATFORM_UNIX) add_definitions(-DLP64COMPATIBLE) -add_definitions(-DFEATURE_PAL) add_definitions(-DCORECLR) add_definitions(-DPIC) add_definitions(-D_FILE_OFFSET_BITS=64) diff --git a/src/coreclr/src/pal/src/arch/amd64/activationhandlerwrapper.S b/src/coreclr/src/pal/src/arch/amd64/activationhandlerwrapper.S index 63f718e81f096..0ac73fceb6be1 100644 --- a/src/coreclr/src/pal/src/arch/amd64/activationhandlerwrapper.S +++ b/src/coreclr/src/pal/src/arch/amd64/activationhandlerwrapper.S @@ -6,7 +6,7 @@ #include "unixasmmacros.inc" #include "asmconstants.h" -#ifdef BIT64 +#ifdef HOST_64BIT // Offset of the return address from the ActivationHandler in the ActivationHandlerWrapper .globl C_FUNC(ActivationHandlerReturnOffset) C_FUNC(ActivationHandlerReturnOffset): @@ -27,4 +27,4 @@ LOCAL_LABEL(ActivationHandlerReturn): ret NESTED_END ActivationHandlerWrapper, _TEXT -#endif // BIT64 +#endif // HOST_64BIT diff --git a/src/coreclr/src/pal/src/arch/amd64/asmconstants.h b/src/coreclr/src/pal/src/arch/amd64/asmconstants.h index 182c1191e4829..71b584a51cc1c 100644 --- a/src/coreclr/src/pal/src/arch/amd64/asmconstants.h +++ b/src/coreclr/src/pal/src/arch/amd64/asmconstants.h @@ -2,7 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
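
With the PAL's CMakeLists.txt above no longer defining PLATFORM_UNIX and FEATURE_PAL itself, out-of-tree code that still spells the old macros has to migrate to the new names. A bridge header for a staged migration could look like the sketch below; it is hypothetical and not something this change adds.

    // targetcompat.h: hypothetical shim, not part of this change.
    // Lets legacy spellings keep compiling while out-of-tree callers migrate.
    #pragma once

    #if defined(TARGET_UNIX) && !defined(FEATURE_PAL)
    #define FEATURE_PAL 1
    #endif

    #if defined(HOST_UNIX) && !defined(PLATFORM_UNIX)
    #define PLATFORM_UNIX 1
    #endif

    #if defined(HOST_64BIT) && !defined(BIT64)
    #define BIT64 1
    #endif

    #if defined(TARGET_AMD64) && !defined(_TARGET_AMD64_)
    #define _TARGET_AMD64_ 1
    #endif

The change itself takes the more thorough route and renames every in-tree use instead, so no such shim appears in the diff.
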
-#ifdef BIT64 +#ifdef HOST_64BIT #define CONTEXT_AMD64 0x100000 @@ -74,7 +74,7 @@ #define CONTEXT_LastExceptionFromRip CONTEXT_LastExceptionToRip+8 #define CONTEXT_Size CONTEXT_LastExceptionFromRip+8 -#else // BIT64 +#else // HOST_64BIT #define CONTEXT_ContextFlags 0 #define CONTEXT_FLOATING_POINT 8 @@ -103,4 +103,4 @@ #define CONTEXT_Xmm6 CONTEXT_Xmm5+16 #define CONTEXT_Xmm7 CONTEXT_Xmm6+16 -#endif // BIT64 +#endif // HOST_64BIT diff --git a/src/coreclr/src/pal/src/arch/amd64/context2.S b/src/coreclr/src/pal/src/arch/amd64/context2.S index 9112ae6248244..b2a23917de816 100644 --- a/src/coreclr/src/pal/src/arch/amd64/context2.S +++ b/src/coreclr/src/pal/src/arch/amd64/context2.S @@ -11,7 +11,7 @@ #include "unixasmmacros.inc" #include "asmconstants.h" -#ifdef BIT64 +#ifdef HOST_64BIT #define IRETFRAME_Rip 0 #define IRETFRAME_SegCs IRETFRAME_Rip+8 diff --git a/src/coreclr/src/pal/src/cruntime/file.cpp b/src/coreclr/src/pal/src/cruntime/file.cpp index d4c61e915fd3a..e481d43f6b49e 100644 --- a/src/coreclr/src/pal/src/cruntime/file.cpp +++ b/src/coreclr/src/pal/src/cruntime/file.cpp @@ -644,7 +644,7 @@ PAL_ftell(PAL_FILE * f) _ASSERTE(f != NULL); lRetVal = ftell( f->bsdFilePtr ); -#ifdef BIT64 +#ifdef HOST_64BIT /* Windows does not set an error if the file pointer's position is greater than _I32_MAX. It just returns -1. */ if (lRetVal > _I32_MAX) diff --git a/src/coreclr/src/pal/src/cruntime/misc.cpp b/src/coreclr/src/pal/src/cruntime/misc.cpp index 194874f93d5a8..2a569a6b7691c 100644 --- a/src/coreclr/src/pal/src/cruntime/misc.cpp +++ b/src/coreclr/src/pal/src/cruntime/misc.cpp @@ -31,9 +31,9 @@ Module Name: #include #include -#if defined(_AMD64_) || defined(_x86_) +#if defined(HOST_AMD64) || defined(_x86_) #include -#endif // defined(_AMD64_) || defined(_x86_) +#endif // defined(HOST_AMD64) || defined(_x86_) #if defined(_DEBUG) #include #endif //defined(_DEBUG) @@ -241,7 +241,7 @@ PAL_bsearch(const void *key, const void *base, size_t nmemb, size_t size, return retval; } -#ifdef _AMD64_ +#ifdef HOST_AMD64 PALIMPORT unsigned int PAL__mm_getcsr(void) @@ -255,7 +255,7 @@ void PAL__mm_setcsr(unsigned int i) _mm_setcsr(i); } -#endif // _AMD64_ +#endif // HOST_AMD64 /*++ Function: diff --git a/src/coreclr/src/pal/src/cruntime/printf.cpp b/src/coreclr/src/pal/src/cruntime/printf.cpp index 05f7660678554..f0d5b1c1b64af 100644 --- a/src/coreclr/src/pal/src/cruntime/printf.cpp +++ b/src/coreclr/src/pal/src/cruntime/printf.cpp @@ -323,7 +323,7 @@ static BOOL Internal_ScanfExtractFormatA(LPCSTR *Fmt, LPSTR Out, int iOutSize, L } } -#ifdef BIT64 +#ifdef HOST_64BIT if (**Fmt == 'p') { *Prefix = SCANF_PREFIX_LONGLONG; @@ -346,7 +346,7 @@ static BOOL Internal_ScanfExtractFormatA(LPCSTR *Fmt, LPSTR Out, int iOutSize, L else if (**Fmt == 'l' || **Fmt == 'w') { ++(*Fmt); -#ifdef BIT64 +#ifdef HOST_64BIT // Only want to change the prefix on 64 bit when inputing characters. if (**Fmt == 'c' || **Fmt == 's') #endif @@ -639,7 +639,7 @@ static BOOL Internal_ScanfExtractFormatW(LPCWSTR *Fmt, LPSTR Out, int iOutSize, } } -#ifdef BIT64 +#ifdef HOST_64BIT if (**Fmt == 'p') { *Prefix = SCANF_PREFIX_LONGLONG; @@ -662,7 +662,7 @@ static BOOL Internal_ScanfExtractFormatW(LPCWSTR *Fmt, LPSTR Out, int iOutSize, else if (**Fmt == 'l' || **Fmt == 'w') { ++(*Fmt); -#ifdef BIT64 +#ifdef HOST_64BIT // Only want to change the prefix on 64 bit when inputing characters. 
if (**Fmt == 'C' || **Fmt == 'S') #endif diff --git a/src/coreclr/src/pal/src/cruntime/printfcpp.cpp b/src/coreclr/src/pal/src/cruntime/printfcpp.cpp index 71f98474c652d..037c3e9910084 100644 --- a/src/coreclr/src/pal/src/cruntime/printfcpp.cpp +++ b/src/coreclr/src/pal/src/cruntime/printfcpp.cpp @@ -280,7 +280,7 @@ BOOL Internal_ExtractFormatA(CPalThread *pthrCurrent, LPCSTR *Fmt, LPSTR Out, LP } } -#ifdef BIT64 +#ifdef HOST_64BIT if (**Fmt == 'p') { *Prefix = PFF_PREFIX_LONGLONG; @@ -303,7 +303,7 @@ BOOL Internal_ExtractFormatA(CPalThread *pthrCurrent, LPCSTR *Fmt, LPSTR Out, LP else { ++(*Fmt); - #ifdef BIT64 + #ifdef HOST_64BIT /* convert to 'll' so that Unix snprintf can handle it */ *Prefix = PFF_PREFIX_LONGLONG; #endif @@ -319,7 +319,7 @@ BOOL Internal_ExtractFormatA(CPalThread *pthrCurrent, LPCSTR *Fmt, LPSTR Out, LP else if (**Fmt == 'l' || **Fmt == 'w') { ++(*Fmt); -#ifdef BIT64 +#ifdef HOST_64BIT // Only want to change the prefix on 64 bit when printing characters. if (**Fmt == 'c' || **Fmt == 's') #endif @@ -584,7 +584,7 @@ BOOL Internal_ExtractFormatW(CPalThread *pthrCurrent, LPCWSTR *Fmt, LPSTR Out, L } } -#ifdef BIT64 +#ifdef HOST_64BIT if (**Fmt == 'p') { *Prefix = PFF_PREFIX_LONGLONG; @@ -607,7 +607,7 @@ BOOL Internal_ExtractFormatW(CPalThread *pthrCurrent, LPCWSTR *Fmt, LPSTR Out, L else { ++(*Fmt); - #ifdef BIT64 + #ifdef HOST_64BIT /* convert to 'll' so that Unix snprintf can handle it */ *Prefix = PFF_PREFIX_LONGLONG; #endif @@ -622,7 +622,7 @@ BOOL Internal_ExtractFormatW(CPalThread *pthrCurrent, LPCWSTR *Fmt, LPSTR Out, L else if (**Fmt == 'l' || **Fmt == 'w') { ++(*Fmt); - #ifdef BIT64 + #ifdef HOST_64BIT // Only want to change the prefix on 64 bit when printing characters. if (**Fmt == 'C' || **Fmt == 'S') #endif diff --git a/src/coreclr/src/pal/src/cruntime/silent_printf.cpp b/src/coreclr/src/pal/src/cruntime/silent_printf.cpp index 96cc9588dab22..9a34a396bc6e3 100644 --- a/src/coreclr/src/pal/src/cruntime/silent_printf.cpp +++ b/src/coreclr/src/pal/src/cruntime/silent_printf.cpp @@ -465,7 +465,7 @@ BOOL Silent_ExtractFormatA(LPCSTR *Fmt, LPSTR Out, LPINT Flags, LPINT Width, LPI } } -#ifdef BIT64 +#ifdef HOST_64BIT if (**Fmt == 'p') { *Prefix = PFF_PREFIX_LONGLONG; @@ -488,7 +488,7 @@ BOOL Silent_ExtractFormatA(LPCSTR *Fmt, LPSTR Out, LPINT Flags, LPINT Width, LPI else if (**Fmt == 'l' || **Fmt == 'w') { ++(*Fmt); -#ifdef BIT64 +#ifdef HOST_64BIT // Only want to change the prefix on 64 bit when printing characters. 
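
The HOST_64BIT branches in the format-extraction routines above give the 'p' conversion the long-long prefix (PFF_PREFIX_LONGLONG) so that a pointer argument is consumed at full width on a 64-bit host. The sketch below shows the underlying issue in isolation; the helper is hypothetical and not the PAL's code.

    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: fetch a pointer passed through varargs and print it
    // through an integer conversion wide enough for the host.
    static void PrintPointerArg(const char* label, ...)
    {
        va_list ap;
        va_start(ap, label);
        void* p = va_arg(ap, void*);
        va_end(ap);
    #if UINTPTR_MAX == UINT64_MAX   // plays the role of HOST_64BIT
        printf("%s%016llx\n", label, (unsigned long long)(uintptr_t)p);
    #else
        printf("%s%08lx\n", label, (unsigned long)(uintptr_t)p);
    #endif
    }

    int main()
    {
        int x = 0;
        PrintPointerArg("addr=0x", (void*)&x);
        return 0;
    }
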
if (**Fmt == 'c' || **Fmt == 's') #endif diff --git a/src/coreclr/src/pal/src/cruntime/string.cpp b/src/coreclr/src/pal/src/cruntime/string.cpp index 0b66f4a2f4860..04c1918e9dcd6 100644 --- a/src/coreclr/src/pal/src/cruntime/string.cpp +++ b/src/coreclr/src/pal/src/cruntime/string.cpp @@ -170,7 +170,7 @@ PAL_strtoul(const char *szNumber, char **pszEnd, int nBase) ulResult = strtoul(szNumber, pszEnd, nBase); -#ifdef BIT64 +#ifdef HOST_64BIT if (ulResult > UINT32_MAX) { char ch = *szNumber; diff --git a/src/coreclr/src/pal/src/cruntime/wchar.cpp b/src/coreclr/src/pal/src/cruntime/wchar.cpp index 2924fd9cf74a7..01e05cec3fb6b 100644 --- a/src/coreclr/src/pal/src/cruntime/wchar.cpp +++ b/src/coreclr/src/pal/src/cruntime/wchar.cpp @@ -331,7 +331,7 @@ PAL_wcstoul( res = strtoul(s_nptr, &s_endptr, base); -#ifdef BIT64 +#ifdef HOST_64BIT if (res > UINT32_MAX) { wchar_16 wc = *nptr; diff --git a/src/coreclr/src/pal/src/debug/debug.cpp b/src/coreclr/src/pal/src/debug/debug.cpp index 662f31911362c..72b5173262902 100644 --- a/src/coreclr/src/pal/src/debug/debug.cpp +++ b/src/coreclr/src/pal/src/debug/debug.cpp @@ -20,7 +20,7 @@ Revision History: --*/ -#ifndef BIT64 +#ifndef HOST_64BIT #undef _LARGEFILE64_SOURCE #undef _FILE_OFFSET_BITS #endif diff --git a/src/coreclr/src/pal/src/exception/machexception.cpp b/src/coreclr/src/pal/src/exception/machexception.cpp index befbf672b3915..5bf4218b40c2e 100644 --- a/src/coreclr/src/pal/src/exception/machexception.cpp +++ b/src/coreclr/src/pal/src/exception/machexception.cpp @@ -356,17 +356,17 @@ PAL_ERROR CorUnix::CPalThread::DisableMachExceptions() return palError; } -#if !defined(_AMD64_) +#if !defined(HOST_AMD64) extern "C" void PAL_DispatchException(PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo) -#else // defined(_AMD64_) +#else // defined(HOST_AMD64) // Since HijackFaultingThread pushed the context, exception record and info on the stack, we need to adjust the // signature of PAL_DispatchException such that the corresponding arguments are considered to be on the stack // per GCC64 calling convention rules. Hence, the first 6 dummy arguments (corresponding to RDI, RSI, RDX,RCX, R8, R9). extern "C" void PAL_DispatchException(DWORD64 dwRDI, DWORD64 dwRSI, DWORD64 dwRDX, DWORD64 dwRCX, DWORD64 dwR8, DWORD64 dwR9, PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo) -#endif // !defined(_AMD64_) +#endif // !defined(HOST_AMD64) { CPalThread *pThread = InternalGetCurrentThread(); @@ -413,10 +413,10 @@ void PAL_DispatchException(DWORD64 dwRDI, DWORD64 dwRSI, DWORD64 dwRDX, DWORD64 } } -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) extern "C" void PAL_DispatchExceptionWrapper(); extern "C" int PAL_DispatchExceptionReturnOffset; -#endif // _X86_ || _AMD64_ +#endif // HOST_X86 || HOST_AMD64 /*++ Function : @@ -476,7 +476,7 @@ BuildExceptionRecord( { switch (exceptionInfo.Subcodes[0]) { -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) case EXC_I386_DIV: exceptionCode = EXCEPTION_INT_DIVIDE_BY_ZERO; break; @@ -500,7 +500,7 @@ BuildExceptionRecord( break; case EXC_SOFTWARE: -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION; break; #else @@ -509,7 +509,7 @@ BuildExceptionRecord( // Trace, breakpoint, etc. Details in subcode field. 
case EXC_BREAKPOINT: -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) if (exceptionInfo.Subcodes[0] == EXC_I386_SGL) { exceptionCode = EXCEPTION_SINGLE_STEP; @@ -614,7 +614,7 @@ HijackFaultingThread( // Fill in the exception record from the exception info BuildExceptionRecord(exceptionInfo, &exceptionRecord); -#ifdef _X86_ +#ifdef HOST_X86 threadContext.ContextFlags = CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS; #else threadContext.ContextFlags = CONTEXT_FLOATING_POINT; @@ -624,7 +624,7 @@ HijackFaultingThread( threadContext.ContextFlags |= CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS; CONTEXT_GetThreadContextFromThreadState(x86_THREAD_STATE, (thread_state_t)&exceptionInfo.ThreadState, &threadContext); -#if defined(CORECLR) && (defined(_X86_) || defined(_AMD64_)) +#if defined(CORECLR) && (defined(HOST_X86) || defined(HOST_AMD64)) // For CoreCLR we look more deeply at access violations to determine whether they're the result of a stack // overflow. If so we'll terminate the process immediately (the current default policy of the CoreCLR EE). // Otherwise we'll either A/V ourselves trying to set up the SEH exception record and context on the @@ -670,7 +670,7 @@ HijackFaultingThread( // corrupted). Our managed jits always generate code which does this as does MSVC. GCC, however, // does not do this by default. We have to explicitly provide the -fstack-check compiler option // to enable the behavior. -#if (defined(_X86_) || defined(_AMD64_)) && defined(__APPLE__) +#if (defined(HOST_X86) || defined(HOST_AMD64)) && defined(__APPLE__) if (exceptionRecord.ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { // Assume this AV isn't an SO to begin with. @@ -679,9 +679,9 @@ HijackFaultingThread( // Calculate the page base addresses for the fault and the faulting thread's SP. int cbPage = getpagesize(); char *pFaultPage = (char*)(exceptionRecord.ExceptionInformation[1] & ~(cbPage - 1)); -#ifdef _X86_ +#ifdef HOST_X86 char *pStackTopPage = (char*)(threadContext.Esp & ~(cbPage - 1)); -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) char *pStackTopPage = (char*)(threadContext.Rsp & ~(cbPage - 1)); #endif @@ -698,7 +698,7 @@ HijackFaultingThread( vm_size_t vm_size; vm_region_flavor_t vm_flavor; mach_msg_type_number_t infoCnt; -#ifdef BIT64 +#ifdef HOST_64BIT vm_region_basic_info_data_64_t info; infoCnt = VM_REGION_BASIC_INFO_COUNT_64; vm_flavor = VM_REGION_BASIC_INFO_64; @@ -711,7 +711,7 @@ HijackFaultingThread( vm_address = (vm_address_t)(pFaultPage + cbPage); -#ifdef BIT64 +#ifdef HOST_64BIT machret = vm_region_64( #else machret = vm_region( @@ -723,9 +723,9 @@ HijackFaultingThread( (vm_region_info_t)&info, &infoCnt, &object_name); -#ifdef _X86_ +#ifdef HOST_X86 CHECK_MACH("vm_region", machret); -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) CHECK_MACH("vm_region_64", machret); #endif @@ -737,7 +737,7 @@ HijackFaultingThread( fIsStackOverflow = true; } -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (!fIsStackOverflow) { // Check if we can read pointer sizeD bytes below the target thread's stack pointer. 
@@ -754,7 +754,7 @@ HijackFaultingThread( fIsStackOverflow = true; } } -#endif // _AMD64_ +#endif // HOST_AMD64 if (fIsStackOverflow) { @@ -774,12 +774,12 @@ HijackFaultingThread( abort(); } } -#else // (_X86_ || _AMD64_) && __APPLE__ +#else // (HOST_X86 || HOST_AMD64) && __APPLE__ #error Platform not supported for correct stack overflow handling -#endif // (_X86_ || _AMD64_) && __APPLE__ -#endif // CORECLR && _X86_ +#endif // (HOST_X86 || HOST_AMD64) && __APPLE__ +#endif // CORECLR && HOST_X86 -#if defined(_X86_) +#if defined(HOST_X86) NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE32); // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore @@ -844,7 +844,7 @@ HijackFaultingThread( // Now set the thread state for the faulting thread so that PAL_DispatchException executes next machret = thread_set_state(thread, x86_THREAD_STATE32, (thread_state_t)&ts32, x86_THREAD_STATE32_COUNT); CHECK_MACH("thread_set_state(thread)", machret); -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE64); // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore @@ -1224,9 +1224,9 @@ void MachExceptionInfo::RestoreState(mach_port_t thread) { if (Subcodes[0] == EXC_I386_BPT) { -#ifdef _X86_ +#ifdef HOST_X86 ThreadState.uts.ts32.eip--; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) ThreadState.uts.ts64.__rip--; #else #error Platform not supported diff --git a/src/coreclr/src/pal/src/exception/machmessage.cpp b/src/coreclr/src/pal/src/exception/machmessage.cpp index 0ca177b651663..aa83687e1dd1d 100644 --- a/src/coreclr/src/pal/src/exception/machmessage.cpp +++ b/src/coreclr/src/pal/src/exception/machmessage.cpp @@ -998,7 +998,7 @@ thread_act_t MachMessage::GetThreadFromState(thread_state_flavor_t eFlavor, thre // thread). switch (eFlavor) { -#ifdef _X86_ +#ifdef HOST_X86 case x86_THREAD_STATE: targetSP = ((x86_thread_state_t*)pState)->uts.ts32.esp; break; @@ -1006,7 +1006,7 @@ thread_act_t MachMessage::GetThreadFromState(thread_state_flavor_t eFlavor, thre case x86_THREAD_STATE32: targetSP = ((x86_thread_state32_t*)pState)->esp; break; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) case x86_THREAD_STATE: targetSP = ((x86_thread_state_t*)pState)->uts.ts64.__rsp; break; @@ -1041,9 +1041,9 @@ thread_act_t MachMessage::GetThreadFromState(thread_state_flavor_t eFlavor, thre // threads sharing the same stack which is very bad). Conversely the thread we're looking for is // suspended in the kernel so its SP should not change. We should always be able to find an exact // match as a result. -#ifdef _X86_ +#ifdef HOST_X86 if (threadState.uts.ts32.esp == targetSP) -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) if (threadState.uts.ts64.__rsp == targetSP) #else #error Unexpected architecture. 
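The hunks above swap the old architecture spellings (_AMD64_, _X86_, BIT64) for HOST_* names, while the earlier cmake changes introduce matching TARGET_* definitions. A minimal standalone sketch of how the two macro families are meant to be read; the #define values below are purely illustrative stand-ins for what the build system would normally pass in, and the cross-compilation combination is a hypothetical example, not something this patch configures:

// Illustrative sketch only: HOST_* describes the machine the binary runs on,
// TARGET_* describes the platform code is built or generated for.
// The defines below are hypothetical; normally they come from CMake.
#include <cstdio>

#define HOST_AMD64 1
#define HOST_64BIT 1
#define TARGET_ARM64 1   // hypothetical cross-build: x64 host, ARM64 target
#define TARGET_UNIX 1

int main()
{
#if defined(HOST_64BIT)
    std::printf("host: 64-bit\n");
#endif
#if defined(HOST_AMD64) && defined(TARGET_ARM64)
    std::printf("cross build: x64 host emitting ARM64 code\n");
#endif
#if defined(TARGET_UNIX)
    std::printf("target OS: Unix-like\n");
#endif
    return 0;
}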
diff --git a/src/coreclr/src/pal/src/exception/machmessage.h b/src/coreclr/src/pal/src/exception/machmessage.h index 11e20a5750507..91d4c976561fc 100644 --- a/src/coreclr/src/pal/src/exception/machmessage.h +++ b/src/coreclr/src/pal/src/exception/machmessage.h @@ -22,11 +22,11 @@ using namespace CorUnix; #if HAVE_MACH_EXCEPTIONS -#if defined(_AMD64_) +#if defined(HOST_AMD64) #define MACH_EH_TYPE(x) mach_##x #else #define MACH_EH_TYPE(x) x -#endif // defined(_AMD64_) +#endif // defined(HOST_AMD64) // The vast majority of Mach calls we make in this module are critical: we cannot recover from failures of // these methods (principally because we're handling hardware exceptions in the context of a single dedicated diff --git a/src/coreclr/src/pal/src/exception/remote-unwind.cpp b/src/coreclr/src/pal/src/exception/remote-unwind.cpp index 8864e3f95bf14..f1251fd7a0ab4 100644 --- a/src/coreclr/src/pal/src/exception/remote-unwind.cpp +++ b/src/coreclr/src/pal/src/exception/remote-unwind.cpp @@ -69,13 +69,13 @@ SET_DEFAULT_DEBUG_CHANNEL(EXCEPT); #include #include -#if defined(_X86_) || defined(_ARM_) +#if defined(HOST_X86) || defined(HOST_ARM) #define PRIx PRIx32 #define PRIu PRIu32 #define PRId PRId32 #define PRIA "08" #define PRIxA PRIA PRIx -#elif defined(_AMD64_) || defined(_ARM64_) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) #define PRIx PRIx64 #define PRIu PRIu64 #define PRId PRId64 @@ -120,7 +120,7 @@ typedef struct _libunwindInfo static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Rip); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Rsp); unw_get_reg(cursor, UNW_X86_64_RBP, (unw_word_t *) &winContext->Rbp); @@ -129,14 +129,14 @@ static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext) unw_get_reg(cursor, UNW_X86_64_R13, (unw_word_t *) &winContext->R13); unw_get_reg(cursor, UNW_X86_64_R14, (unw_word_t *) &winContext->R14); unw_get_reg(cursor, UNW_X86_64_R15, (unw_word_t *) &winContext->R15); -#elif defined(_X86_) +#elif defined(HOST_X86) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Eip); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Esp); unw_get_reg(cursor, UNW_X86_EBP, (unw_word_t *) &winContext->Ebp); unw_get_reg(cursor, UNW_X86_EBX, (unw_word_t *) &winContext->Ebx); unw_get_reg(cursor, UNW_X86_ESI, (unw_word_t *) &winContext->Esi); unw_get_reg(cursor, UNW_X86_EDI, (unw_word_t *) &winContext->Edi); -#elif defined(_ARM_) +#elif defined(HOST_ARM) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp); unw_get_reg(cursor, UNW_ARM_R4, (unw_word_t *) &winContext->R4); @@ -149,7 +149,7 @@ static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext) unw_get_reg(cursor, UNW_ARM_R11, (unw_word_t *) &winContext->R11); unw_get_reg(cursor, UNW_ARM_R14, (unw_word_t *) &winContext->Lr); TRACE("sp %p pc %p lr %p\n", winContext->Sp, winContext->Pc, winContext->Lr); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp); unw_get_reg(cursor, UNW_AARCH64_X19, (unw_word_t *) &winContext->X19); @@ -210,7 +210,7 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write switch (regnum) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) case UNW_REG_IP: *valp = 
(unw_word_t)winContext->Rip; break; case UNW_REG_SP: *valp = (unw_word_t)winContext->Rsp; break; case UNW_X86_64_RBP: *valp = (unw_word_t)winContext->Rbp; break; @@ -219,14 +219,14 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write case UNW_X86_64_R13: *valp = (unw_word_t)winContext->R13; break; case UNW_X86_64_R14: *valp = (unw_word_t)winContext->R14; break; case UNW_X86_64_R15: *valp = (unw_word_t)winContext->R15; break; -#elif defined(_X86_) +#elif defined(HOST_X86) case UNW_REG_IP: *valp = (unw_word_t)winContext->Eip; break; case UNW_REG_SP: *valp = (unw_word_t)winContext->Esp; break; case UNW_X86_EBX: *valp = (unw_word_t)winContext->Ebx; break; case UNW_X86_ESI: *valp = (unw_word_t)winContext->Esi; break; case UNW_X86_EDI: *valp = (unw_word_t)winContext->Edi; break; case UNW_X86_EBP: *valp = (unw_word_t)winContext->Ebp; break; -#elif defined(_ARM_) +#elif defined(HOST_ARM) case UNW_ARM_R4: *valp = (unw_word_t)winContext->R4; break; case UNW_ARM_R5: *valp = (unw_word_t)winContext->R5; break; case UNW_ARM_R6: *valp = (unw_word_t)winContext->R6; break; @@ -238,7 +238,7 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write case UNW_ARM_R13: *valp = (unw_word_t)winContext->Sp; break; case UNW_ARM_R14: *valp = (unw_word_t)winContext->Lr; break; case UNW_ARM_R15: *valp = (unw_word_t)winContext->Pc; break; -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) case UNW_AARCH64_X19: *valp = (unw_word_t)winContext->X19; break; case UNW_AARCH64_X20: *valp = (unw_word_t)winContext->X20; break; case UNW_AARCH64_X21: *valp = (unw_word_t)winContext->X21; break; @@ -477,4 +477,4 @@ PAL_VirtualUnwindOutOfProc(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *cont return FALSE; } -#endif // defined(_AMD64_) && defined(HAVE_UNW_GET_ACCESSORS) +#endif // defined(HOST_AMD64) && defined(HAVE_UNW_GET_ACCESSORS) diff --git a/src/coreclr/src/pal/src/exception/seh-unwind.cpp b/src/coreclr/src/pal/src/exception/seh-unwind.cpp index fbb1a1947aea0..53c86a0045384 100644 --- a/src/coreclr/src/pal/src/exception/seh-unwind.cpp +++ b/src/coreclr/src/pal/src/exception/seh-unwind.cpp @@ -42,7 +42,7 @@ Module Name: #if UNWIND_CONTEXT_IS_UCONTEXT_T -#if defined(_AMD64_) +#if defined(HOST_AMD64) #define ASSIGN_UNWIND_REGS \ ASSIGN_REG(Rip) \ ASSIGN_REG(Rsp) \ @@ -52,7 +52,7 @@ Module Name: ASSIGN_REG(R13) \ ASSIGN_REG(R14) \ ASSIGN_REG(R15) -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) #define ASSIGN_UNWIND_REGS \ ASSIGN_REG(Pc) \ ASSIGN_REG(Sp) \ @@ -68,7 +68,7 @@ Module Name: ASSIGN_REG(X26) \ ASSIGN_REG(X27) \ ASSIGN_REG(X28) -#elif defined(_X86_) +#elif defined(HOST_X86) #define ASSIGN_UNWIND_REGS \ ASSIGN_REG(Eip) \ ASSIGN_REG(Esp) \ @@ -89,7 +89,7 @@ static void WinContextToUnwindContext(CONTEXT *winContext, unw_context_t *unwCon #else static void WinContextToUnwindContext(CONTEXT *winContext, unw_context_t *unwContext) { -#if defined(_ARM_) +#if defined(HOST_ARM) // Assuming that unw_set_reg() on cursor will point the cursor to the // supposed stack frame is dangerous for libunwind-arm in Linux. 
// It is because libunwind's unw_cursor_t has other data structure @@ -116,7 +116,7 @@ static void WinContextToUnwindContext(CONTEXT *winContext, unw_context_t *unwCon static void WinContextToUnwindCursor(CONTEXT *winContext, unw_cursor_t *cursor) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) unw_set_reg(cursor, UNW_REG_IP, winContext->Rip); unw_set_reg(cursor, UNW_REG_SP, winContext->Rsp); unw_set_reg(cursor, UNW_X86_64_RBP, winContext->Rbp); @@ -125,7 +125,7 @@ static void WinContextToUnwindCursor(CONTEXT *winContext, unw_cursor_t *cursor) unw_set_reg(cursor, UNW_X86_64_R13, winContext->R13); unw_set_reg(cursor, UNW_X86_64_R14, winContext->R14); unw_set_reg(cursor, UNW_X86_64_R15, winContext->R15); -#elif defined(_X86_) +#elif defined(HOST_X86) unw_set_reg(cursor, UNW_REG_IP, winContext->Eip); unw_set_reg(cursor, UNW_REG_SP, winContext->Esp); unw_set_reg(cursor, UNW_X86_EBP, winContext->Ebp); @@ -138,7 +138,7 @@ static void WinContextToUnwindCursor(CONTEXT *winContext, unw_cursor_t *cursor) void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Rip); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Rsp); unw_get_reg(cursor, UNW_X86_64_RBP, (unw_word_t *) &winContext->Rbp); @@ -147,14 +147,14 @@ void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext) unw_get_reg(cursor, UNW_X86_64_R13, (unw_word_t *) &winContext->R13); unw_get_reg(cursor, UNW_X86_64_R14, (unw_word_t *) &winContext->R14); unw_get_reg(cursor, UNW_X86_64_R15, (unw_word_t *) &winContext->R15); -#elif defined(_X86_) +#elif defined(HOST_X86) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Eip); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Esp); unw_get_reg(cursor, UNW_X86_EBP, (unw_word_t *) &winContext->Ebp); unw_get_reg(cursor, UNW_X86_EBX, (unw_word_t *) &winContext->Ebx); unw_get_reg(cursor, UNW_X86_ESI, (unw_word_t *) &winContext->Esi); unw_get_reg(cursor, UNW_X86_EDI, (unw_word_t *) &winContext->Edi); -#elif defined(_ARM_) +#elif defined(HOST_ARM) unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp); unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc); unw_get_reg(cursor, UNW_ARM_R14, (unw_word_t *) &winContext->Lr); @@ -166,7 +166,7 @@ void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext) unw_get_reg(cursor, UNW_ARM_R9, (unw_word_t *) &winContext->R9); unw_get_reg(cursor, UNW_ARM_R10, (unw_word_t *) &winContext->R10); unw_get_reg(cursor, UNW_ARM_R11, (unw_word_t *) &winContext->R11); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc); unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp); unw_get_reg(cursor, UNW_AARCH64_X29, (unw_word_t *) &winContext->Fp); @@ -206,19 +206,19 @@ static void GetContextPointer(unw_cursor_t *cursor, unw_context_t *unwContext, i void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext, KNONVOLATILE_CONTEXT_POINTERS *contextPointers) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) GetContextPointer(cursor, unwContext, UNW_X86_64_RBP, &contextPointers->Rbp); GetContextPointer(cursor, unwContext, UNW_X86_64_RBX, &contextPointers->Rbx); GetContextPointer(cursor, unwContext, UNW_X86_64_R12, &contextPointers->R12); GetContextPointer(cursor, unwContext, UNW_X86_64_R13, &contextPointers->R13); GetContextPointer(cursor, unwContext, UNW_X86_64_R14, &contextPointers->R14); 
GetContextPointer(cursor, unwContext, UNW_X86_64_R15, &contextPointers->R15); -#elif defined(_X86_) +#elif defined(HOST_X86) GetContextPointer(cursor, unwContext, UNW_X86_EBX, &contextPointers->Ebx); GetContextPointer(cursor, unwContext, UNW_X86_EBP, &contextPointers->Ebp); GetContextPointer(cursor, unwContext, UNW_X86_ESI, &contextPointers->Esi); GetContextPointer(cursor, unwContext, UNW_X86_EDI, &contextPointers->Edi); -#elif defined(_ARM_) +#elif defined(HOST_ARM) GetContextPointer(cursor, unwContext, UNW_ARM_R4, &contextPointers->R4); GetContextPointer(cursor, unwContext, UNW_ARM_R5, &contextPointers->R5); GetContextPointer(cursor, unwContext, UNW_ARM_R6, &contextPointers->R6); @@ -227,7 +227,7 @@ void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext, KNONVOL GetContextPointer(cursor, unwContext, UNW_ARM_R9, &contextPointers->R9); GetContextPointer(cursor, unwContext, UNW_ARM_R10, &contextPointers->R10); GetContextPointer(cursor, unwContext, UNW_ARM_R11, &contextPointers->R11); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) GetContextPointer(cursor, unwContext, UNW_AARCH64_X19, &contextPointers->X19); GetContextPointer(cursor, unwContext, UNW_AARCH64_X20, &contextPointers->X20); GetContextPointer(cursor, unwContext, UNW_AARCH64_X21, &contextPointers->X21); @@ -313,16 +313,16 @@ BOOL PAL_VirtualUnwind(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *contextP if (unw_is_signal_frame(&cursor) > 0) { context->ContextFlags |= CONTEXT_EXCEPTION_ACTIVE; -#if defined(_ARM_) || defined(_ARM64_) || defined(_X86_) +#if defined(HOST_ARM) || defined(HOST_ARM64) || defined(HOST_X86) context->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; -#endif // _ARM_ || _ARM64_ +#endif // HOST_ARM || HOST_ARM64 } else { context->ContextFlags &= ~CONTEXT_EXCEPTION_ACTIVE; -#if defined(_ARM_) || defined(_ARM64_) || defined(_X86_) +#if defined(HOST_ARM) || defined(HOST_ARM64) || defined(HOST_X86) context->ContextFlags |= CONTEXT_UNWOUND_TO_CALL; -#endif // _ARM_ || _ARM64_ +#endif // HOST_ARM || HOST_ARM64 } // Update the passed in windows context to reflect the unwind diff --git a/src/coreclr/src/pal/src/exception/seh.cpp b/src/coreclr/src/pal/src/exception/seh.cpp index 8aefe5a10e6ee..ef95f5e354a3d 100644 --- a/src/coreclr/src/pal/src/exception/seh.cpp +++ b/src/coreclr/src/pal/src/exception/seh.cpp @@ -190,11 +190,11 @@ PAL_ThrowExceptionFromContext(CONTEXT* context, PAL_SEHException* ex) PAL_SEHException* ex - the exception to throw. 
--*/ extern "C" -#ifdef _X86_ +#ifdef HOST_X86 void __fastcall ThrowExceptionHelper(PAL_SEHException* ex) -#else // _X86_ +#else // HOST_X86 void ThrowExceptionHelper(PAL_SEHException* ex) -#endif // !_X86_ +#endif // !HOST_X86 { throw std::move(*ex); } diff --git a/src/coreclr/src/pal/src/exception/signal.cpp b/src/coreclr/src/pal/src/exception/signal.cpp index 4110263d41976..49fdec8db89e4 100644 --- a/src/coreclr/src/pal/src/exception/signal.cpp +++ b/src/coreclr/src/pal/src/exception/signal.cpp @@ -791,7 +791,7 @@ static bool common_signal_handler(int code, siginfo_t *siginfo, void *sigcontext ULONG contextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT; -#if defined(_AMD64_) +#if defined(HOST_AMD64) contextFlags |= CONTEXT_XSTATE; #endif diff --git a/src/coreclr/src/pal/src/file/file.cpp b/src/coreclr/src/pal/src/file/file.cpp index 54145f8ea24aa..20dcfc43326c0 100644 --- a/src/coreclr/src/pal/src/file/file.cpp +++ b/src/coreclr/src/pal/src/file/file.cpp @@ -332,7 +332,7 @@ CorUnix::InternalCanonicalizeRealPath(LPCSTR lpUnixPath, PathCharString& lpBuffe { WARN("realpath() failed with error %d\n", errno); palError = FILEGetLastErrorFromErrno(); -#if defined(_AMD64_) +#if defined(HOST_AMD64) // If we are here, then we tried to invoke realpath // against a directory. // @@ -354,7 +354,7 @@ CorUnix::InternalCanonicalizeRealPath(LPCSTR lpUnixPath, PathCharString& lpBuffe palError = ERROR_PATH_NOT_FOUND; } } -#endif // defined(_AMD64_) +#endif // defined(HOST_AMD64) goto LExit; } @@ -362,7 +362,7 @@ CorUnix::InternalCanonicalizeRealPath(LPCSTR lpUnixPath, PathCharString& lpBuffe } else { -#if defined(_AMD64_) +#if defined(HOST_AMD64) bool fSetFilename = true; // Since realpath implementation cannot handle inexistent filenames, // check if we are going to truncate the "/" corresponding to the @@ -386,7 +386,7 @@ CorUnix::InternalCanonicalizeRealPath(LPCSTR lpUnixPath, PathCharString& lpBuffe fSetFilename = false; } else -#endif // defined(_AMD64_) +#endif // defined(HOST_AMD64) *pchSeparator = '\0'; if (!RealPathHelper(lpExistingPath, lpBuffer)) @@ -394,7 +394,7 @@ CorUnix::InternalCanonicalizeRealPath(LPCSTR lpUnixPath, PathCharString& lpBuffe WARN("realpath() failed with error %d\n", errno); palError = FILEGetLastErrorFromErrno(); -#if defined(_AMD64_) +#if defined(HOST_AMD64) // If we are here, then we tried to invoke realpath // against a directory after stripping out the filename // from the original path. 
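The InternalCanonicalizeRealPath hunks above only rename the #if guards, but the logic they sit in follows a pattern worth spelling out: realpath() fails when the final path component does not exist, so the code canonicalizes the parent directory and re-appends the filename afterwards. A rough, self-contained approximation of that idea, assuming POSIX realpath(); the helper name and the simplified error handling are invented for illustration and this is not the PAL implementation:

// Rough sketch of "canonicalize the directory, then re-append the filename",
// assuming POSIX realpath(). Illustration of the approach only.
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <cstdio>
#include <string>

static bool CanonicalizePossiblyMissingFile(const char* path, std::string& out)
{
    char resolved[PATH_MAX];
    if (realpath(path, resolved) != nullptr)
    {
        out = resolved;          // the whole path exists; done
        return true;
    }

    const char* sep = strrchr(path, '/');
    if (sep == nullptr)
    {
        return false;            // no directory part to fall back on
    }

    std::string dir(path, sep - path);
    if (dir.empty())
    {
        dir = "/";               // path like "/newfile"
    }
    if (realpath(dir.c_str(), resolved) == nullptr)
    {
        return false;            // even the parent does not exist
    }

    out = resolved;
    out += "/";
    out += sep + 1;              // re-append the (possibly nonexistent) filename
    return true;
}

int main()
{
    std::string canon;
    if (CanonicalizePossiblyMissingFile("/tmp/../tmp/newfile", canon))
    {
        std::printf("%s\n", canon.c_str());
    }
    return 0;
}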
@@ -417,21 +417,21 @@ CorUnix::InternalCanonicalizeRealPath(LPCSTR lpUnixPath, PathCharString& lpBuffe palError = ERROR_PATH_NOT_FOUND; } } -#endif // defined(_AMD64_) +#endif // defined(HOST_AMD64) goto LExit; } -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (fSetFilename == true) -#endif // defined(_AMD64_) +#endif // defined(HOST_AMD64) lpFilename = pchSeparator + 1; } -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (lpFilename == NULL) goto LExit; -#endif // _AMD64_ +#endif // HOST_AMD64 if (!lpBuffer.Append("/",1) || !lpBuffer.Append(lpFilename, strlen(lpFilename))) { diff --git a/src/coreclr/src/pal/src/include/pal/context.h b/src/coreclr/src/pal/src/include/pal/context.h index 9607df3603a02..482ec3471c5cd 100644 --- a/src/coreclr/src/pal/src/include/pal/context.h +++ b/src/coreclr/src/pal/src/include/pal/context.h @@ -56,7 +56,7 @@ using asm_sigcontext::_xstate; #if HAVE___GREGSET_T -#ifdef BIT64 +#ifdef HOST_64BIT #define MCREG_Rbx(mc) ((mc).__gregs[_REG_RBX]) #define MCREG_Rcx(mc) ((mc).__gregs[_REG_RCX]) #define MCREG_Rdx(mc) ((mc).__gregs[_REG_RDX]) @@ -92,7 +92,7 @@ using asm_sigcontext::_xstate; #define FPREG_MxCsr(uc) (((struct fxsave*)(&(uc)->uc_mcontext.__fpregs))->fx_mxcsr) #define FPREG_MxCsr_Mask(uc) (((struct fxsave*)(&(uc)->uc_mcontext.__fpregs))->fx_mxcsr_mask) -#else // BIT64 +#else // HOST_64BIT #define MCREG_Ebx(mc) ((mc).__gregs[_REG_EBX]) #define MCREG_Ecx(mc) ((mc).__gregs[_REG_ECX]) @@ -107,11 +107,11 @@ using asm_sigcontext::_xstate; #define MCREG_SegSs(mc) ((mc).__gregs[_REG_SS]) #define MCREG_EFlags(mc) ((mc).__gregs[_REG_RFLAGS]) -#endif // BIT64 +#endif // HOST_64BIT #elif HAVE_GREGSET_T -#ifdef BIT64 +#ifdef HOST_64BIT #define MCREG_Rbx(mc) ((mc).gregs[REG_RBX]) #define MCREG_Rcx(mc) ((mc).gregs[REG_RCX]) #define MCREG_Rdx(mc) ((mc).gregs[REG_RDX]) @@ -223,7 +223,7 @@ inline void *FPREG_Xstate_Ymmh(const ucontext_t *uc) ///////////////////// -#else // BIT64 +#else // HOST_64BIT #define MCREG_Ebx(mc) ((mc).gregs[REG_EBX]) #define MCREG_Ecx(mc) ((mc).gregs[REG_ECX]) @@ -237,15 +237,15 @@ inline void *FPREG_Xstate_Ymmh(const ucontext_t *uc) #define MCREG_SegCs(mc) ((mc).gregs[REG_CS]) #define MCREG_SegSs(mc) ((mc).gregs[REG_SS]) -#endif // BIT64 +#endif // HOST_64BIT #define MCREG_EFlags(mc) ((mc).gregs[REG_EFL]) #else // HAVE_GREGSET_T -#ifdef BIT64 +#ifdef HOST_64BIT -#if defined(_ARM64_) +#if defined(HOST_ARM64) #define MCREG_X0(mc) ((mc).regs[0]) #define MCREG_X1(mc) ((mc).regs[1]) #define MCREG_X2(mc) ((mc).regs[2]) @@ -357,9 +357,9 @@ const fpsimd_context* GetConstNativeSigSimdContext(const native_context_t *mc) #define FPREG_St(uc, index) *(M128A*) &(FPSTATE(uc)->sv_fp[index].fp_acc) #endif -#else // BIT64 +#else // HOST_64BIT -#if defined(_ARM_) +#if defined(HOST_ARM) #define MCREG_R0(mc) ((mc).arm_r0) #define MCREG_R1(mc) ((mc).arm_r1) @@ -432,7 +432,7 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) return GetNativeSigSimdContext(const_cast(mc)); } -#elif defined(_X86_) +#elif defined(HOST_X86) #define MCREG_Ebx(mc) ((mc).mc_ebx) #define MCREG_Ecx(mc) ((mc).mc_ecx) @@ -451,14 +451,14 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) #error "Unsupported arch" #endif -#endif // BIT64 +#endif // HOST_64BIT #endif // HAVE_GREGSET_T #if HAVE_PT_REGS -#ifdef BIT64 +#ifdef HOST_64BIT #define PTREG_Rbx(ptreg) ((ptreg).rbx) #define PTREG_Rcx(ptreg) ((ptreg).rcx) #define PTREG_Rdx(ptreg) ((ptreg).rdx) @@ -479,9 +479,9 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) 
#define PTREG_R14(ptreg) ((ptreg).r14) #define PTREG_R15(ptreg) ((ptreg).r15) -#else // BIT64 +#else // HOST_64BIT -#if defined(_ARM_) +#if defined(HOST_ARM) #define PTREG_R0(ptreg) ((ptreg).uregs[0]) #define PTREG_R1(ptreg) ((ptreg).uregs[1]) #define PTREG_R2(ptreg) ((ptreg).uregs[2]) @@ -499,7 +499,7 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) #define PTREG_Lr(ptreg) ((ptreg).uregs[14]) #define PTREG_Pc(ptreg) ((ptreg).uregs[15]) #define PTREG_Cpsr(ptreg) ((ptreg).uregs[16]) -#elif defined(_X86_) +#elif defined(HOST_X86) #define PTREG_Ebx(ptreg) ((ptreg).ebx) #define PTREG_Ecx(ptreg) ((ptreg).ecx) #define PTREG_Edx(ptreg) ((ptreg).edx) @@ -515,7 +515,7 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) #error "Unsupported arch" #endif -#endif // BIT64 +#endif // HOST_64BIT #define PTREG_EFlags(ptreg) ((ptreg).eflags) @@ -530,7 +530,7 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) #error "struct reg" has unrecognized format #endif -#ifdef BIT64 +#ifdef HOST_64BIT #define BSDREG_Rbx(reg) BSD_REGS_STYLE(reg,RBX,rbx) #define BSDREG_Rcx(reg) BSD_REGS_STYLE(reg,RCX,rcx) @@ -553,7 +553,7 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) #define BSDREG_R15(reg) BSD_REGS_STYLE(reg,R15,r15) #define BSDREG_EFlags(reg) BSD_REGS_STYLE(reg,RFLAGS,rflags) -#else // BIT64 +#else // HOST_64BIT #define BSDREG_Ebx(reg) BSD_REGS_STYLE(reg,EBX,ebx) #define BSDREG_Ecx(reg) BSD_REGS_STYLE(reg,ECX,ecx) @@ -568,17 +568,17 @@ const VfpSigFrame* GetConstNativeSigSimdContext(const native_context_t *mc) #define BSDREG_Esp(reg) BSD_REGS_STYLE(reg,ESP,esp) #define BSDREG_SegSs(reg) BSD_REGS_STYLE(reg,SS,ss) -#endif // BIT64 +#endif // HOST_64BIT #endif // HAVE_BSD_REGS_T inline static DWORD64 CONTEXTGetPC(LPCONTEXT pContext) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) return pContext->Rip; -#elif defined(_X86_) +#elif defined(HOST_X86) return pContext->Eip; -#elif defined(_ARM64_) || defined(_ARM_) +#elif defined(HOST_ARM64) || defined(HOST_ARM) return pContext->Pc; #else #error "don't know how to get the program counter for this architecture" @@ -587,11 +587,11 @@ inline static DWORD64 CONTEXTGetPC(LPCONTEXT pContext) inline static void CONTEXTSetPC(LPCONTEXT pContext, DWORD64 pc) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) pContext->Rip = pc; -#elif defined(_X86_) +#elif defined(HOST_X86) pContext->Eip = pc; -#elif defined(_ARM64_) || defined(_ARM_) +#elif defined(HOST_ARM64) || defined(HOST_ARM) pContext->Pc = pc; #else #error "don't know how to set the program counter for this architecture" @@ -600,13 +600,13 @@ inline static void CONTEXTSetPC(LPCONTEXT pContext, DWORD64 pc) inline static DWORD64 CONTEXTGetFP(LPCONTEXT pContext) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) return pContext->Rbp; -#elif defined(_X86_) +#elif defined(HOST_X86) return pContext->Ebp; -#elif defined(_ARM_) +#elif defined(HOST_ARM) return pContext->R7; -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) return pContext->Fp; #else #error "don't know how to get the frame pointer for this architecture" diff --git a/src/coreclr/src/pal/src/include/pal/mutex.hpp b/src/coreclr/src/pal/src/include/pal/mutex.hpp index e490d98fc8dcb..8aa9a53bdafc2 100644 --- a/src/coreclr/src/pal/src/include/pal/mutex.hpp +++ b/src/coreclr/src/pal/src/include/pal/mutex.hpp @@ -69,7 +69,7 @@ DWORD SPINLOCKTryAcquire (LONG * lock); // Temporarily disabling usage of pthread process-shared mutexes on ARM/ARM64 due to functional 
issues that cannot easily be // detected with code due to hangs. See https://github.com/dotnet/coreclr/issues/5456. -#if HAVE_FULLY_FEATURED_PTHREAD_MUTEXES && HAVE_FUNCTIONAL_PTHREAD_ROBUST_MUTEXES && !(defined(_ARM_) || defined(_ARM64_) || defined(__FreeBSD__)) +#if HAVE_FULLY_FEATURED_PTHREAD_MUTEXES && HAVE_FUNCTIONAL_PTHREAD_ROBUST_MUTEXES && !(defined(HOST_ARM) || defined(HOST_ARM64) || defined(__FreeBSD__)) #define NAMED_MUTEX_USE_PTHREAD_MUTEX 1 #else #define NAMED_MUTEX_USE_PTHREAD_MUTEX 0 diff --git a/src/coreclr/src/pal/src/include/pal/palinternal.h b/src/coreclr/src/pal/src/include/pal/palinternal.h index c7a2e81afb834..c45ad250c7e69 100644 --- a/src/coreclr/src/pal/src/include/pal/palinternal.h +++ b/src/coreclr/src/pal/src/include/pal/palinternal.h @@ -339,7 +339,7 @@ function_name() to call the system's implementation #undef va_arg #endif -#if !defined(_MSC_VER) && defined(BIT64) +#if !defined(_MSC_VER) && defined(HOST_64BIT) #undef _BitScanForward64 #undef _BitScanReverse64 #endif @@ -532,10 +532,10 @@ function_name() to call the system's implementation #undef towupper #undef wvsnprintf -#ifdef _AMD64_ +#ifdef HOST_AMD64 #undef _mm_getcsr #undef _mm_setcsr -#endif // _AMD64_ +#endif // HOST_AMD64 #undef min #undef max diff --git a/src/coreclr/src/pal/src/map/map.cpp b/src/coreclr/src/pal/src/map/map.cpp index ec2ad42ff0ecb..ddc715b25c994 100644 --- a/src/coreclr/src/pal/src/map/map.cpp +++ b/src/coreclr/src/pal/src/map/map.cpp @@ -2354,14 +2354,14 @@ void * MAPMapPEFile(HANDLE hFile) // We're going to start adding mappings to the mapping list, so take the critical section InternalEnterCriticalSection(pThread, &mapping_critsec); -#ifdef BIT64 +#ifdef HOST_64BIT // First try to reserve virtual memory using ExecutableAllocator. This allows all PE images to be // near each other and close to the coreclr library which also allows the runtime to generate // more efficient code (by avoiding usage of jump stubs). Alignment to a 64 KB granularity should // not be necessary (alignment to page size should be sufficient), but see // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done. 
loadedBase = ReserveMemoryFromExecutableAllocator(pThread, ALIGN_UP(virtualSize, VIRTUAL_64KB)); -#endif // BIT64 +#endif // HOST_64BIT if (loadedBase == NULL) { diff --git a/src/coreclr/src/pal/src/map/virtual.cpp b/src/coreclr/src/pal/src/map/virtual.cpp index 2bc35bfebc170..571273cf40d8b 100644 --- a/src/coreclr/src/pal/src/map/virtual.cpp +++ b/src/coreclr/src/pal/src/map/virtual.cpp @@ -1272,7 +1272,7 @@ PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange( IN LPCVOID lpEndAddress, IN SIZE_T dwSize) { -#ifdef BIT64 +#ifdef HOST_64BIT PERF_ENTRY(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange); ENTRY( "PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(lpBeginAddress = %p, lpEndAddress = %p, dwSize = %Iu)\n", @@ -1315,9 +1315,9 @@ PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange( LOGEXIT("PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange returning %p\n", address); PERF_EXIT(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange); return address; -#else // !BIT64 +#else // !HOST_64BIT return nullptr; -#endif // BIT64 +#endif // HOST_64BIT } /*++ @@ -1835,7 +1835,7 @@ static void VM_ALLOCATE_VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATIO vm_region_flavor_t vm_flavor; mach_msg_type_number_t infoCnt; mach_port_t object_name; -#ifdef BIT64 +#ifdef HOST_64BIT vm_region_basic_info_data_64_t info; infoCnt = VM_REGION_BASIC_INFO_COUNT_64; vm_flavor = VM_REGION_BASIC_INFO_64; @@ -1846,7 +1846,7 @@ static void VM_ALLOCATE_VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATIO #endif vm_address = (vm_address_t)lpAddress; -#ifdef BIT64 +#ifdef HOST_64BIT MachRet = vm_region_64( #else MachRet = vm_region( @@ -2050,15 +2050,15 @@ Function : --*/ void* ReserveMemoryFromExecutableAllocator(CPalThread* pThread, SIZE_T allocationSize) { -#ifdef BIT64 +#ifdef HOST_64BIT InternalEnterCriticalSection(pThread, &virtual_critsec); void* mem = g_executableMemoryAllocator.AllocateMemory(allocationSize); InternalLeaveCriticalSection(pThread, &virtual_critsec); return mem; -#else // !BIT64 +#else // !HOST_64BIT return nullptr; -#endif // BIT64 +#endif // HOST_64BIT } /*++ @@ -2079,9 +2079,9 @@ void ExecutableMemoryAllocator::Initialize() // Enable the executable memory allocator on 64-bit platforms only // because 32-bit platforms have limited amount of virtual address space. 
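The comments above explain why this path is compiled only for 64-bit hosts: a large region is reserved once, near the coreclr image, and later allocations are carved from it so generated code stays close together (avoiding jump stubs), which is only affordable with a 64-bit address space. A simplified sketch of that reserve-then-carve pattern using mmap/mprotect; the class name, sizes, and 64 KB rounding below are illustrative assumptions, not the PAL's ExecutableMemoryAllocator:

// Simplified illustration of "reserve a big region once, then hand out
// aligned chunks from it" as described in the allocator comments above.
// Not the PAL implementation; names, sizes and error handling are invented.
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

class ReservedRegionAllocator
{
    char*  m_next = nullptr;
    size_t m_remaining = 0;

public:
    bool Reserve(size_t totalSize)
    {
        void* p = mmap(nullptr, totalSize, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return false;
        m_next = static_cast<char*>(p);
        m_remaining = totalSize;
        return true;
    }

    // Carve the next chunk out of the reservation (bump allocation), so all
    // allocations end up near each other in the address space.
    void* Allocate(size_t size)
    {
        size = (size + 0xFFFF) & ~size_t(0xFFFF);   // round up to 64 KB
        if (size > m_remaining)
            return nullptr;
        void* result = m_next;
        if (mprotect(result, size, PROT_READ | PROT_WRITE) != 0)
            return nullptr;
        m_next += size;
        m_remaining -= size;
        return result;
    }
};

int main()
{
    ReservedRegionAllocator alloc;
#if defined(__LP64__)   // only worth doing with a 64-bit address space
    if (alloc.Reserve(256 * 1024 * 1024) && alloc.Allocate(4096) != nullptr)
        std::printf("carved a chunk from the reserved region\n");
#endif
    return 0;
}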
-#ifdef BIT64 +#ifdef HOST_64BIT TryReserveInitialMemory(); -#endif // BIT64 +#endif // HOST_64BIT } @@ -2195,7 +2195,7 @@ void ExecutableMemoryAllocator::TryReserveInitialMemory() --*/ void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize) { -#ifdef BIT64 +#ifdef HOST_64BIT void* allocatedMemory = nullptr; // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but @@ -2215,9 +2215,9 @@ void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize) } return allocatedMemory; -#else // !BIT64 +#else // !HOST_64BIT return nullptr; -#endif // BIT64 +#endif // HOST_64BIT } /*++ @@ -2233,7 +2233,7 @@ void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize) --*/ void *ExecutableMemoryAllocator::AllocateMemoryWithinRange(const void *beginAddress, const void *endAddress, SIZE_T allocationSize) { -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE(beginAddress <= endAddress); // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see @@ -2263,9 +2263,9 @@ void *ExecutableMemoryAllocator::AllocateMemoryWithinRange(const void *beginAddr m_nextFreeAddress = nextFreeAddress; m_remainingReservedMemory -= allocationSize; return address; -#else // !BIT64 +#else // !HOST_64BIT return nullptr; -#endif // BIT64 +#endif // HOST_64BIT } /*++ diff --git a/src/coreclr/src/pal/src/misc/jitsupport.cpp b/src/coreclr/src/pal/src/misc/jitsupport.cpp index 375b335deb9ca..0da36ab8903a6 100644 --- a/src/coreclr/src/pal/src/misc/jitsupport.cpp +++ b/src/coreclr/src/pal/src/misc/jitsupport.cpp @@ -7,10 +7,6 @@ #include "pal/dbgmsg.h" SET_DEFAULT_DEBUG_CHANNEL(MISC); -#if defined(_ARM64_) -#define _TARGET_ARM64_ -#endif - #include "../../../inc/corjitflags.h" #if HAVE_AUXV_HWCAP_H @@ -25,7 +21,7 @@ PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags) { _ASSERTE(flags); -#if defined(_ARM64_) +#if defined(HOST_ARM64) #if HAVE_AUXV_HWCAP_H unsigned long hwCap = getauxval(AT_HWCAP); @@ -128,5 +124,5 @@ PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags) CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD); CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP); #endif // HAVE_AUXV_HWCAP_H -#endif // defined(_ARM64_) +#endif // defined(HOST_ARM64) } diff --git a/src/coreclr/src/pal/src/misc/perfjitdump.cpp b/src/coreclr/src/pal/src/misc/perfjitdump.cpp index 56bac2790b846..e75d5989f18a2 100644 --- a/src/coreclr/src/pal/src/misc/perfjitdump.cpp +++ b/src/coreclr/src/pal/src/misc/perfjitdump.cpp @@ -39,13 +39,13 @@ namespace JIT_DUMP_MAGIC = 0x4A695444, JIT_DUMP_VERSION = 1, -#if defined(_X86_) +#if defined(HOST_X86) ELF_MACHINE = EM_386, -#elif defined(_ARM_) +#elif defined(HOST_ARM) ELF_MACHINE = EM_ARM, -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) ELF_MACHINE = EM_X86_64, -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) ELF_MACHINE = EM_AARCH64, #else #error ELF_MACHINE unsupported for target diff --git a/src/coreclr/src/pal/src/misc/perftrace.cpp b/src/coreclr/src/pal/src/misc/perftrace.cpp index 138b7bbf9b4af..82969a1c89143 100644 --- a/src/coreclr/src/pal/src/misc/perftrace.cpp +++ b/src/coreclr/src/pal/src/misc/perftrace.cpp @@ -174,7 +174,7 @@ static const char PATH_SEPARATOR[] = "/"; static ULONGLONG PERFGetTicks(){ -#ifdef _X86_ // for BSD and Windows. +#ifdef HOST_X86 // for BSD and Windows. 
unsigned long a, d; #ifdef _MSC_VER __asm{ @@ -190,7 +190,7 @@ PERFGetTicks(){ return ((ULONGLONG)((unsigned int)(d)) << 32) | (unsigned int)(a); #else return 0; // on non-BSD and non-Windows, we'll return 0 for now. -#endif // _X86_ +#endif // HOST_X86 } static diff --git a/src/coreclr/src/pal/src/misc/sysinfo.cpp b/src/coreclr/src/pal/src/misc/sysinfo.cpp index fc10a3c89aa02..dffcc63f32962 100644 --- a/src/coreclr/src/pal/src/misc/sysinfo.cpp +++ b/src/coreclr/src/pal/src/misc/sysinfo.cpp @@ -63,12 +63,12 @@ Revision History: #include #endif // HAVE_MACHINE_VMPARAM_H -#if defined(_TARGET_MAC64) +#if defined(TARGET_OSX) #include #include #include #include -#endif // defined(_TARGET_MAC64) +#endif // defined(TARGET_OSX) // On some platforms sys/user.h ends up defining _DEBUG; if so // remove the definition before including the header and put @@ -109,7 +109,7 @@ PAL_GetTotalCpuCount() #if HAVE_SYSCONF -#if defined(_ARM_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_ARM64) #define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_CONF #define SYSCONF_GET_NUMPROCS_NAME "_SC_NPROCESSORS_CONF" #else @@ -219,7 +219,7 @@ GetSystemInfo( lpSystemInfo->lpMaximumApplicationAddress = (PVOID) (1ull << 47); #elif defined(USERLIMIT) lpSystemInfo->lpMaximumApplicationAddress = (PVOID) USERLIMIT; -#elif defined(BIT64) +#elif defined(HOST_64BIT) #if defined(USRSTACK64) lpSystemInfo->lpMaximumApplicationAddress = (PVOID) USRSTACK64; #else // !USRSTACK64 @@ -548,7 +548,7 @@ PAL_GetLogicalProcessorCacheSizeFromOS() cacheSize = std::max(cacheSize, (size_t)sysconf(_SC_LEVEL4_CACHE_SIZE)); #endif -#if defined(_ARM64_) +#if defined(HOST_ARM64) if(cacheSize == 0) { size_t size; diff --git a/src/coreclr/src/pal/src/safecrt/cruntime.h b/src/coreclr/src/pal/src/safecrt/cruntime.h index e3170b62bf362..9508e8ff9c9dd 100644 --- a/src/coreclr/src/pal/src/safecrt/cruntime.h +++ b/src/coreclr/src/pal/src/safecrt/cruntime.h @@ -31,9 +31,9 @@ #error ERROR: Use of C runtime library internal header file. 
#endif /* _CRTBLD */ -#if defined (_SYSCRT) && defined (BIT64) +#if defined (_SYSCRT) && defined (HOST_64BIT) #define _USE_OLD_STDCPP 1 -#endif /* defined (_SYSCRT) && defined (BIT64) */ +#endif /* defined (_SYSCRT) && defined (HOST_64BIT) */ #if !defined (UNALIGNED) #if defined (_M_AMD64) diff --git a/src/coreclr/src/pal/src/safecrt/input.inl b/src/coreclr/src/pal/src/safecrt/input.inl index d9ba8f56e2c77..4045f423ca7b7 100644 --- a/src/coreclr/src/pal/src/safecrt/input.inl +++ b/src/coreclr/src/pal/src/safecrt/input.inl @@ -510,11 +510,11 @@ DEFAULT_LABEL: va_copy(arglistsave, arglist); /* Get the next argument - size of the array in characters */ -#ifdef BIT64 +#ifdef HOST_64BIT original_array_width = array_width = (size_t)(va_arg(arglist, unsigned int)); -#else /* BIT64 */ +#else /* HOST_64BIT */ original_array_width = array_width = va_arg(arglist, size_t); -#endif /* BIT64 */ +#endif /* HOST_64BIT */ if(array_width < 1) { if (widechar > 0) @@ -862,11 +862,11 @@ x_incwidth: case _T('p') : /* force %hp to be treated as %p */ longone = 1; -#ifdef BIT64 +#ifdef HOST_64BIT /* force %p to be 64 bit in WIN64 */ ++integer64; num64 = 0; -#endif /* BIT64 */ +#endif /* HOST_64BIT */ case _T('o') : case _T('u') : case _T('d') : diff --git a/src/coreclr/src/pal/src/safecrt/output.inl b/src/coreclr/src/pal/src/safecrt/output.inl index d561ce7253188..cab3a808e9080 100644 --- a/src/coreclr/src/pal/src/safecrt/output.inl +++ b/src/coreclr/src/pal/src/safecrt/output.inl @@ -102,7 +102,7 @@ Buffer size required to be passed to _gcvt, fcvt and other fp conversion routine #define SHORT_IS_INT 0 /* 1 means short is same size as int */ #define LONGLONG_IS_INT64 1 /* 1 means long long is same as int64 */ -#if defined (BIT64) +#if defined (HOST_64BIT) #define PTR_IS_INT 0 /* 1 means ptr is same size as int */ CASSERT(sizeof(void *) != sizeof(int)); #if __LP64__ @@ -114,14 +114,14 @@ Buffer size required to be passed to _gcvt, fcvt and other fp conversion routine #endif #define PTR_IS_INT64 1 /* 1 means ptr is same size as int64 */ CASSERT(sizeof(void *) == sizeof(int64_t)); -#else /* defined (BIT64) */ +#else /* defined (HOST_64BIT) */ #define PTR_IS_INT 1 /* 1 means ptr is same size as int */ CASSERT(sizeof(void *) == sizeof(int)); #define PTR_IS_LONG 1 /* 1 means ptr is same size as long */ CASSERT(sizeof(void *) == sizeof(long)); #define PTR_IS_INT64 0 /* 1 means ptr is same size as int64 */ CASSERT(sizeof(void *) != sizeof(int64_t)); -#endif /* defined (BIT64) */ +#endif /* defined (HOST_64BIT) */ /* CONSTANTS */ diff --git a/src/coreclr/src/pal/src/safecrt/safecrt_output_l.cpp b/src/coreclr/src/pal/src/safecrt/safecrt_output_l.cpp index f20e63a3f7ce8..8b71fd3600825 100644 --- a/src/coreclr/src/pal/src/safecrt/safecrt_output_l.cpp +++ b/src/coreclr/src/pal/src/safecrt/safecrt_output_l.cpp @@ -118,7 +118,7 @@ Buffer size required to be passed to _gcvt, fcvt and other fp conversion routine #define LONGLONG_IS_INT64 1 /* 1 means long long is same as int64 */ CASSERT(sizeof(long long) == sizeof(int64_t)); -#if defined (BIT64) +#if defined (HOST_64BIT) #define PTR_IS_INT 0 /* 1 means ptr is same size as int */ CASSERT(sizeof(void *) != sizeof(int)); #if __LP64__ @@ -130,14 +130,14 @@ Buffer size required to be passed to _gcvt, fcvt and other fp conversion routine #endif #define PTR_IS_INT64 1 /* 1 means ptr is same size as int64 */ CASSERT(sizeof(void *) == sizeof(int64_t)); -#else /* defined (BIT64) */ +#else /* defined (HOST_64BIT) */ #define PTR_IS_INT 1 /* 1 means ptr is same size as int */ 
CASSERT(sizeof(void *) == sizeof(int)); #define PTR_IS_LONG 1 /* 1 means ptr is same size as long */ CASSERT(sizeof(void *) == sizeof(long)); #define PTR_IS_INT64 0 /* 1 means ptr is same size as int64 */ CASSERT(sizeof(void *) != sizeof(int64_t)); -#endif /* defined (BIT64) */ +#endif /* defined (HOST_64BIT) */ /* CONSTANTS */ diff --git a/src/coreclr/src/pal/src/thread/context.cpp b/src/coreclr/src/pal/src/thread/context.cpp index 00031b9821a41..d00f688856479 100644 --- a/src/coreclr/src/pal/src/thread/context.cpp +++ b/src/coreclr/src/pal/src/thread/context.cpp @@ -36,13 +36,13 @@ SET_DEFAULT_DEBUG_CHANNEL(THREAD); // some headers have code with asserts, so do extern PGET_GCMARKER_EXCEPTION_CODE g_getGcMarkerExceptionCode; #define CONTEXT_AREA_MASK 0xffff -#ifdef _X86_ +#ifdef HOST_X86 #define CONTEXT_ALL_FLOATING (CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) #define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT -#elif defined(_ARM_) +#elif defined(HOST_ARM) #define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) #define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT #else #error Unexpected architecture. @@ -65,7 +65,7 @@ typedef int __ptrace_request; #include #endif // HAVE_PT_REGS -#ifdef _AMD64_ +#ifdef HOST_AMD64 #define ASSIGN_CONTROL_REGS \ ASSIGN_REG(Rbp) \ ASSIGN_REG(Rip) \ @@ -89,7 +89,7 @@ typedef int __ptrace_request; ASSIGN_REG(R14) \ ASSIGN_REG(R15) \ -#elif defined(_X86_) +#elif defined(HOST_X86) #define ASSIGN_CONTROL_REGS \ ASSIGN_REG(Ebp) \ ASSIGN_REG(Eip) \ @@ -106,7 +106,7 @@ typedef int __ptrace_request; ASSIGN_REG(Ecx) \ ASSIGN_REG(Eax) \ -#elif defined(_ARM_) +#elif defined(HOST_ARM) #define ASSIGN_CONTROL_REGS \ ASSIGN_REG(Sp) \ ASSIGN_REG(Lr) \ @@ -127,7 +127,7 @@ typedef int __ptrace_request; ASSIGN_REG(R10) \ ASSIGN_REG(R11) \ ASSIGN_REG(R12) -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) #define ASSIGN_CONTROL_REGS \ ASSIGN_REG(Cpsr) \ ASSIGN_REG(Fp) \ @@ -444,7 +444,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native) if ((lpContext->ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 FPREG_ControlWord(native) = lpContext->FltSave.ControlWord; FPREG_StatusWord(native) = lpContext->FltSave.StatusWord; FPREG_TagWord(native) = lpContext->FltSave.TagWord; @@ -464,7 +464,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native) { FPREG_Xmm(native, i) = lpContext->FltSave.XmmRegisters[i]; } -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) fpsimd_context* fp = GetNativeSigSimdContext(native); if (fp) { @@ -475,7 +475,7 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native) *(NEON128*) &fp->vregs[i] = lpContext->V[i]; } } -#elif defined(_ARM_) +#elif defined(HOST_ARM) VfpSigFrame* fp = GetNativeSigSimdContext(native); if (fp) { @@ -489,13 +489,13 @@ void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native) } // TODO: Enable for all Unix systems -#if defined(_AMD64_) && defined(XSTATE_SUPPORTED) +#if defined(HOST_AMD64) && defined(XSTATE_SUPPORTED) if ((lpContext->ContextFlags & CONTEXT_XSTATE) == CONTEXT_XSTATE) { _ASSERTE(FPREG_HasYmmRegisters(native)); memcpy_s(FPREG_Xstate_Ymmh(native), sizeof(M128A) * 16, lpContext->VectorRegister, sizeof(M128A) * 16); } -#endif //_AMD64_ && XSTATE_SUPPORTED +#endif //HOST_AMD64 && XSTATE_SUPPORTED } /*++ @@ -523,7 +523,7 @@ void CONTEXTFromNativeContext(const 
native_context_t *native, LPCONTEXT lpContex if ((contextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) { ASSIGN_CONTROL_REGS -#if defined(_ARM_) +#if defined(HOST_ARM) // WinContext assumes that the least bit of Pc is always 1 (denoting thumb) // although the pc value retrived from native context might not have set the least bit. // This becomes especially problematic if the context is on the JIT_WRITEBARRIER. @@ -563,7 +563,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex if ((contextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 lpContext->FltSave.ControlWord = FPREG_ControlWord(native); lpContext->FltSave.StatusWord = FPREG_StatusWord(native); lpContext->FltSave.TagWord = FPREG_TagWord(native); @@ -583,7 +583,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex { lpContext->FltSave.XmmRegisters[i] = FPREG_Xmm(native, i); } -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) const fpsimd_context* fp = GetConstNativeSigSimdContext(native); if (fp) { @@ -594,7 +594,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex lpContext->V[i] = *(NEON128*) &fp->vregs[i]; } } -#elif defined(_ARM_) +#elif defined(HOST_ARM) const VfpSigFrame* fp = GetConstNativeSigSimdContext(native); if (fp) { @@ -613,7 +613,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex #endif } -#ifdef _AMD64_ +#ifdef HOST_AMD64 if ((contextFlags & CONTEXT_XSTATE) == CONTEXT_XSTATE) { // TODO: Enable for all Unix systems @@ -631,7 +631,7 @@ void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContex lpContext->ContextFlags &= ~xstateFlags; } } -#endif // _AMD64_ +#endif // HOST_AMD64 } /*++ @@ -649,13 +649,13 @@ Return value : --*/ LPVOID GetNativeContextPC(const native_context_t *context) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 return (LPVOID)MCREG_Rip(context->uc_mcontext); -#elif defined(_X86_) +#elif defined(HOST_X86) return (LPVOID) MCREG_Eip(context->uc_mcontext); -#elif defined(_ARM_) +#elif defined(HOST_ARM) return (LPVOID) MCREG_Pc(context->uc_mcontext); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) return (LPVOID) MCREG_Pc(context->uc_mcontext); #else # error implement me for this architecture @@ -677,13 +677,13 @@ Return value : --*/ LPVOID GetNativeContextSP(const native_context_t *context) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 return (LPVOID)MCREG_Rsp(context->uc_mcontext); -#elif defined(_X86_) +#elif defined(HOST_X86) return (LPVOID) MCREG_Esp(context->uc_mcontext); -#elif defined(_ARM_) +#elif defined(HOST_ARM) return (LPVOID) MCREG_Sp(context->uc_mcontext); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) return (LPVOID) MCREG_Sp(context->uc_mcontext); #else # error implement me for this architecture @@ -938,7 +938,7 @@ CONTEXT_GetThreadContextFromPort( if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 x86_thread_state64_t State; StateFlavor = x86_THREAD_STATE64; #else @@ -1015,7 +1015,7 @@ CONTEXT_GetThreadContextFromThreadState( { switch (threadStateFlavor) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 case x86_THREAD_STATE64: if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK) { @@ -1174,7 +1174,7 @@ CONTEXT_SetThreadContextOnPort( if (lpContext->ContextFlags & (CONTEXT_CONTROL|CONTEXT_INTEGER) & CONTEXT_AREA_MASK) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 
x86_thread_state64_t State; StateFlavor = x86_THREAD_STATE64; @@ -1222,7 +1222,7 @@ CONTEXT_SetThreadContextOnPort( if (lpContext->ContextFlags & CONTEXT_ALL_FLOATING & CONTEXT_AREA_MASK) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 #ifdef XSTATE_SUPPORTED // We're relying on the fact that the initial portion of // x86_avx_state64_t is identical to x86_float_state64_t. @@ -1273,7 +1273,7 @@ CONTEXT_SetThreadContextOnPort( if (lpContext->ContextFlags & CONTEXT_FLOATING_POINT & CONTEXT_AREA_MASK) { -#ifdef _AMD64_ +#ifdef HOST_AMD64 *(DWORD*)&State.__fpu_fcw = lpContext->FltSave.ControlWord; *(DWORD*)&State.__fpu_fsw = lpContext->FltSave.StatusWord; State.__fpu_ftw = lpContext->FltSave.TagWord; @@ -1297,7 +1297,7 @@ CONTEXT_SetThreadContextOnPort( #endif } -#if defined(_AMD64_) && defined(XSTATE_SUPPORTED) +#if defined(HOST_AMD64) && defined(XSTATE_SUPPORTED) if (lpContext->ContextFlags & CONTEXT_XSTATE & CONTEXT_AREA_MASK) { memcpy(&State.__fpu_ymmh0, lpContext->VectorRegister, 16 * 16); @@ -1383,10 +1383,10 @@ DBG_FlushInstructionCache( IN LPCVOID lpBaseAddress, IN SIZE_T dwSize) { -#ifndef _ARM_ +#ifndef HOST_ARM // Intrinsic should do the right thing across all platforms (except Linux arm) __builtin___clear_cache((char *)lpBaseAddress, (char *)((INT_PTR)lpBaseAddress + dwSize)); -#else // _ARM_ +#else // HOST_ARM // On Linux/arm (at least on 3.10) we found that there is a problem with __do_cache_op (arch/arm/kernel/traps.c) // implementing cacheflush syscall. cacheflush flushes only the first page in range [lpBaseAddress, lpBaseAddress + dwSize) // and leaves other pages in undefined state which causes random tests failures (often due to SIGSEGV) with no particular pattern. @@ -1406,6 +1406,6 @@ DBG_FlushInstructionCache( __builtin___clear_cache((char *)begin, (char *)endOrNextPageBegin); begin = endOrNextPageBegin; } -#endif // _ARM_ +#endif // HOST_ARM return TRUE; } diff --git a/src/coreclr/src/pal/src/thread/thread.cpp b/src/coreclr/src/pal/src/thread/thread.cpp index 6f42e94bd8aa0..c8dcc43d06f56 100644 --- a/src/coreclr/src/pal/src/thread/thread.cpp +++ b/src/coreclr/src/pal/src/thread/thread.cpp @@ -2623,7 +2623,7 @@ void * CPalThread::GetStackBase() { void* stackBase; -#ifdef _TARGET_MAC64 +#ifdef TARGET_OSX // This is a Mac specific method stackBase = pthread_get_stackaddr_np(pthread_self()); #else @@ -2663,7 +2663,7 @@ void * CPalThread::GetStackLimit() { void* stackLimit; -#ifdef _TARGET_MAC64 +#ifdef TARGET_OSX // This is a Mac specific method stackLimit = ((BYTE *)pthread_get_stackaddr_np(pthread_self()) - pthread_get_stacksize_np(pthread_self())); diff --git a/src/coreclr/src/pal/tests/CMakeLists.txt b/src/coreclr/src/pal/tests/CMakeLists.txt index 3e46072b6dc28..b6d9fe2d44eb8 100644 --- a/src/coreclr/src/pal/tests/CMakeLists.txt +++ b/src/coreclr/src/pal/tests/CMakeLists.txt @@ -1,9 +1,7 @@ cmake_minimum_required(VERSION 3.14.2) # Compile options -add_definitions(-DPLATFORM_UNIX) add_definitions(-DLP64COMPATIBLE) -add_definitions(-DFEATURE_PAL) add_definitions(-DCORECLR) add_definitions(-DPIC) diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/_snprintf_s/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/_snprintf_s/test4/test4.cpp index 7c0615213cebc..089e056c7aa91 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/_snprintf_s/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/_snprintf_s/test4/test4.cpp @@ -33,7 +33,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ - #if defined(BIT64) + #if 
defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 0x123456", "0000000000123456"); @@ -61,7 +61,7 @@ int __cdecl main(int argc, char *argv[]) DoPointerTest("%Lp", ptr, "pointer to 0x123456", "00123456"); DoI64Test("%I64p", lptr, "pointer to 0x1234567887654321", "1234567887654321"); - #endif //defined(BIT64) + #endif //defined(HOST_64BIT) PAL_Terminate(); return PASS; diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/_snwprintf_s/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/_snwprintf_s/test4/test4.cpp index 0819a7e6591db..cdf2728ba1ef1 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/_snwprintf_s/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/_snwprintf_s/test4/test4.cpp @@ -33,7 +33,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("0000000000000000")); DoPointerTest(convert("%p"), ptr, convert("0000000000123456")); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnprintf_s/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnprintf_s/test4/test4.cpp index c4a77f957dcbb..f795c0dc1fd28 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnprintf_s/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnprintf_s/test4/test4.cpp @@ -58,7 +58,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 0x123456", "0000000000123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnwprintf_s/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnwprintf_s/test4/test4.cpp index 006e154f70617..acf9abadae966 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnwprintf_s/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/_vsnwprintf_s/test4/test4.cpp @@ -64,7 +64,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("NULL"), convert("00000000")); DoPointerTest(convert("%p"), ptr, convert("pointer to 0x123456"), diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/fprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/fprintf/test4/test4.cpp index ef3108d8640ab..4af4d1af6184c 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/fprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/fprintf/test4/test4.cpp @@ -75,7 +75,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoTest("%p", NULL, "NULL", "0000000000000000", "0x0"); DoTest("%p", ptr, "pointer to 0x123456", "0000000000123456", "0x123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/fwprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/fwprintf/test4/test4.cpp index a894120f9ad19..ba0cafba01ae6 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/fwprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/fwprintf/test4/test4.cpp @@ -32,7 +32,7 @@ int __cdecl 
main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, "NULL", "0000000000000000", "0x0"); DoPointerTest(convert("%p"), ptr, "pointer to 0x123456", "0000000000123456", diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/printf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/printf/test4/test4.cpp index 7b0178cd294b6..ba2fa589ba2b8 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/printf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/printf/test4/test4.cpp @@ -29,7 +29,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 0x123456", "0000000000123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/sprintf_s/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/sprintf_s/test4/test4.cpp index bb3a6d818c738..72349700ba9c9 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/sprintf_s/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/sprintf_s/test4/test4.cpp @@ -33,7 +33,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 0x123456", "0000000000123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/swprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/swprintf/test4/test4.cpp index 9a68bdaac3120..04bfca3285c0b 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/swprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/swprintf/test4/test4.cpp @@ -33,7 +33,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("0000000000000000")); DoPointerTest(convert("%p"), ptr, convert("0000000000123456")); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/vfprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/vfprintf/test4/test4.cpp index 75d11f641cd2c..d1376f18ec3d7 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/vfprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/vfprintf/test4/test4.cpp @@ -32,7 +32,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 0x123456", "0000000000123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/vprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/vprintf/test4/test4.cpp index 2cbdb35cc1acf..2469cb5661cf4 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/vprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/vprintf/test4/test4.cpp @@ -32,7 +32,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 
0x123456", "0000000000123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/vsprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/vsprintf/test4/test4.cpp index e052d8ee6829e..513b0dd3fa606 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/vsprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/vsprintf/test4/test4.cpp @@ -31,7 +31,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest("%p", NULL, "NULL", "0000000000000000"); DoPointerTest("%p", ptr, "pointer to 0x123456", "0000000000123456"); diff --git a/src/coreclr/src/pal/tests/palsuite/c_runtime/vswprintf/test4/test4.cpp b/src/coreclr/src/pal/tests/palsuite/c_runtime/vswprintf/test4/test4.cpp index c54fe6aabb5e5..2d61137d1baf1 100644 --- a/src/coreclr/src/pal/tests/palsuite/c_runtime/vswprintf/test4/test4.cpp +++ b/src/coreclr/src/pal/tests/palsuite/c_runtime/vswprintf/test4/test4.cpp @@ -62,7 +62,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); DoPointerTest(convert("%p"), NULL, convert("NULL"), convert("0000000000000000")); DoPointerTest(convert("%p"), ptr, convert("pointer to 0x123456"), diff --git a/src/coreclr/src/pal/tests/palsuite/composite/synchronization/nativecs_interlocked/makefile b/src/coreclr/src/pal/tests/palsuite/composite/synchronization/nativecs_interlocked/makefile index d0f215e18a18c..98daf0ddc7a2f 100644 --- a/src/coreclr/src/pal/tests/palsuite/composite/synchronization/nativecs_interlocked/makefile +++ b/src/coreclr/src/pal/tests/palsuite/composite/synchronization/nativecs_interlocked/makefile @@ -31,10 +31,10 @@ else ifeq ($(OSNAME),HP-UX) COMP=g++ INTERLOCK_OBJ=hpitinterlock.o - COPT=-DHPUX -O2 -mlp64 -finline -fPIC -DPIC -DBIT64 -DBIT64 -DLP64COMPATIBLE \ - -D_POSIX_C_SOURCE=199506L -D_HPUX_ -D_XOPEN_SOURCE_EXTENDED -DBIT64 \ - -DBIGENDIAN -DBIT64 $(DEBUGOPT) -xc++ - LOPT=-O2 -mlp64 -DBIT64 -DBIT64 -lgcc -lpthread /usr/lib/hpux64/libunwind.so + COPT=-DHPUX -O2 -mlp64 -finline -fPIC -DPIC -DHOST_64BIT -DHOST_64BIT -DLP64COMPATIBLE \ + -D_POSIX_C_SOURCE=199506L -D_HPUX_ -D_XOPEN_SOURCE_EXTENDED -DHOST_64BIT \ + -DBIGENDIAN -DHOST_64BIT $(DEBUGOPT) -xc++ + LOPT=-O2 -mlp64 -DHOST_64BIT -DHOST_64BIT -lgcc -lpthread /usr/lib/hpux64/libunwind.so endif endif endif diff --git a/src/coreclr/src/pal/tests/palsuite/eventprovider/CMakeLists.txt b/src/coreclr/src/pal/tests/palsuite/eventprovider/CMakeLists.txt index e6400a887742c..000ee2d2fb0d1 100644 --- a/src/coreclr/src/pal/tests/palsuite/eventprovider/CMakeLists.txt +++ b/src/coreclr/src/pal/tests/palsuite/eventprovider/CMakeLists.txt @@ -15,9 +15,9 @@ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/clralltestevents.cpp include_directories(${GENERATED_INCLUDE_DIR}) -if(FEATURE_PAL) +if(TARGET_UNIX) include_directories(${COREPAL_SOURCE_DIR}/inc/rt) -endif(FEATURE_PAL) +endif(TARGET_UNIX) add_executable(eventprovidertest ${SOURCES} diff --git a/src/coreclr/src/pal/tests/palsuite/miscellaneous/FormatMessageW/test2/test.cpp b/src/coreclr/src/pal/tests/palsuite/miscellaneous/FormatMessageW/test2/test.cpp index 3895b71af9d47..eba38d48bd9c7 100644 --- a/src/coreclr/src/pal/tests/palsuite/miscellaneous/FormatMessageW/test2/test.cpp +++ b/src/coreclr/src/pal/tests/palsuite/miscellaneous/FormatMessageW/test2/test.cpp @@ -460,7 +460,7 @@ int test11(int num, ...) 
/* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) Trace("Testing for 64 Bit Platforms \n"); if(memcmp(OutBuffer, convert("Pal 00000000000123AB and foo Testing"), diff --git a/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterLockedExchangeAdd/test1/test.cpp b/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterLockedExchangeAdd/test1/test.cpp index a68a1f609e7e5..a44f569b7bcb5 100644 --- a/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterLockedExchangeAdd/test1/test.cpp +++ b/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterLockedExchangeAdd/test1/test.cpp @@ -36,7 +36,7 @@ int __cdecl main(int argc, char *argv[]) -#if defined(BIT64) +#if defined(HOST_64BIT) ptrValue = (LONG *) malloc(sizeof(LONG)); if(ptrValue == NULL) diff --git a/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test1/test.cpp b/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test1/test.cpp index bb5981c65b1f2..938855dda5355 100644 --- a/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test1/test.cpp +++ b/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test1/test.cpp @@ -42,7 +42,7 @@ int __cdecl main(int argc, char *argv[]) { /* ** Run only on 64 bit platforms */ -#if defined(BIT64) +#if defined(HOST_64BIT) /* Compare START_VALUE with BaseVariableToManipulate, they're equal, so exchange */ @@ -95,7 +95,7 @@ int __cdecl main(int argc, char *argv[]) { TempValue,BaseVariableToManipulate); } -#endif //if defined(BIT64) +#endif //if defined(HOST_64BIT) PAL_Terminate(); return PASS; } diff --git a/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test2/test.cpp b/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test2/test.cpp index 3b91db4f3256e..0b6fb116518d2 100644 --- a/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test2/test.cpp +++ b/src/coreclr/src/pal/tests/palsuite/miscellaneous/InterlockedCompareExchange64/test2/test.cpp @@ -47,7 +47,7 @@ int __cdecl main(int argc, char *argv[]) /* ** Run only on 64 bit platforms */ - #if defined(BIT64) + #if defined(HOST_64BIT) //Create MAX_THREADS threads that will operate on the global counter for (i=0;i #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include #include #else #include "pal.h" -#endif //FEATURE_PAL +#endif //TARGET_UNIX bool ResizeBuffer(char *&buffer, size_t& size, size_t currLen, size_t newSize, bool &fixedBuffer) { @@ -398,7 +398,7 @@ def generateEventPipeImplFiles( #include "{root:s}/vm/eventpipeevent.h" #include "{root:s}/vm/eventpipe.h" -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) #define wcslen PAL_wcslen #endif diff --git a/src/coreclr/src/scripts/genEventingTests.py b/src/coreclr/src/scripts/genEventingTests.py index 8039e557bcc1c..3931b2f6d8179 100644 --- a/src/coreclr/src/scripts/genEventingTests.py +++ b/src/coreclr/src/scripts/genEventingTests.py @@ -110,9 +110,9 @@ def generateSanityTest(sClrEtwAllMan,testDir): ** ** **===================================================================*/ -#if FEATURE_PAL +#if TARGET_UNIX #include -#endif //FEATURE_PAL +#endif //TARGET_UNIX #include typedef struct _Struct1 { @@ -139,7 +139,7 @@ def generateSanityTest(sClrEtwAllMan,testDir): BYTE* win_Binary =(BYTE*)var21 ; int __cdecl main(int argc, char **argv) { -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) /* Initialize the PAL. 
*/ @@ -164,7 +164,7 @@ def generateSanityTest(sClrEtwAllMan,testDir): } Trace("\\n All eventing APIs were fired succesfully \\n"); #endif //defined(FEATURE_EVENT_TRACE) -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) /* Shutdown the PAL. */ diff --git a/src/coreclr/src/tools/crossgen/crossgen.cpp b/src/coreclr/src/tools/crossgen/crossgen.cpp index ecc6f05b689eb..15a3f5510c843 100644 --- a/src/coreclr/src/tools/crossgen/crossgen.cpp +++ b/src/coreclr/src/tools/crossgen/crossgen.cpp @@ -415,7 +415,7 @@ extern HMODULE g_hThisInst; int _cdecl wmain(int argc, __in_ecount(argc) WCHAR **argv) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX g_hThisInst = WszGetModuleHandle(NULL); #endif @@ -449,7 +449,7 @@ int _cdecl wmain(int argc, __in_ecount(argc) WCHAR **argv) HRESULT hr; -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // This is required to properly display Unicode characters _setmode(_fileno(stdout), _O_U8TEXT); #endif @@ -753,7 +753,7 @@ int _cdecl wmain(int argc, __in_ecount(argc) WCHAR **argv) { if (argc == 1) { -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) // When not running on Mac or Linux, which can have forward-slash pathnames, we know // a command switch here means an invalid argument. if (*argv[0] == W('-') || *argv[0] == W('/')) @@ -761,7 +761,7 @@ int _cdecl wmain(int argc, __in_ecount(argc) WCHAR **argv) OutputErrf(W("Invalid parameter: %s\n"), *argv); exit(INVALID_ARGUMENTS); } -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX // The last thing on the command line is an assembly name or path, and // because we got this far is not an argument like /nologo. Because this // code works on Mac, with forward-slash pathnames, we can't assume @@ -987,7 +987,7 @@ int _cdecl wmain(int argc, __in_ecount(argc) WCHAR **argv) return 0; } -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX int main(int argc, char *argv[]) { if (0 != PAL_Initialize(argc, argv)) @@ -1013,4 +1013,4 @@ int main(int argc, char *argv[]) return ret; } -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX diff --git a/src/coreclr/src/tools/crossgen2/jitinterface/corinfoexception.h b/src/coreclr/src/tools/crossgen2/jitinterface/corinfoexception.h index 4eb278475a235..c8bcc8045ff8f 100644 --- a/src/coreclr/src/tools/crossgen2/jitinterface/corinfoexception.h +++ b/src/coreclr/src/tools/crossgen2/jitinterface/corinfoexception.h @@ -4,7 +4,7 @@ #include -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX typedef char16_t WCHAR; #else typedef wchar_t WCHAR; diff --git a/src/coreclr/src/tools/crossgen2/jitinterface/dllexport.h b/src/coreclr/src/tools/crossgen2/jitinterface/dllexport.h index 5cc1b0350561b..6c746ec82b525 100644 --- a/src/coreclr/src/tools/crossgen2/jitinterface/dllexport.h +++ b/src/coreclr/src/tools/crossgen2/jitinterface/dllexport.h @@ -16,8 +16,8 @@ // *** // Define default call conventions // *** -#if defined(_X86_) && !defined(PLATFORM_UNIX) +#if defined(HOST_X86) && !defined(TARGET_UNIX) #define STDMETHODCALLTYPE __stdcall #else #define STDMETHODCALLTYPE -#endif // defined(_X86_) && !defined(PLATFORM_UNIX) +#endif // defined(HOST_X86) && !defined(TARGET_UNIX) diff --git a/src/coreclr/src/tools/metainfo/mdinfo.h b/src/coreclr/src/tools/metainfo/mdinfo.h index 5d133b19cfea6..fbdc20efacc9e 100644 --- a/src/coreclr/src/tools/metainfo/mdinfo.h +++ b/src/coreclr/src/tools/metainfo/mdinfo.h @@ -9,7 +9,7 @@ #include "cor.h" #include "corhlprpriv.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include #endif diff --git a/src/coreclr/src/tools/util/consoleargs.cpp b/src/coreclr/src/tools/util/consoleargs.cpp index 
d434a8b97824a..8a2e436555d0f 100644 --- a/src/coreclr/src/tools/util/consoleargs.cpp +++ b/src/coreclr/src/tools/util/consoleargs.cpp @@ -45,7 +45,7 @@ inline int HexValue (WCHAR c) return (c >= '0' && c <= '9') ? c - '0' : (c & 0xdf) - 'A' + 10; } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // Get canonical file path from a user specified path. wszSrcfileName can include relative paths, etc. // Much of this function was taken from csc.exe. DWORD GetCanonFilePath(_In_z_ LPCWSTR wszSrcFileName, _Out_z_cap_(cchDestFileName) LPWSTR wszDestFileName, _In_ DWORD cchDestFileName, _In_ bool fPreserveSrcCasing) @@ -300,7 +300,7 @@ DWORD GetCanonFilePath(_In_z_ LPCWSTR wszSrcFileName, _Out_z_cap_(cchDestFileNam } return 0; } -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX bool FreeString(LPCWSTR szText) { @@ -338,7 +338,7 @@ void ConsoleArgs::CleanUpArgs() bool ConsoleArgs::GetFullFileName(LPCWSTR szSource, __out_ecount(cchFilenameBuffer) LPWSTR filenameBuffer, DWORD cchFilenameBuffer, bool fOutputFilename) { -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX WCHAR tempBuffer[MAX_LONGPATH]; memset(filenameBuffer, 0, cchFilenameBuffer * sizeof(WCHAR)); if (!PathCanonicalizeW(tempBuffer, szSource) || @@ -925,7 +925,7 @@ void ConsoleArgs::ProcessResponseArgs() } LPWSTR szActualText = nullptr; -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX szActualText = pwzFileBuffer; #else DWORD dwNumChars = ExpandEnvironmentStrings(pwzFileBuffer, NULL, 0); diff --git a/src/coreclr/src/tools/util/file_can.h b/src/coreclr/src/tools/util/file_can.h index 71031eb9d7338..bb575d098b5ec 100644 --- a/src/coreclr/src/tools/util/file_can.h +++ b/src/coreclr/src/tools/util/file_can.h @@ -19,7 +19,7 @@ enum FileType HANDLE OpenFileEx( LPCWSTR filename, DWORD *fileLen, LPCWSTR relPath = NULL, bool bWrite = false); HRESULT ReadTextFile (PCWSTR pszFileName, UINT uiCodePage, WCAllocBuffer & textBuffer, FileType *fileType); -#if !defined(FEATURE_PAL) && !defined(CSEE) +#if !defined(TARGET_UNIX) && !defined(CSEE) // If you call ReadTextFile a lot you should create one HCRYPTPROV and pass it in to every call, otherwise // ReadTextFile indirectly creates and destroys a new HCRYPTPROV for every call, which is slow and unnecessary. // You can use CryptProvider to manage an HCRYPTPROV for you. diff --git a/src/coreclr/src/unwinder/amd64/unwinder_amd64.cpp b/src/coreclr/src/unwinder/amd64/unwinder_amd64.cpp index 41414c0a7b829..1039773a4a036 100644 --- a/src/coreclr/src/unwinder/amd64/unwinder_amd64.cpp +++ b/src/coreclr/src/unwinder/amd64/unwinder_amd64.cpp @@ -729,15 +729,15 @@ Return Value: // UnwindOp = UnwindInfo->UnwindCode[Index].UnwindOp; -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX if (UnwindOp > UWOP_SET_FPREG_LARGE) { return E_UNEXPECTED; } -#else // !PLATFORM_UNIX +#else // !TARGET_UNIX if (UnwindOp > UWOP_PUSH_MACHFRAME) { return E_UNEXPECTED; } -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX OpInfo = UnwindInfo->UnwindCode[Index].OpInfo; if (PrologOffset >= UnwindInfo->UnwindCode[Index].CodeOffset) { @@ -805,7 +805,7 @@ Return Value: ContextRecord->Rsp -= UnwindInfo->FrameOffset * 16; break; -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX // // Establish the the frame pointer register using a large size displacement. 
@@ -824,7 +824,7 @@ Return Value: ContextRecord->Rsp -= FrameOffset * 16; break; -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX // // Save nonvolatile integer register on the stack using a @@ -1142,7 +1142,7 @@ Routine Description: FrameOffset = UnwindInfo->FrameOffset; -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX // If UnwindInfo->FrameOffset == 15 (the maximum value), then there might be a UWOP_SET_FPREG_LARGE. // However, it is still legal for a UWOP_SET_FPREG to set UnwindInfo->FrameOffset == 15 (since this // was always part of the specification), so we need to look through the UnwindCode array to determine @@ -1161,7 +1161,7 @@ Routine Description: Index += UnwindOpSlots(UnwindOp); } } -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX *EstablisherFrame = (&ContextRecord->Rax)[UnwindInfo->FrameRegister]; *EstablisherFrame -= FrameOffset * 16; @@ -1174,14 +1174,14 @@ Routine Description: if (UnwindOp.UnwindOp == UWOP_SET_FPREG) { break; } -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX else if (UnwindOp.UnwindOp == UWOP_SET_FPREG_LARGE) { UNWINDER_ASSERT(UnwindInfo->FrameOffset == 15); FrameOffset = UnwindInfo->UnwindCode[Index + 1].FrameOffset; FrameOffset += UnwindInfo->UnwindCode[Index + 2].FrameOffset << 16; break; } -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX Index += UnwindOpSlots(UnwindOp); } diff --git a/src/coreclr/src/unwinder/arm/unwinder_arm.cpp b/src/coreclr/src/unwinder/arm/unwinder_arm.cpp index 07b12ca167dff..b5e717769cf05 100644 --- a/src/coreclr/src/unwinder/arm/unwinder_arm.cpp +++ b/src/coreclr/src/unwinder/arm/unwinder_arm.cpp @@ -1498,7 +1498,7 @@ BOOL DacUnwindStackFrame(T_CONTEXT *pContext, T_KNONVOLATILE_CONTEXT_POINTERS* p return res; } -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) PEXCEPTION_ROUTINE RtlVirtualUnwind( __in ULONG HandlerType, __in ULONG ImageBase, diff --git a/src/coreclr/src/unwinder/arm64/unwinder_arm64.cpp b/src/coreclr/src/unwinder/arm64/unwinder_arm64.cpp index e28fa1b3968df..e93d0b0e5297d 100644 --- a/src/coreclr/src/unwinder/arm64/unwinder_arm64.cpp +++ b/src/coreclr/src/unwinder/arm64/unwinder_arm64.cpp @@ -1609,7 +1609,7 @@ BOOL DacUnwindStackFrame(T_CONTEXT *pContext, T_KNONVOLATILE_CONTEXT_POINTERS* p return res; } -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) PEXCEPTION_ROUTINE RtlVirtualUnwind( IN ULONG HandlerType, diff --git a/src/coreclr/src/utilcode/ccomprc.cpp b/src/coreclr/src/utilcode/ccomprc.cpp index 3db5ef781c2e3..0fca5cc692cbf 100644 --- a/src/coreclr/src/utilcode/ccomprc.cpp +++ b/src/coreclr/src/utilcode/ccomprc.cpp @@ -8,7 +8,7 @@ #include "ndpversion.h" #include "../dlls/mscorrc/resource.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "resourcestring.h" #define NATIVE_STRING_RESOURCE_NAME mscorrc_debug __attribute__((visibility("default"))) DECLARE_NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME); @@ -115,10 +115,10 @@ HRESULT CCompRC::AddMapNode(LocaleID langId, HRESOURCEDLL hInst, BOOL fMissing) LPCWSTR CCompRC::m_pDefaultResource = W("mscorrc.debug.dll"); LPCWSTR CCompRC::m_pFallbackResource= W("mscorrc.dll"); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX LPCSTR CCompRC::m_pDefaultResourceDomain = "mscorrc.debug"; LPCSTR CCompRC::m_pFallbackResourceDomain = "mscorrc"; -#endif // FEATURE_PAL +#endif // TARGET_UNIX HRESULT CCompRC::Init(LPCWSTR pResourceFile, BOOL bUseFallback) { @@ -166,7 +166,7 @@ HRESULT CCompRC::Init(LPCWSTR pResourceFile, BOOL bUseFallback) return E_OUTOFMEMORY; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (m_pResourceFile == m_pDefaultResource) { @@ -181,7 +181,7 @@ HRESULT 
CCompRC::Init(LPCWSTR pResourceFile, BOOL bUseFallback) _ASSERTE(!"Unsupported resource file"); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX if (m_csMap == NULL) { @@ -551,7 +551,7 @@ HRESULT CCompRC::LoadString(ResourceCategory eCategory, LocaleID langId, UINT iR } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT hr; HRESOURCEDLL hInst = 0; //instance of cultured resource dll int length; @@ -692,17 +692,17 @@ HRESULT CCompRC::LoadString(ResourceCategory eCategory, LocaleID langId, UINT iR *szBuffer = W('\0'); return hr; -#else // !FEATURE_PAL +#else // !TARGET_UNIX return LoadNativeStringResource(NATIVE_STRING_RESOURCE_TABLE(NATIVE_STRING_RESOURCE_NAME), iResourceID, szBuffer, iMax, pcwchUsed); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #ifndef DACCESS_COMPILE HRESULT CCompRC::LoadResourceFile(HRESOURCEDLL * pHInst, LPCWSTR lpFileName) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD dwLoadLibraryFlags; if(m_pResourceFile == m_pDefaultResource) dwLoadLibraryFlags = LOAD_LIBRARY_AS_DATAFILE; @@ -712,9 +712,9 @@ HRESULT CCompRC::LoadResourceFile(HRESOURCEDLL * pHInst, LPCWSTR lpFileName) if ((*pHInst = WszLoadLibraryEx(lpFileName, NULL, dwLoadLibraryFlags)) == NULL) { return HRESULT_FROM_GetLastError(); } -#else // !FEATURE_PAL +#else // !TARGET_UNIX PORTABILITY_ASSERT("UNIXTODO: Implement resource loading - use peimagedecoder?"); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return S_OK; } diff --git a/src/coreclr/src/utilcode/clrhost_nodependencies.cpp b/src/coreclr/src/utilcode/clrhost_nodependencies.cpp index 643483d958284..9d9c6246ecf58 100644 --- a/src/coreclr/src/utilcode/clrhost_nodependencies.cpp +++ b/src/coreclr/src/utilcode/clrhost_nodependencies.cpp @@ -454,7 +454,7 @@ operator delete[](void *p) NOEXCEPT * New operator overloading for the executable heap * ------------------------------------------------------------------------ */ -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX const CExecutable executable = { 0 }; @@ -538,17 +538,17 @@ void * __cdecl operator new[](size_t n, const CExecutable&, const NoThrow&) return result; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef _DEBUG // This is a DEBUG routing to verify that a memory region complies with executable requirements BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length) { -#if defined(CROSSGEN_COMPILE) || defined(FEATURE_PAL) +#if defined(CROSSGEN_COMPILE) || defined(TARGET_UNIX) // No NX support on PAL or for crossgen compilations. 
return TRUE; -#else // !(CROSSGEN_COMPILE || FEATURE_PAL) +#else // !(CROSSGEN_COMPILE || TARGET_UNIX) BYTE *regionStart = (BYTE*) ALIGN_DOWN((BYTE*)lpMem, GetOsPageSize()); BYTE *regionEnd = (BYTE*) ALIGN_UP((BYTE*)lpMem+length, GetOsPageSize()); _ASSERTE(length > 0); @@ -570,7 +570,7 @@ BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length) } return TRUE; -#endif // CROSSGEN_COMPILE || FEATURE_PAL +#endif // CROSSGEN_COMPILE || TARGET_UNIX } #endif //_DEBUG diff --git a/src/coreclr/src/utilcode/dacutil.cpp b/src/coreclr/src/utilcode/dacutil.cpp index 33249d9f6b7ca..611867163a826 100644 --- a/src/coreclr/src/utilcode/dacutil.cpp +++ b/src/coreclr/src/utilcode/dacutil.cpp @@ -75,11 +75,11 @@ LiveProcDataTarget::GetMachineType( { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) *machine = IMAGE_FILE_MACHINE_I386; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) *machine = IMAGE_FILE_MACHINE_AMD64; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) *machine = IMAGE_FILE_MACHINE_ARMNT; #else PORTABILITY_ASSERT("Unknown Processor"); diff --git a/src/coreclr/src/utilcode/debug.cpp b/src/coreclr/src/utilcode/debug.cpp index 91d2031430798..8fd814d12f703 100644 --- a/src/coreclr/src/utilcode/debug.cpp +++ b/src/coreclr/src/utilcode/debug.cpp @@ -87,7 +87,7 @@ void DoRaiseExceptionOnAssert(DWORD chance) #if !defined(DACCESS_COMPILE) if (chance) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PAL_TRY_NAKED { RaiseException(EXCEPTION_INTERNAL_ASSERT, 0, 0, NULL); @@ -96,10 +96,10 @@ void DoRaiseExceptionOnAssert(DWORD chance) { } PAL_ENDTRY_NAKED -#else // FEATURE_PAL +#else // TARGET_UNIX // For PAL always raise the exception. RaiseException(EXCEPTION_INTERNAL_ASSERT, 0, 0, NULL); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } #endif // !DACCESS_COMPILE } @@ -222,7 +222,7 @@ VOID LogAssert( STRESS_LOG2(LF_ASSERT, LL_ALWAYS, "ASSERT:%s, line:%d\n", szFile, iLine); SYSTEMTIME st; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX GetLocalTime(&st); #else GetSystemTime(&st); @@ -260,7 +260,7 @@ BOOL LaunchJITDebugger() STATIC_CONTRACT_DEBUG_ONLY; BOOL fSuccess = FALSE; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX EX_TRY { SString debugger; @@ -297,7 +297,7 @@ BOOL LaunchJITDebugger() { } EX_END_CATCH(SwallowAllExceptions); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return fSuccess; } @@ -559,7 +559,7 @@ bool _DbgBreakCheckNoThrow( return result; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Get the timestamp from the PE file header. This is useful unsigned DbgGetEXETimeStamp() { @@ -584,7 +584,7 @@ unsigned DbgGetEXETimeStamp() return cache; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Called from within the IfFail...() macros. Set a breakpoint here to break on // errors. 
@@ -615,7 +615,7 @@ VOID DebBreakHr(HRESULT hr) #endif } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CHAR g_szExprWithStack2[10480]; #endif void *dbgForceToMemory; // dummy pointer that pessimises enregistration @@ -684,7 +684,7 @@ VOID DbgAssertDialog(const char *szFile, int iLine, const char *szExpr) else { char *szExprToDisplay = (char*)szExpr; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX BOOL fGotStackTrace = TRUE; #else BOOL fGotStackTrace = FALSE; @@ -703,7 +703,7 @@ VOID DbgAssertDialog(const char *szFile, int iLine, const char *szExpr) } EX_END_CATCH(SwallowAllExceptions); #endif // DACCESS_COMPILE -#endif // FEATURE_PAL +#endif // TARGET_UNIX if (_DbgBreakCheckNoThrow(szFile, iLine, szExprToDisplay, !fGotStackTrace)) { @@ -731,7 +731,7 @@ bool GetStackTraceAtContext(SString & s, CONTEXT * pContext) FAULT_NOT_FATAL(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX EX_TRY { const int cTotal = cfrMaxAssertStackLevels - 1; @@ -750,7 +750,7 @@ bool GetStackTraceAtContext(SString & s, CONTEXT * pContext) // Nothing to do here. } EX_END_CATCH(SwallowAllExceptions); -#endif // FEATURE_PAL +#endif // TARGET_UNIX return fSuccess; } // GetStackTraceAtContext @@ -821,7 +821,7 @@ void DECLSPEC_NORETURN __FreeBuildAssertFail(const char *szFile, int iLine, cons // Give assert in output for easy access. ClrGetModuleFileName(0, modulePath); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX buffer.Printf(W("CLR: Assert failure(PID %d [0x%08x], Thread: %d [0x%x]): %hs\n") W(" File: %hs, Line: %d Image:\n"), GetCurrentProcessId(), GetCurrentProcessId(), @@ -832,9 +832,9 @@ void DECLSPEC_NORETURN __FreeBuildAssertFail(const char *szFile, int iLine, cons WszOutputDebugString(buffer); // Write out the error to the console _putws(buffer); -#else // FEATURE_PAL +#else // TARGET_UNIX // UNIXTODO: Do this for Unix. -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Log to the stress log. Note that we can't include the szExpr b/c that // may not be a string literal (particularly for formatt-able asserts). STRESS_LOG2(LF_ASSERT, LL_ALWAYS, "ASSERT:%s, line:%d\n", szFile, iLine); diff --git a/src/coreclr/src/utilcode/hostimpl.cpp b/src/coreclr/src/utilcode/hostimpl.cpp index e451f9e94dee5..60c6acaf48151 100644 --- a/src/coreclr/src/utilcode/hostimpl.cpp +++ b/src/coreclr/src/utilcode/hostimpl.cpp @@ -358,7 +358,7 @@ HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrGetProcessExecutableHeap() HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX return NULL; #else return HeapCreate(flOptions, dwInitialSize, dwMaximumSize); @@ -367,7 +367,7 @@ HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapCreate(DWORD flOptions, SIZ BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapDestroy(HANDLE hHeap) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX return FALSE; #else return HeapDestroy(hHeap); @@ -390,7 +390,7 @@ BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapFree(HANDLE hHeap, DWORD dwFl BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX return FALSE; #else return HeapValidate(hHeap, dwFlags, lpMem); diff --git a/src/coreclr/src/utilcode/log.cpp b/src/coreclr/src/utilcode/log.cpp index e9bc9257253a7..184e222ce231c 100644 --- a/src/coreclr/src/utilcode/log.cpp +++ b/src/coreclr/src/utilcode/log.cpp @@ -340,7 +340,7 @@ VOID LogSpewAlwaysValist(const char *fmt, va_list args) // trashing your program... 
_ASSERTE((buflen < (DWORD) BUFFERSIZE) && "Log text is too long!") ; -#if !PLATFORM_UNIX +#if !TARGET_UNIX //convert NL's to CR NL to fixup notepad const int BUFFERSIZE2 = BUFFERSIZE + 500; char rgchBuffer2[BUFFERSIZE2]; @@ -361,7 +361,7 @@ VOID LogSpewAlwaysValist(const char *fmt, va_list args) buflen = (DWORD)(d - pBuffer2); pBuffer = pBuffer2; -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX if (LogFlags & LOG_ENABLE_FILE_LOGGING && LogFileHandle != INVALID_HANDLE_VALUE) { diff --git a/src/coreclr/src/utilcode/longfilepathwrappers.cpp b/src/coreclr/src/utilcode/longfilepathwrappers.cpp index b974d62cc2868..44c35ad2a9aba 100644 --- a/src/coreclr/src/utilcode/longfilepathwrappers.cpp +++ b/src/coreclr/src/utilcode/longfilepathwrappers.cpp @@ -11,14 +11,14 @@ class LongFile { private: -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static const WCHAR* ExtendedPrefix; static const WCHAR* DevicePathPrefix; static const WCHAR* UNCPathPrefix; static const WCHAR* UNCExtendedPathPrefix; static const WCHAR VolumeSeparatorChar; #define UNCPATHPREFIX W("\\\\") -#endif //FEATURE_PAL +#endif //TARGET_UNIX static const WCHAR DirectorySeparatorChar; static const WCHAR AltDirectorySeparatorChar; public: @@ -31,7 +31,7 @@ class LongFile static HRESULT NormalizePath(SString& path); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static void NormalizeDirectorySeparators(SString& path); #endif }; @@ -60,11 +60,11 @@ LoadLibraryExWrapper( if (LongFile::IsPathNotFullyQualified(path) || SUCCEEDED(LongFile::NormalizePath(path))) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //Adding the assert to ensure relative paths which are not just filenames are not used for LoadLibrary Calls _ASSERTE(!LongFile::IsPathNotFullyQualified(path) || !LongFile::ContainsDirectorySeparator(path)); LongFile::NormalizeDirectorySeparators(path); -#endif //FEATURE_PAL +#endif //TARGET_UNIX ret = LoadLibraryExW(path.GetUnicode(), hFile, dwFlags); } @@ -657,7 +657,7 @@ DWORD WINAPI GetEnvironmentVariableWrapper( } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL CopyFileExWrapper( @@ -764,10 +764,10 @@ FindFirstFileExWrapper( return ret; } -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #if ! defined(DACCESS_COMPILE) && !defined(SELF_NO_HOST) extern HINSTANCE g_pMSCorEE; @@ -822,13 +822,13 @@ BOOL PAL_GetPALDirectoryWrapper(SString& pbuffer) return retval; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX //Implementation of LongFile Helpers const WCHAR LongFile::DirectorySeparatorChar = W('\\'); const WCHAR LongFile::AltDirectorySeparatorChar = W('/'); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX const WCHAR LongFile::VolumeSeparatorChar = W(':'); const WCHAR* LongFile::ExtendedPrefix = W("\\\\?\\"); const WCHAR* LongFile::DevicePathPrefix = W("\\\\.\\"); @@ -1003,7 +1003,7 @@ HRESULT LongFile::NormalizePath(SString & path) { return S_OK; } -#endif //FEATURE_PAL +#endif //TARGET_UNIX BOOL LongFile::ContainsDirectorySeparator(SString & path) { diff --git a/src/coreclr/src/utilcode/md5.cpp b/src/coreclr/src/utilcode/md5.cpp index a0a0f10743a18..1617d52905c1d 100644 --- a/src/coreclr/src/utilcode/md5.cpp +++ b/src/coreclr/src/utilcode/md5.cpp @@ -142,7 +142,7 @@ void MD5::GetHashValue(MD5HASHDATA* phash) // // but our compiler has an intrinsic! 
- #if (defined(_X86_) || defined(_ARM_)) && defined(PLATFORM_UNIX) + #if (defined(HOST_X86) || defined(HOST_ARM)) && defined(TARGET_UNIX) #define ROL(x, n) (((x) << (n)) | ((x) >> (32-(n)))) #define ROTATE_LEFT(x,n) (x) = ROL(x,n) #else diff --git a/src/coreclr/src/utilcode/pedecoder.cpp b/src/coreclr/src/utilcode/pedecoder.cpp index 8eb65f9cb3e5a..c5267a751e0e2 100644 --- a/src/coreclr/src/utilcode/pedecoder.cpp +++ b/src/coreclr/src/utilcode/pedecoder.cpp @@ -1459,7 +1459,7 @@ CHECK PEDecoder::CheckILOnly() const CHECK(CheckILOnlyBaseRelocations()); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (!IsMapped()) { CHECK(CheckILOnlyEntryPoint()); @@ -1511,7 +1511,7 @@ CHECK PEDecoder::CheckILOnlyImportDlls() const // The only allowed DLL Imports are MscorEE.dll:_CorExeMain,_CorDllMain -#ifdef BIT64 +#ifdef HOST_64BIT // On win64, when the image is LoadLibrary'd, we whack the import and IAT directories. We have to relax // the verification for mapped images. Ideally, we would only do it for a post-LoadLibrary image. if (IsMapped() && !HasDirectoryEntry(IMAGE_DIRECTORY_ENTRY_IMPORT)) @@ -1604,7 +1604,7 @@ CHECK PEDecoder::CheckILOnlyImportByNameTable(RVA rva) const CHECK_OK; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // jmp dword ptr ds:[XXXX] #define JMP_DWORD_PTR_DS_OPCODE { 0xFF, 0x25 } #define JMP_DWORD_PTR_DS_OPCODE_SIZE 2 // Size of opcode @@ -1678,7 +1678,7 @@ CHECK PEDecoder::CheckILOnlyBaseRelocations() const CHECK_OK; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 CHECK PEDecoder::CheckILOnlyEntryPoint() const { CONTRACT_CHECK @@ -1721,7 +1721,7 @@ CHECK PEDecoder::CheckILOnlyEntryPoint() const CHECK_OK; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifndef DACCESS_COMPILE @@ -3110,7 +3110,7 @@ BOOL PEDecoder::ForceRelocForDLL(LPCWSTR lpFileName) STATIC_CONTRACT_CANNOT_TAKE_LOCK; #endif -#if defined(DACCESS_COMPILE) || defined(FEATURE_PAL) +#if defined(DACCESS_COMPILE) || defined(TARGET_UNIX) return TRUE; #else @@ -3181,7 +3181,7 @@ BOOL PEDecoder::ForceRelocForDLL(LPCWSTR lpFileName) return fSuccess; -#endif // DACCESS_COMPILE || FEATURE_PAL +#endif // DACCESS_COMPILE || TARGET_UNIX } #endif // _DEBUG diff --git a/src/coreclr/src/utilcode/perflog.cpp b/src/coreclr/src/utilcode/perflog.cpp index 7abca5badd313..17d84a99a2f05 100644 --- a/src/coreclr/src/utilcode/perflog.cpp +++ b/src/coreclr/src/utilcode/perflog.cpp @@ -113,7 +113,7 @@ void PerfLog::PerfLogInitialize() // Hardcoded file name for spitting the perf auotmation formatted perf data. Open // the file here for writing and close in PerfLogDone(). 
m_hPerfLogFileHandle = WszCreateFile ( -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX W("/tmp/PerfData.dat"), #else W("C:\\PerfData.dat"), diff --git a/src/coreclr/src/utilcode/regutil.cpp b/src/coreclr/src/utilcode/regutil.cpp index e0913ecaa7994..4ae47cb2db6e9 100644 --- a/src/coreclr/src/utilcode/regutil.cpp +++ b/src/coreclr/src/utilcode/regutil.cpp @@ -21,7 +21,7 @@ #define COMPLUS_PREFIX W("COMPlus_") #define LEN_OF_COMPLUS_PREFIX 8 -#if (!defined(FEATURE_UTILCODE_NO_DEPENDENCIES) || defined(DEBUG)) && !defined(FEATURE_PAL) +#if (!defined(FEATURE_UTILCODE_NO_DEPENDENCIES) || defined(DEBUG)) && !defined(TARGET_UNIX) #define ALLOW_REGISTRY #endif diff --git a/src/coreclr/src/utilcode/safewrap.cpp b/src/coreclr/src/utilcode/safewrap.cpp index f19e52e613069..e32e5b54d68bd 100644 --- a/src/coreclr/src/utilcode/safewrap.cpp +++ b/src/coreclr/src/utilcode/safewrap.cpp @@ -180,7 +180,7 @@ DWORD ClrReportEvent( } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HANDLE h = ::RegisterEventSourceW( NULL, // uses local computer pEventSource); @@ -215,10 +215,10 @@ DWORD ClrReportEvent( ::DeregisterEventSource(h); return (ret == TRUE)?ERROR_SUCCESS:dwRetStatus; -#else // FEATURE_PAL +#else // TARGET_UNIX // UNIXTODO: Report the event somewhere? return ERROR_SUCCESS; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Returns ERROR_SUCCESS if succeessful in reporting to event log, or @@ -234,7 +234,7 @@ DWORD ClrReportEvent( return ClrReportEvent(pEventSource, wType, wCategory, dwEventID, lpUserSid, 1, &pMessage); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Read a REG_SZ (null-terminated string) value from the registry. Throws. // // Arguments: @@ -334,4 +334,4 @@ void ClrRegReadString(HKEY hKey, const SString & szValueName, SString & value) COUNT_T numCharsNoNull = numCharsIncludingNull - 1; value.CloseBuffer(numCharsNoNull); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX diff --git a/src/coreclr/src/utilcode/sstring.cpp b/src/coreclr/src/utilcode/sstring.cpp index 094984bd8d7ac..d73e8861eed33 100644 --- a/src/coreclr/src/utilcode/sstring.cpp +++ b/src/coreclr/src/utilcode/sstring.cpp @@ -71,7 +71,7 @@ static WCHAR MapChar(WCHAR wc, DWORD dwFlags) WCHAR wTmp; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX int iRet = ::LCMapStringEx(LOCALE_NAME_INVARIANT, dwFlags, &wc, 1, &wTmp, 1, NULL, NULL, 0); if (!iRet) { @@ -79,7 +79,7 @@ static WCHAR MapChar(WCHAR wc, DWORD dwFlags) wTmp = wc; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX // For PAL, no locale specific processing is done if (dwFlags == LCMAP_UPPERCASE) @@ -101,7 +101,7 @@ static WCHAR MapChar(WCHAR wc, DWORD dwFlags) PAL_ToLowerInvariant(wc); #endif } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return wTmp; } diff --git a/src/coreclr/src/utilcode/stacktrace.cpp b/src/coreclr/src/utilcode/stacktrace.cpp index 0b92b2ad487e2..364bfecb82eda 100644 --- a/src/coreclr/src/utilcode/stacktrace.cpp +++ b/src/coreclr/src/utilcode/stacktrace.cpp @@ -135,7 +135,7 @@ typedef BOOL (__stdcall *pfnImgHlp_StackWalk)( PTRANSLATE_ADDRESS_ROUTINE TranslateAddress ); -#ifdef BIT64 +#ifdef HOST_64BIT typedef DWORD64 (__stdcall *pfnImgHlp_SymGetModuleBase64)( IN HANDLE hProcess, IN DWORD64 dwAddr @@ -213,13 +213,13 @@ struct IMGHLPFN_LOAD }; -#if defined(BIT64) +#if defined(HOST_64BIT) typedef void (*pfn_GetRuntimeStackWalkInfo)( IN ULONG64 ControlPc, OUT UINT_PTR* pModuleBase, OUT UINT_PTR* pFuncEntry ); -#endif // BIT64 +#endif // HOST_64BIT // @@ -243,9 +243,9 @@ pfnImgHlp_SymLoadModule _SymLoadModule; pfnImgHlp_SymRegisterCallback _SymRegisterCallback; 
pfnImgHlp_SymSetOptions _SymSetOptions; pfnImgHlp_SymGetOptions _SymGetOptions; -#if defined(BIT64) +#if defined(HOST_64BIT) pfn_GetRuntimeStackWalkInfo _GetRuntimeStackWalkInfo; -#endif // BIT64 +#endif // HOST_64BIT IMGHLPFN_LOAD ailFuncList[] = { @@ -469,7 +469,7 @@ void MagicInit() // _SymSetOptions(_SymGetOptions() | SYMOPT_DEFERRED_LOADS|SYMOPT_DEBUG); -#ifndef BIT64 +#ifndef HOST_64BIT _SymRegisterCallback(g_hProcess, SymCallback, 0); #endif @@ -574,7 +574,7 @@ DWORD_PTR dwPCAddr HANDLE hFuncEntry = _SymFunctionTableAccess( hProcess, dwPCAddr ); -#if defined(BIT64) +#if defined(HOST_64BIT) if (hFuncEntry == NULL) { if (_GetRuntimeStackWalkInfo == NULL) @@ -587,7 +587,7 @@ DWORD_PTR dwPCAddr _GetRuntimeStackWalkInfo((ULONG64)dwPCAddr, NULL, (UINT_PTR*)(&hFuncEntry)); } -#endif // BIT64 +#endif // HOST_64BIT return hFuncEntry; } @@ -652,7 +652,7 @@ DWORD_PTR dwAddr } } -#if defined(BIT64) +#if defined(HOST_64BIT) if (_GetRuntimeStackWalkInfo == NULL) { _GetRuntimeStackWalkInfo = (pfn_GetRuntimeStackWalkInfo) @@ -665,7 +665,7 @@ DWORD_PTR dwAddr _GetRuntimeStackWalkInfo((ULONG64)dwAddr, (UINT_PTR*)&moduleBase, NULL); if (moduleBase != NULL) return moduleBase; -#endif // BIT64 +#endif // HOST_64BIT return 0; } @@ -724,7 +724,7 @@ CONTEXT * pContext // Context to use (or NULL to use current) memcpy(&context, pContext, sizeof(CONTEXT)); } -#ifdef BIT64 +#ifdef HOST_64BIT STACKFRAME64 stkfrm; memset(&stkfrm, 0, sizeof(STACKFRAME64)); #else @@ -741,7 +741,7 @@ CONTEXT * pContext // Context to use (or NULL to use current) stkfrm.AddrFrame.Offset = context.Ebp; // Frame Pointer #endif -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // If we don't have a user-supplied context, then don't skip any frames. // So ignore this function (GetStackBackTrace) // ClrCaptureContext on x86 gives us the ESP/EBP/EIP of its caller's caller @@ -750,7 +750,7 @@ CONTEXT * pContext // Context to use (or NULL to use current) { ifrStart += 1; } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 for (UINT i = 0; i < ifrStart + cfrTotal; i++) { @@ -791,7 +791,7 @@ CONTEXT * pContext // Context to use (or NULL to use current) * Actually prints the info into the string for the symbol. ****************************************************************************/ -#ifdef BIT64 +#ifdef HOST_64BIT #define FMT_ADDR_BARE "%08x`%08x" #else #define FMT_ADDR_BARE "%08x" @@ -948,7 +948,7 @@ void MagicDeinit(void) } } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) /**************************************************************************** * ClrCaptureContext * *-------------------* diff --git a/src/coreclr/src/utilcode/stresslog.cpp b/src/coreclr/src/utilcode/stresslog.cpp index 43d69b21cf305..2c4fd173a35d9 100644 --- a/src/coreclr/src/utilcode/stresslog.cpp +++ b/src/coreclr/src/utilcode/stresslog.cpp @@ -22,7 +22,7 @@ HANDLE StressLogChunk::s_LogChunkHeap = NULL; #endif // !STRESS_LOG_READONLY /*********************************************************************************/ -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) /* This is like QueryPerformanceCounter but a lot faster. 
On machines with variable-speed CPUs (for power management), this is not accurate, but may @@ -37,7 +37,7 @@ __forceinline __declspec(naked) unsigned __int64 getTimeStamp() { }; } -#else // _TARGET_X86_ +#else // TARGET_X86 unsigned __int64 getTimeStamp() { STATIC_CONTRACT_LEAF; @@ -49,9 +49,9 @@ unsigned __int64 getTimeStamp() { return ret.QuadPart; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) /*********************************************************************************/ /* Get the the frequency cooresponding to 'getTimeStamp'. For x86, this is the @@ -98,7 +98,7 @@ unsigned __int64 getTickFrequency() return hz; } -#else // _TARGET_X86_ +#else // TARGET_X86 /*********************************************************************************/ @@ -113,7 +113,7 @@ unsigned __int64 getTickFrequency() return ret.QuadPart; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifdef STRESS_LOG @@ -174,7 +174,7 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByt GetSystemTimeAsFileTime (&theLog.startTime); theLog.startTimeStamp = getTimeStamp(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX theLog.moduleOffset = (SIZE_T)hMod; // HMODULES are base addresses. #ifdef _DEBUG @@ -183,9 +183,9 @@ void StressLog::Initialize(unsigned facilities, unsigned level, unsigned maxByt GetProcAddress(hModNtdll, "RtlCaptureStackBackTrace")); #endif // _DEBUG -#else // !FEATURE_PAL +#else // !TARGET_UNIX theLog.moduleOffset = (SIZE_T)PAL_GetSymbolModuleBase((void *)StressLog::Initialize); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #if !defined (STRESS_LOG_READONLY) StressLogChunk::s_LogChunkHeap = ClrHeapCreate (0, STRESSLOG_CHUNK_SIZE * 128, 0); diff --git a/src/coreclr/src/utilcode/util.cpp b/src/coreclr/src/utilcode/util.cpp index c8f9bd52e52e9..db7f2be643a11 100644 --- a/src/coreclr/src/utilcode/util.cpp +++ b/src/coreclr/src/utilcode/util.cpp @@ -237,7 +237,7 @@ namespace StackSString ssDllName; if ((wszDllPath == nullptr) || (wszDllPath[0] == W('\0')) || fIsDllPathPrefix) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX IfFailRet(Clr::Util::Com::FindInprocServer32UsingCLSID(rclsid, ssDllName)); EX_TRY @@ -256,9 +256,9 @@ namespace IfFailRet(hr); wszDllPath = ssDllName.GetUnicode(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX return E_FAIL; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } _ASSERTE(wszDllPath != nullptr); @@ -503,12 +503,12 @@ BYTE * ClrVirtualAllocExecutable(SIZE_T dwSize, // Fall through to #endif // USE_UPPER_ADDRESS -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Tell PAL to use the executable memory allocator to satisfy this request for virtual memory. // This will allow us to place JIT'ed code close to the coreclr library // and thus improve performance by avoiding jump stubs in managed code. 
flAllocationType |= MEM_RESERVE_EXECUTABLE; -#endif // FEATURE_PAL +#endif // TARGET_UNIX return (BYTE *) ClrVirtualAlloc (NULL, dwSize, flAllocationType, flProtect); @@ -523,13 +523,13 @@ LPVOID ClrVirtualAllocAligned(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocatio _ASSERTE(alignment != 0); _ASSERTE((alignment & (alignment - 1)) == 0); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // The VirtualAlloc on Windows ensures 64kB alignment _ASSERTE(alignment <= 0x10000); return ClrVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect); -#else // !FEATURE_PAL +#else // !TARGET_UNIX if(alignment < GetOsPageSize()) alignment = GetOsPageSize(); @@ -538,7 +538,7 @@ LPVOID ClrVirtualAllocAligned(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocatio SIZE_T addr = (SIZE_T)ClrVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect); return (LPVOID)((addr + (alignment - 1)) & ~(alignment - 1)); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #ifdef _DEBUG @@ -620,13 +620,13 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr, return (BYTE*) ClrVirtualAlloc(nullptr, dwSize, flAllocationType, flProtect); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX pResult = (BYTE *)PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(pMinAddr, pMaxAddr, dwSize); if (pResult != nullptr) { return pResult; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // We will do one scan from [pMinAddr .. pMaxAddr] // First align the tryAddr up to next 64k base address. @@ -733,7 +733,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr, return ::VirtualAllocExNuma(hProc, lpAddr, dwSize, allocType, prot, node); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX /*static*/ BOOL NumaNodeInfo::GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, PUSHORT node_no) { return ::GetNumaProcessorNodeEx(proc_no, node_no); @@ -767,12 +767,12 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr, return false; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX /*static*/ BOOL NumaNodeInfo::GetNumaProcessorNodeEx(USHORT proc_no, PUSHORT node_no) { return PAL_GetNumaProcessorNode(proc_no, node_no); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif /*static*/ BOOL NumaNodeInfo::m_enableGCNumaAware = FALSE; @@ -808,7 +808,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr, m_enableGCNumaAware = InitNumaNodeInfoAPI(); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //****************************************************************************** // CPUGroupInfo @@ -840,7 +840,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr, { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX return ::GetSystemTimes(idleTime, kernelTime, userTime); #else return FALSE; @@ -857,7 +857,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr, /*static*/ LONG CPUGroupInfo::m_initialization = 0; /*static*/ bool CPUGroupInfo::s_hadSingleProcessorAtStartup = false; -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) // Calculate greatest common divisor DWORD GCD(DWORD u, DWORD v) { @@ -887,7 +887,7 @@ DWORD LCM(DWORD u, DWORD v) } CONTRACTL_END; -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) BYTE *bBuffer = NULL; SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pSLPIEx = NULL; SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pRecord = NULL; @@ -962,7 +962,7 @@ DWORD LCM(DWORD u, DWORD v) { 
LIMITED_METHOD_CONTRACT; -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) WORD begin = 0; WORD nr_proc = 0; @@ -988,7 +988,7 @@ DWORD LCM(DWORD u, DWORD v) } CONTRACTL_END; -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) BOOL enableGCCPUGroups = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCCpuGroup) != 0; BOOL threadUseAllCpuGroups = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Thread_UseAllCpuGroups) != 0; @@ -1010,7 +1010,7 @@ DWORD LCM(DWORD u, DWORD v) BOOL hasMultipleGroups = m_nGroups > 1; m_enableGCCPUGroups = enableGCCPUGroups && hasMultipleGroups; m_threadUseAllCpuGroups = threadUseAllCpuGroups && hasMultipleGroups; -#endif // _TARGET_AMD64_ || _TARGET_ARM64_ +#endif // TARGET_AMD64 || TARGET_ARM64 // Determine if the process is affinitized to a single processor (or if the system has a single processor) DWORD_PTR processAffinityMask, systemAffinityMask; @@ -1079,7 +1079,7 @@ DWORD LCM(DWORD u, DWORD v) { LIMITED_METHOD_CONTRACT; -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) WORD bTemp = 0; WORD bDiff = processor_number - bTemp; @@ -1110,7 +1110,7 @@ DWORD LCM(DWORD u, DWORD v) } CONTRACTL_END; -#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if !defined(FEATURE_REDHAWK) && (defined(TARGET_AMD64) || defined(TARGET_ARM64)) // m_enableGCCPUGroups and m_threadUseAllCpuGroups must be TRUE _ASSERTE(m_enableGCCPUGroups && m_threadUseAllCpuGroups); @@ -1160,7 +1160,7 @@ DWORD LCM(DWORD u, DWORD v) } CONTRACTL_END; -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) WORD i, minGroup = 0; DWORD minWeight = 0; @@ -1203,7 +1203,7 @@ DWORD LCM(DWORD u, DWORD v) /*static*/ void CPUGroupInfo::ClearCPUGroupAffinity(GROUP_AFFINITY *gf) { LIMITED_METHOD_CONTRACT; -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) // m_enableGCCPUGroups and m_threadUseAllCpuGroups must be TRUE _ASSERTE(m_enableGCCPUGroups && m_threadUseAllCpuGroups); @@ -1238,7 +1238,7 @@ BOOL CPUGroupInfo::GetCPUGroupRange(WORD group_number, WORD* group_begin, WORD* LIMITED_METHOD_CONTRACT; return m_threadUseAllCpuGroups; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX //****************************************************************************** // Returns the number of processors that a process has been configured to run on @@ -1259,7 +1259,7 @@ int GetCurrentProcessCpuCount() unsigned int count = 0; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD_PTR pmask, smask; if (!GetProcessAffinityMask(GetCurrentProcess(), &pmask, &smask)) @@ -1287,20 +1287,20 @@ int GetCurrentProcessCpuCount() count = 64; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX count = PAL_GetLogicalCpuCountFromOS(); uint32_t cpuLimit; if (PAL_GetCpuLimit(&cpuLimit) && cpuLimit < count) count = cpuLimit; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX cCPUs = count; return count; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD_PTR GetCurrentProcessCpuMask() { CONTRACTL @@ -1310,7 +1310,7 @@ DWORD_PTR GetCurrentProcessCpuMask() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DWORD_PTR pmask, smask; if 
(!GetProcessAffinityMask(GetCurrentProcess(), &pmask, &smask)) @@ -1322,7 +1322,7 @@ DWORD_PTR GetCurrentProcessCpuMask() return 0; #endif } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX uint32_t GetOsPageSizeUncached() { @@ -1338,7 +1338,7 @@ namespace uint32_t GetOsPageSize() { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX size_t result = g_pageSize.LoadWithoutBarrier(); if(!result) @@ -2636,11 +2636,11 @@ void PutThumb2BlRel24(UINT16 * p, INT32 imm24) // Verify that we got a valid offset _ASSERTE(FitsInThumb2BlRel24(imm24)); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Ensure that the ThumbBit is not set on the offset // as it cannot be encoded. _ASSERTE(!(imm24 & THUMB_CODE)); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM USHORT Opcode0 = p[0]; USHORT Opcode1 = p[1]; @@ -2958,7 +2958,7 @@ BOOL IsIPInModule(HMODULE_TGT hModule, PCODE ip) param.fRet = FALSE; // UNIXTODO: implement a proper version for PAL -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PAL_TRY(Param *, pParam, &param) { PTR_BYTE pBase = dac_cast(pParam->hModule); @@ -3027,7 +3027,7 @@ lDone: ; { } PAL_ENDTRY -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return param.fRet; } @@ -3096,7 +3096,7 @@ namespace Util static BOOL g_fLocalAppDataDirectoryInitted = FALSE; static WCHAR *g_wszLocalAppDataDirectory = NULL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Struct used to scope suspension of client impersonation for the current thread. // https://docs.microsoft.com/en-us/windows/desktop/secauthz/client-impersonation class SuspendImpersonation @@ -3337,7 +3337,7 @@ namespace Com return __imp::FindSubKeyDefaultValueForCLSID(rclsid, W("InprocServer32"), ssInprocServer32Name); } } // namespace Com -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // namespace Util } // namespace Clr diff --git a/src/coreclr/src/utilcode/util_nodependencies.cpp b/src/coreclr/src/utilcode/util_nodependencies.cpp index 5ebaaee7391bf..4f6732a1a0a83 100644 --- a/src/coreclr/src/utilcode/util_nodependencies.cpp +++ b/src/coreclr/src/utilcode/util_nodependencies.cpp @@ -28,7 +28,7 @@ RunningOnStatusEnum gRunningOnStatus = RUNNING_ON_STATUS_UNINITED; //***************************************************************************** void InitRunningOnVersionStatus () { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_CANNOT_TAKE_LOCK; @@ -88,15 +88,15 @@ void InitRunningOnVersionStatus () UtilMessageBoxCatastrophicNonLocalized(NON_SUPPORTED_PLATFORM_MSGBOX_TEXT, NON_SUPPORTED_PLATFORM_MSGBOX_TITLE, MB_OK | MB_ICONERROR, TRUE); TerminateProcess(GetCurrentProcess(), NON_SUPPORTED_PLATFORM_TERMINATE_ERROR_CODE); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // InitRunningOnVersionStatus -#ifndef BIT64 +#ifndef HOST_64BIT //------------------------------------------------------------------------------ // Returns TRUE if we are running on a 64-bit OS in WoW, FALSE otherwise. BOOL RunningInWow64() { - #ifdef PLATFORM_UNIX + #ifdef TARGET_UNIX return FALSE; #else static int s_Wow64Process; @@ -116,7 +116,7 @@ BOOL RunningInWow64() } #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //------------------------------------------------------------------------------ // // GetRegistryLongValue - Reads a configuration LONG value from the registry.
@@ -470,7 +470,7 @@ HRESULT GetDebuggerSettingInfoWorker(__out_ecount_part_opt(*pcchDebuggerString, return S_OK; } // GetDebuggerSettingInfoWorker -#endif // FEATURE_PAL +#endif // TARGET_UNIX #endif //!defined(FEATURE_UTILCODE_NO_DEPENDENCIES) || defined(_DEBUG) diff --git a/src/coreclr/src/utilcode/utsem.cpp b/src/coreclr/src/utilcode/utsem.cpp index fe682a7d72390..c5387d3175587 100644 --- a/src/coreclr/src/utilcode/utsem.cpp +++ b/src/coreclr/src/utilcode/utsem.cpp @@ -22,7 +22,7 @@ Revision History: #include "contract.h" // Consider replacing this with a #ifdef INTEROP_DEBUGGING -#if !defined(SELF_NO_HOST) && defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if !defined(SELF_NO_HOST) && defined(TARGET_X86) && !defined(TARGET_UNIX) // For Interop debugging, the UTSemReadWrite class must inform the debugger // that this thread can't be suspended currently. See vm\util.hpp for the // implementation of these methods. @@ -31,7 +31,7 @@ void DecCantStopCount(); #else #define IncCantStopCount() #define DecCantStopCount() -#endif // !SELF_NO_HOST && _TARGET_X86_ +#endif // !SELF_NO_HOST && TARGET_X86 /****************************************************************************** Definitions of the bit fields in UTSemReadWrite::m_dwFlag: @@ -59,7 +59,7 @@ const ULONG WRITEWAITERS_INCR = 0x00400000; // amount to add to increment num // Copy of definition from file:..\VM\spinlock.h #define CALLER_LIMITS_SPINNING 0 -#if (defined(SELF_NO_HOST) && !defined(CROSSGEN_COMPILE)) || (defined(FEATURE_PAL) && defined(DACCESS_COMPILE)) +#if (defined(SELF_NO_HOST) && !defined(CROSSGEN_COMPILE)) || (defined(TARGET_UNIX) && defined(DACCESS_COMPILE)) // When we do not have host, we just call OS - see file:..\VM\hosting.cpp#__SwitchToThread BOOL __SwitchToThread(DWORD dwSleepMSec, DWORD dwSwitchCount) diff --git a/src/coreclr/src/utilcode/winfix.cpp b/src/coreclr/src/utilcode/winfix.cpp index 785839e69954e..b7fad1e79b353 100644 --- a/src/coreclr/src/utilcode/winfix.cpp +++ b/src/coreclr/src/utilcode/winfix.cpp @@ -183,7 +183,7 @@ WszCreateProcess( return fResult; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "psapi.h" @@ -292,11 +292,11 @@ HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription) return g_pfnSetThreadDescription(hThread, lpThreadDescription); } -#else //!FEATURE_PAL +#else //!TARGET_UNIX HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription) { return SetThreadDescription(hThread, lpThreadDescription); } -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX diff --git a/src/coreclr/src/vm/.vscode/c_cpp_properties.json b/src/coreclr/src/vm/.vscode/c_cpp_properties.json index 7f2a0274fccab..a4c0ec931c99a 100644 --- a/src/coreclr/src/vm/.vscode/c_cpp_properties.json +++ b/src/coreclr/src/vm/.vscode/c_cpp_properties.json @@ -21,24 +21,24 @@ "../../src/vm/amd64" ], "defines": [ - "_AMD64_", + "HOST_AMD64", "_BLD_CLR", "_CRT_SECURE_NO_WARNINGS", "_DBG", "_SECURE_SCL=0", - "_TARGET_64BIT_=1", - "_TARGET_AMD64_=1", + "TARGET_64BIT=1", + "TARGET_AMD64=1", "_UNICODE", "_WIN32", "_WIN32_WINNT=0x0602", - "BIT64", + "HOST_64BIT", "AMD64", - "BIT64=1", + "HOST_64BIT=1", "BUILDENV_CHECKED=1", "DACCESS_COMPILE", - "DBG_TARGET_64BIT=1", - "DBG_TARGET_AMD64=1", - "DBG_TARGET_64BIT=1", + "TARGET_64BIT=1", + "TARGET_AMD64=1", + "TARGET_64BIT=1", "DEBUGGING_SUPPORTED", "EnC_SUPPORTED", "FEATURE_APPX", @@ -80,7 +80,7 @@ "FEATURE_UTF8STRING=1", "FEATURE_WIN32_REGISTRY", "FEATURE_WINMD_RESILIENT", - "PLATFORM_WINDOWS=1", + "TARGET_WINDOWS=1", "PROFILING_SUPPORTED_DATA", "UNICODE", "UNIX_AMD64_ABI_ITF", 
diff --git a/src/coreclr/src/vm/amd64/asmconstants.h b/src/coreclr/src/vm/amd64/asmconstants.h index ced56e5d58846..74b238e0e7ed1 100644 --- a/src/coreclr/src/vm/amd64/asmconstants.h +++ b/src/coreclr/src/vm/amd64/asmconstants.h @@ -8,9 +8,9 @@ // Allow multiple inclusion. -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 #error this file should only be used on an AMD64 platform -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #include "../../inc/switches.h" @@ -496,7 +496,7 @@ ASMCONSTANTS_C_ASSERT(MethodDescClassification__mdcClassification == mdcClassifi #define MethodDescClassification__mcInstantiated 0x5 ASMCONSTANTS_C_ASSERT(MethodDescClassification__mcInstantiated == mcInstantiated); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define OFFSET__TEB__ThreadLocalStoragePointer 0x58 ASMCONSTANTS_C_ASSERT(OFFSET__TEB__ThreadLocalStoragePointer == offsetof(TEB, ThreadLocalStoragePointer)); #endif diff --git a/src/coreclr/src/vm/amd64/cgenamd64.cpp b/src/coreclr/src/vm/amd64/cgenamd64.cpp index 3aa92ce10368d..a1ca359f2cd40 100644 --- a/src/coreclr/src/vm/amd64/cgenamd64.cpp +++ b/src/coreclr/src/vm/amd64/cgenamd64.cpp @@ -203,20 +203,20 @@ void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) pRD->pCurrentContext->Rip = pRD->ControlPC = m_MachState.m_Rip; pRD->pCurrentContext->Rsp = pRD->SP = m_MachState.m_Rsp; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = (m_MachState.m_Ptrs.p##regname != NULL) ? \ *m_MachState.m_Ptrs.p##regname : m_MachState.m_Unwound.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER -#else // FEATURE_PAL +#else // TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContext->regname = *m_MachState.m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER -#endif // FEATURE_PAL +#endif // TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) pRD->pCurrentContextPointers->regname = m_MachState.m_Ptrs.p##regname; ENUM_CALLEE_SAVED_REGISTERS(); diff --git a/src/coreclr/src/vm/amd64/cgencpu.h b/src/coreclr/src/vm/amd64/cgencpu.h index 36382710fc04b..59f0f7a5a11f6 100644 --- a/src/coreclr/src/vm/amd64/cgencpu.h +++ b/src/coreclr/src/vm/amd64/cgencpu.h @@ -10,7 +10,7 @@ -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 #error Should only include "AMD64\cgencpu.h" for AMD64 builds #endif @@ -82,9 +82,9 @@ EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr); #define INSTRFMT_K64SMALL #define INSTRFMT_K64 -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define USE_REDIRECT_FOR_GCSTRESS -#endif // FEATURE_PAL +#endif // TARGET_UNIX // // REX prefix byte @@ -488,7 +488,7 @@ struct HijackArgs }; ULONG64 ReturnValue[2]; }; -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX CalleeSavedRegisters Regs; union { diff --git a/src/coreclr/src/vm/amd64/gmsamd64.cpp b/src/coreclr/src/vm/amd64/gmsamd64.cpp index 1c83ca495d941..3447385617856 100644 --- a/src/coreclr/src/vm/amd64/gmsamd64.cpp +++ b/src/coreclr/src/vm/amd64/gmsamd64.cpp @@ -50,9 +50,9 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, do { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs); -#else // !FEATURE_PAL +#else // !TARGET_UNIX #if defined(DACCESS_COMPILE) HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs); @@ -70,7 +70,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, #endif // DACCESS_COMPILE pvControlPc = GetIP(&ctx); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (funCallDepth > 0) { @@ -115,7 +115,7 
@@ void LazyMachState::unwindLazyState(LazyMachState* baseState, // "unwoundState->_pRetAddr = PTR_TADDR(&unwoundState->m_Rip)". unwoundState->_pRetAddr = PTR_TADDR(unwoundState->m_Rsp - 8); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define CALLEE_SAVED_REGISTER(regname) unwoundState->m_Unwound.regname = ctx.regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER diff --git a/src/coreclr/src/vm/amd64/gmscpu.h b/src/coreclr/src/vm/amd64/gmscpu.h index e122c0f1a88f9..2faa9256799c0 100644 --- a/src/coreclr/src/vm/amd64/gmscpu.h +++ b/src/coreclr/src/vm/amd64/gmscpu.h @@ -71,7 +71,7 @@ struct MachState PTR_TADDR _pRetAddr; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // On PAL, we don't always have the context pointers available due to // a limitation of an unwinding library. In such case, preserve // the unwound values. @@ -132,7 +132,7 @@ inline void LazyMachState::setLazyStateFromUnwind(MachState* copy) this->m_Rip = copy->m_Rip; this->m_Rsp = copy->m_Rsp; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX this->m_Unwound = copy->m_Unwound; #endif diff --git a/src/coreclr/src/vm/appdomain.cpp b/src/coreclr/src/vm/appdomain.cpp index 4fb2593f86cfb..76c6f766ee5e7 100644 --- a/src/coreclr/src/vm/appdomain.cpp +++ b/src/coreclr/src/vm/appdomain.cpp @@ -61,9 +61,9 @@ #include "nativeoverlapped.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "dwreport.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #include "stringarraylist.h" diff --git a/src/coreclr/src/vm/argdestination.h b/src/coreclr/src/vm/argdestination.h index 0f596ab20863c..004f91b4a48e6 100644 --- a/src/coreclr/src/vm/argdestination.h +++ b/src/coreclr/src/vm/argdestination.h @@ -30,7 +30,7 @@ class ArgDestination LIMITED_METHOD_CONTRACT; #if defined(UNIX_AMD64_ABI) _ASSERTE((argLocDescForStructInRegs != NULL) || (offset != TransitionBlock::StructInRegsOffset)); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // This assert is not interesting on arm64. argLocDescForStructInRegs could be // initialized if the args are being enregistered. 
#else @@ -45,7 +45,7 @@ class ArgDestination return dac_cast(dac_cast(m_base) + m_offset); } -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) #ifndef DACCESS_COMPILE // Returns true if the ArgDestination represents an HFA struct @@ -82,7 +82,7 @@ class ArgDestination } #endif // !DACCESS_COMPILE -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) #if defined(UNIX_AMD64_ABI) diff --git a/src/coreclr/src/vm/arm/armsinglestepper.cpp b/src/coreclr/src/vm/arm/armsinglestepper.cpp index 7f861f30c2ae0..7b7b578c93b2d 100644 --- a/src/coreclr/src/vm/arm/armsinglestepper.cpp +++ b/src/coreclr/src/vm/arm/armsinglestepper.cpp @@ -98,7 +98,7 @@ ArmSingleStepper::ArmSingleStepper() ArmSingleStepper::~ArmSingleStepper() { #if !defined(DACCESS_COMPILE) -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(m_rgCode, kMaxCodeBuffer * sizeof(WORD)); #else DeleteExecutable(m_rgCode); @@ -111,7 +111,7 @@ void ArmSingleStepper::Init() #if !defined(DACCESS_COMPILE) if (m_rgCode == NULL) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_rgCode = (WORD *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(kMaxCodeBuffer * sizeof(WORD))); #else m_rgCode = new (executable) WORD[kMaxCodeBuffer]; diff --git a/src/coreclr/src/vm/arm/asmconstants.h b/src/coreclr/src/vm/arm/asmconstants.h index 55d1f597e91b9..f6d782d69811d 100644 --- a/src/coreclr/src/vm/arm/asmconstants.h +++ b/src/coreclr/src/vm/arm/asmconstants.h @@ -7,9 +7,9 @@ // Be sure to rebuild clr/src/vm/ceemain.cpp after changing this file, to // ensure that the constants match the expected C/C++ values -// #ifndef _ARM_ +// #ifndef HOST_ARM // #error this file should only be used on an ARM platform -// #endif // _ARM_ +// #endif // HOST_ARM #include "../../inc/switches.h" diff --git a/src/coreclr/src/vm/arm/cgencpu.h b/src/coreclr/src/vm/arm/cgencpu.h index 98a00f1e3c9f2..7b6da30fce6df 100644 --- a/src/coreclr/src/vm/arm/cgencpu.h +++ b/src/coreclr/src/vm/arm/cgencpu.h @@ -4,7 +4,7 @@ // -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM #error Should only include "cGenCpu.h" for ARM builds #endif @@ -30,9 +30,9 @@ struct ArgLocDesc; extern PCODE GetPreStubEntryPoint(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define USE_REDIRECT_FOR_GCSTRESS -#endif // FEATURE_PAL +#endif // TARGET_UNIX // CPU-dependent functions Stub * GenerateInitPInvokeFrameHelper(); @@ -1059,7 +1059,7 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode) #define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain #define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define JIT_Stelem_Ref JIT_Stelem_Ref #endif diff --git a/src/coreclr/src/vm/arm/stubs.cpp b/src/coreclr/src/vm/arm/stubs.cpp index 56573f6590283..ca6eeaec2ffb6 100644 --- a/src/coreclr/src/vm/arm/stubs.cpp +++ b/src/coreclr/src/vm/arm/stubs.cpp @@ -532,9 +532,9 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, do { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs); -#else // !FEATURE_PAL +#else // !TARGET_UNIX #ifdef DACCESS_COMPILE HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs); if (FAILED(hr)) @@ -550,7 +550,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, } #endif // DACCESS_COMPILE pvControlPc = GetIP(&ctx); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (funCallDepth > 0) { 
--funCallDepth; @@ -1328,7 +1328,7 @@ Stub *GenerateInitPInvokeFrameHelper() ThumbReg regScratch = ThumbReg(6); ThumbReg regR9 = ThumbReg(9); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Erect frame to perform call to GetThread psl->ThumbEmitProlog(1, sizeof(ArgumentRegisters), FALSE); // Save r4 for aligned stack @@ -1339,7 +1339,7 @@ Stub *GenerateInitPInvokeFrameHelper() psl->ThumbEmitGetThread(regThread); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX for (int reg = 0; reg < 4; reg++) psl->ThumbEmitLoadRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r) + sizeof(*ArgumentRegisters::r) * reg); #endif @@ -1367,7 +1367,7 @@ Stub *GenerateInitPInvokeFrameHelper() psl->ThumbEmitMovConstant(regScratch, 0); psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfReturnAddress - negSpace); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX DWORD cbSavedRegs = sizeof(ArgumentRegisters) + 2 * 4; // r0-r3, r4, lr psl->ThumbEmitAdd(regScratch, thumbRegSp, cbSavedRegs); psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace); @@ -1381,7 +1381,7 @@ Stub *GenerateInitPInvokeFrameHelper() // leave current Thread in R4 -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX psl->ThumbEmitEpilog(); #else // Return. The return address has been restored into LR at this point. @@ -1395,7 +1395,7 @@ Stub *GenerateInitPInvokeFrameHelper() void StubLinkerCPU::ThumbEmitGetThread(ThumbReg dest) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX ThumbEmitMovConstant(ThumbReg(0), (TADDR)GetThread); @@ -1406,7 +1406,7 @@ void StubLinkerCPU::ThumbEmitGetThread(ThumbReg dest) ThumbEmitMovRegReg(dest, ThumbReg(0)); } -#else // FEATURE_PAL +#else // TARGET_UNIX // mrc p15, 0, dest, c13, c0, 2 Emit16(0xee1d); @@ -1418,7 +1418,7 @@ void StubLinkerCPU::ThumbEmitGetThread(ThumbReg dest) ThumbEmitLoadRegIndirect(dest, dest, (g_TlsIndex & 0x7FFF0000) >> 16); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } #endif // CROSSGEN_COMPILE diff --git a/src/coreclr/src/vm/arm64/arm64singlestepper.cpp b/src/coreclr/src/vm/arm64/arm64singlestepper.cpp index d65be6dda2cde..fdf95a4d80f4b 100644 --- a/src/coreclr/src/vm/arm64/arm64singlestepper.cpp +++ b/src/coreclr/src/vm/arm64/arm64singlestepper.cpp @@ -47,7 +47,7 @@ Arm64SingleStepper::Arm64SingleStepper() Arm64SingleStepper::~Arm64SingleStepper() { #if !defined(DACCESS_COMPILE) -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(m_rgCode, kMaxCodeBuffer * sizeof(uint32_t)); #else DeleteExecutable(m_rgCode); @@ -60,7 +60,7 @@ void Arm64SingleStepper::Init() #if !defined(DACCESS_COMPILE) if (m_rgCode == NULL) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_rgCode = (uint32_t *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(kMaxCodeBuffer * sizeof(uint32_t))); #else m_rgCode = new (executable) uint32_t[kMaxCodeBuffer]; diff --git a/src/coreclr/src/vm/arm64/asmconstants.h b/src/coreclr/src/vm/arm64/asmconstants.h index 3c9b3bf280c30..544d09cb5d2cd 100644 --- a/src/coreclr/src/vm/arm64/asmconstants.h +++ b/src/coreclr/src/vm/arm64/asmconstants.h @@ -7,9 +7,9 @@ // Be sure to rebuild clr/src/vm/ceemain.cpp after changing this file, to // ensure that the constants match the expected C/C++ values -// #ifndef _ARM64_ +// #ifndef HOST_ARM64 // #error this file should only be used on an ARM platform -// #endif // _ARM64_ +// #endif // HOST_ARM64 #include "../../inc/switches.h" diff --git a/src/coreclr/src/vm/arm64/cgencpu.h b/src/coreclr/src/vm/arm64/cgencpu.h index 
f29e04527a72a..7f9bef9da9c41 100644 --- a/src/coreclr/src/vm/arm64/cgencpu.h +++ b/src/coreclr/src/vm/arm64/cgencpu.h @@ -4,7 +4,7 @@ // -#ifndef _TARGET_ARM64_ +#ifndef TARGET_ARM64 #error Should only include "cGenCpu.h" for ARM64 builds #endif @@ -14,9 +14,9 @@ #define INSTRFMT_K64 #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define USE_REDIRECT_FOR_GCSTRESS -#endif // FEATURE_PAL +#endif // TARGET_UNIX EXTERN_C void getFPReturn(int fpSize, INT64 *pRetVal); EXTERN_C void setFPReturn(int fpSize, INT64 retVal); diff --git a/src/coreclr/src/vm/arm64/stubs.cpp b/src/coreclr/src/vm/arm64/stubs.cpp index 81d1eb476fd5d..0810e0abb30e5 100644 --- a/src/coreclr/src/vm/arm64/stubs.cpp +++ b/src/coreclr/src/vm/arm64/stubs.cpp @@ -329,9 +329,9 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, do { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pvControlPc = Thread::VirtualUnwindCallFrame(&context, &nonVolContextPtrs); -#else // !FEATURE_PAL +#else // !TARGET_UNIX #ifdef DACCESS_COMPILE HRESULT hr = DacVirtualUnwind(threadId, &context, &nonVolContextPtrs); if (FAILED(hr)) @@ -347,7 +347,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, } #endif // DACCESS_COMPILE pvControlPc = GetIP(&context); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (funCallDepth > 0) { @@ -380,7 +380,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState, } } while (true); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX unwoundstate->captureX19_X29[0] = context.X19; unwoundstate->captureX19_X29[1] = context.X20; unwoundstate->captureX19_X29[2] = context.X21; @@ -499,7 +499,7 @@ void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) pRD->pCurrentContext->Pc = pRD->ControlPC; pRD->pCurrentContext->Sp = pRD->SP; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX pRD->pCurrentContext->X19 = m_MachState.ptrX19_X29[0] ? *m_MachState.ptrX19_X29[0] : m_MachState.captureX19_X29[0]; pRD->pCurrentContext->X20 = m_MachState.ptrX19_X29[1] ? *m_MachState.ptrX19_X29[1] : m_MachState.captureX19_X29[1]; pRD->pCurrentContext->X21 = m_MachState.ptrX19_X29[2] ? *m_MachState.ptrX19_X29[2] : m_MachState.captureX19_X29[2]; @@ -512,7 +512,7 @@ void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD) pRD->pCurrentContext->X28 = m_MachState.ptrX19_X29[9] ? *m_MachState.ptrX19_X29[9] : m_MachState.captureX19_X29[9]; pRD->pCurrentContext->Fp = m_MachState.ptrX19_X29[10] ? 
*m_MachState.ptrX19_X29[10] : m_MachState.captureX19_X29[10]; pRD->pCurrentContext->Lr = NULL; // Unwind again to get Caller's PC -#else // FEATURE_PAL +#else // TARGET_UNIX pRD->pCurrentContext->X19 = *m_MachState.ptrX19_X29[0]; pRD->pCurrentContext->X20 = *m_MachState.ptrX19_X29[1]; pRD->pCurrentContext->X21 = *m_MachState.ptrX19_X29[2]; diff --git a/src/coreclr/src/vm/array.cpp b/src/coreclr/src/vm/array.cpp index cb99125b95804..f61504897c150 100644 --- a/src/coreclr/src/vm/array.cpp +++ b/src/coreclr/src/vm/array.cpp @@ -173,7 +173,7 @@ VOID ArrayClass::GenerateArrayAccessorCallSig( break; } -#if defined(FEATURE_ARRAYSTUB_AS_IL ) && !defined(_TARGET_X86_) +#if defined(FEATURE_ARRAYSTUB_AS_IL ) && !defined(TARGET_X86) if(dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS && fForStubAsIL) { *pSig++ = ELEMENT_TYPE_I; @@ -188,7 +188,7 @@ VOID ArrayClass::GenerateArrayAccessorCallSig( *pSig++ = ELEMENT_TYPE_VAR; *pSig++ = 0; // variable 0 } -#if defined(FEATURE_ARRAYSTUB_AS_IL ) && defined(_TARGET_X86_) +#if defined(FEATURE_ARRAYSTUB_AS_IL ) && defined(TARGET_X86) else if(dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS && fForStubAsIL) { *pSig++ = ELEMENT_TYPE_I; @@ -479,10 +479,10 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy if (arrayKind == ELEMENT_TYPE_ARRAY) baseSize += Rank*sizeof(DWORD)*2; -#if !defined(_TARGET_64BIT_) && (DATA_ALIGNMENT > 4) +#if !defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4) if (dwComponentSize >= DATA_ALIGNMENT) baseSize = (DWORD)ALIGN_UP(baseSize, DATA_ALIGNMENT); -#endif // !defined(_TARGET_64BIT_) && (DATA_ALIGNMENT > 4) +#endif // !defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4) pMT->SetBaseSize(baseSize); // Because of array method table persisting, we need to copy the map for (unsigned index = 0; index < pParentClass->GetNumInterfaces(); ++index) @@ -782,7 +782,7 @@ class ArrayOpLinker : public ILStubLinker UINT hiddenArgIdx = rank; _ASSERTE(rank>0); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS) { firstIdx = 1; @@ -1174,7 +1174,7 @@ void GenerateArrayOpScript(ArrayMethodDesc *pMD, ArrayOpScript *paos) ArgIterator argit(&msig); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 paos->m_cbretpop = argit.CbStackPop(); #endif diff --git a/src/coreclr/src/vm/assemblynative.cpp b/src/coreclr/src/vm/assemblynative.cpp index acb6c4d60f266..645d58b3d7b7e 100644 --- a/src/coreclr/src/vm/assemblynative.cpp +++ b/src/coreclr/src/vm/assemblynative.cpp @@ -364,7 +364,7 @@ void QCALLTYPE AssemblyNative::LoadFromStream(INT_PTR ptrNativeAssemblyLoadConte END_QCALL; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX /*static */ void QCALLTYPE AssemblyNative::LoadFromInMemoryModule(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR hModule, QCall::ObjectHandleOnStack retLoadedAssembly) { diff --git a/src/coreclr/src/vm/assemblynative.hpp b/src/coreclr/src/vm/assemblynative.hpp index 6cc131e69a970..a74ca26fe0454 100644 --- a/src/coreclr/src/vm/assemblynative.hpp +++ b/src/coreclr/src/vm/assemblynative.hpp @@ -117,7 +117,7 @@ class AssemblyNative static void QCALLTYPE InternalLoad(QCall::ObjectHandleOnStack assemblyName, QCall::ObjectHandleOnStack requestingAssembly, QCall::StackCrawlMarkHandle stackMark,BOOL fThrowOnFileNotFound, QCall::ObjectHandleOnStack assemblyLoadContext, QCall::ObjectHandleOnStack retAssembly); static void QCALLTYPE LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly); static void QCALLTYPE 
LoadFromStream(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR ptrAssemblyArray, INT32 cbAssemblyArrayLength, INT_PTR ptrSymbolArray, INT32 cbSymbolArrayLength, QCall::ObjectHandleOnStack retLoadedAssembly); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static void QCALLTYPE LoadFromInMemoryModule(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR hModule, QCall::ObjectHandleOnStack retLoadedAssembly); #endif static Assembly* LoadFromPEImage(ICLRPrivBinder* pBinderContext, PEImage *pILImage, PEImage *pNIImage); diff --git a/src/coreclr/src/vm/autotrace.cpp b/src/coreclr/src/vm/autotrace.cpp index 04427a3aebe60..158708dec69a8 100644 --- a/src/coreclr/src/vm/autotrace.cpp +++ b/src/coreclr/src/vm/autotrace.cpp @@ -22,9 +22,9 @@ #include "common.h" // Required for pre-compiled header #ifdef FEATURE_AUTO_TRACE -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "pal.h" -#endif // FEATURE_PAL +#endif // TARGET_UNIX HANDLE auto_trace_event; static size_t g_n_tracers = 1; @@ -69,7 +69,7 @@ void auto_trace_launch_internal() STARTUPINFO si; ZeroMemory(&si, sizeof(si)); si.cb = sizeof(STARTUPINFO); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX si.dwFlags = STARTF_USESHOWWINDOW; si.wShowWindow = SW_HIDE; #endif diff --git a/src/coreclr/src/vm/callcounting.cpp b/src/coreclr/src/vm/callcounting.cpp index 8d1169c288f98..372c32b8d16f1 100644 --- a/src/coreclr/src/vm/callcounting.cpp +++ b/src/coreclr/src/vm/callcounting.cpp @@ -261,7 +261,7 @@ const CallCountingStub *CallCountingManager::CallCountingStubAllocator::Allocate do { bool forceLongStub = false; - #if defined(_DEBUG) && defined(_TARGET_AMD64_) + #if defined(_DEBUG) && defined(TARGET_AMD64) if (s_callCountingStubCount % 2 == 0) { forceLongStub = true; @@ -272,7 +272,7 @@ const CallCountingStub *CallCountingManager::CallCountingStubAllocator::Allocate { sizeInBytes = sizeof(CallCountingStubShort); AllocMemHolder allocationAddressHolder(heap->AllocAlignedMem(sizeInBytes, CallCountingStub::Alignment)); - #ifdef _TARGET_AMD64_ + #ifdef TARGET_AMD64 if (CallCountingStubShort::CanUseFor(allocationAddressHolder, targetForMethod)) #endif { @@ -282,7 +282,7 @@ const CallCountingStub *CallCountingManager::CallCountingStubAllocator::Allocate } } - #ifdef _TARGET_AMD64_ + #ifdef TARGET_AMD64 sizeInBytes = sizeof(CallCountingStubLong); void *allocationAddress = (void *)heap->AllocAlignedMem(sizeInBytes, CallCountingStub::Alignment); stub = new(allocationAddress) CallCountingStubLong(remainingCallCountCell, targetForMethod); diff --git a/src/coreclr/src/vm/callhelpers.cpp b/src/coreclr/src/vm/callhelpers.cpp index 4752dc015adfe..935eac92b8672 100644 --- a/src/coreclr/src/vm/callhelpers.cpp +++ b/src/coreclr/src/vm/callhelpers.cpp @@ -73,7 +73,7 @@ void CallDescrWorkerWithHandler( } -#if !defined(BIT64) && defined(_DEBUG) +#if !defined(HOST_64BIT) && defined(_DEBUG) //******************************************************************************* // assembly code, in i386/asmhelpers.asm @@ -119,7 +119,7 @@ void CallDescrWorker(CallDescrData * pCallDescrData) _ASSERTE(!curThread->HasUnbreakableLock() && (curThread->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM _ASSERTE(IsThumbCode(pCallDescrData->pTarget)); #endif @@ -132,7 +132,7 @@ void CallDescrWorker(CallDescrData * pCallDescrData) ENABLESTRESSHEAP(); } -#endif // !defined(BIT64) && defined(_DEBUG) +#endif // !defined(HOST_64BIT) && defined(_DEBUG) void DispatchCallDebuggerWrapper( CallDescrData * pCallDescrData, @@ -556,7 +556,7 @@ void MethodDescCallSite::CallTargetWorker(const 
ARG_SLOT *pArguments, ARG_SLOT * _ASSERTE((DWORD)cbReturnValue <= sizeof(callDescrData.returnValue)); memcpyNoGCRefs(pReturnValue, &callDescrData.returnValue, cbReturnValue); -#if !defined(BIT64) && BIGENDIAN +#if !defined(HOST_64BIT) && BIGENDIAN { GCX_FORBID(); @@ -565,7 +565,7 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT * pReturnValue[0] >>= 32; } } -#endif // !defined(BIT64) && BIGENDIAN +#endif // !defined(HOST_64BIT) && BIGENDIAN } } diff --git a/src/coreclr/src/vm/callhelpers.h b/src/coreclr/src/vm/callhelpers.h index 81a262d1de397..7f4446232aa63 100644 --- a/src/coreclr/src/vm/callhelpers.h +++ b/src/coreclr/src/vm/callhelpers.h @@ -39,7 +39,7 @@ struct CallDescrData // Return value // #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Use NEON128 to ensure proper alignment for vectors. DECLSPEC_ALIGN(16) NEON128 returnValue[ENREGISTERED_RETURNTYPE_MAXSIZE / sizeof(NEON128)]; #else @@ -57,7 +57,7 @@ struct CallDescrData extern "C" void STDCALL CallDescrWorkerInternal(CallDescrData * pCallDescrData); -#if !defined(BIT64) && defined(_DEBUG) +#if !defined(HOST_64BIT) && defined(_DEBUG) void CallDescrWorker(CallDescrData * pCallDescrData); #else #define CallDescrWorker(pCallDescrData) CallDescrWorkerInternal(pCallDescrData) @@ -485,7 +485,7 @@ void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap); /* Macros used to indicate a call to managed code is starting/ending */ /***********************************************************************/ -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Install a native exception holder that doesn't catch any exceptions but its presence // in a stack range of native frames indicates that there was a call from native to // managed code. It is used by the DispatchManagedException to detect the case when @@ -495,9 +495,9 @@ void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap); #define INSTALL_CALL_TO_MANAGED_EXCEPTION_HOLDER() \ NativeExceptionHolderNoCatch __exceptionHolder; \ __exceptionHolder.Push(); -#else // FEATURE_PAL +#else // TARGET_UNIX #define INSTALL_CALL_TO_MANAGED_EXCEPTION_HOLDER() -#endif // FEATURE_PAL +#endif // TARGET_UNIX enum EEToManagedCallFlags { @@ -633,7 +633,7 @@ enum DispatchCallSimpleFlags #ifdef CALLDESCR_ARGREGS -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Arguments on x86 are passed backward #define ARGNUM_0 1 diff --git a/src/coreclr/src/vm/callingconvention.h b/src/coreclr/src/vm/callingconvention.h index ab54ad8640b77..d4b0996994d99 100644 --- a/src/coreclr/src/vm/callingconvention.h +++ b/src/coreclr/src/vm/callingconvention.h @@ -62,15 +62,15 @@ struct ArgLocDesc } } #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) unsigned m_hfaFieldSize; // Size of HFA field in bytes. 
void setHFAFieldSize(CorElementType hfaType) { m_hfaFieldSize = getHFAFieldSize(hfaType); } -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) BOOL m_fRequires64BitAlignment; // True if the argument should always be aligned (in registers or on the stack #endif @@ -88,12 +88,12 @@ struct ArgLocDesc m_cGenReg = 0; m_idxStack = -1; m_cStack = 0; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) m_fRequires64BitAlignment = FALSE; #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) m_hfaFieldSize = 0; -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) #if defined(UNIX_AMD64_ABI) m_eeClass = NULL; #endif @@ -106,17 +106,17 @@ struct ArgLocDesc // struct TransitionBlock { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) ArgumentRegisters m_argumentRegisters; CalleeSavedRegisters m_calleeSavedRegisters; TADDR m_ReturnAddress; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI ArgumentRegisters m_argumentRegisters; #endif CalleeSavedRegisters m_calleeSavedRegisters; TADDR m_ReturnAddress; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) union { CalleeSavedRegisters m_calleeSavedRegisters; // alias saved link register as m_ReturnAddress @@ -127,7 +127,7 @@ struct TransitionBlock }; }; ArgumentRegisters m_argumentRegisters; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) union { CalleeSavedRegisters m_calleeSavedRegisters; struct { @@ -152,7 +152,7 @@ struct TransitionBlock return offsetof(TransitionBlock, m_ReturnAddress); } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 static int GetOffsetOfRetBuffArgReg() { LIMITED_METHOD_CONTRACT; @@ -182,7 +182,7 @@ struct TransitionBlock { LIMITED_METHOD_CONTRACT; int offs; -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) offs = sizeof(TransitionBlock); #else offs = offsetof(TransitionBlock, m_argumentRegisters); @@ -212,7 +212,7 @@ struct TransitionBlock return offset >= ofsArgRegs && offset < (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE); } -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 static UINT GetArgumentIndexFromOffset(int offset) { LIMITED_METHOD_CONTRACT; @@ -278,7 +278,7 @@ struct TransitionBlock #ifdef CALLDESCR_FPARGREGS negSpaceSize += sizeof(FloatArgumentRegisters); #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM negSpaceSize += TARGET_POINTER_SIZE; // padding to make FloatArgumentRegisters address 8-byte aligned #endif return negSpaceSize; @@ -336,7 +336,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE UINT size = SizeOfArgStack(); -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // The argument registers are not included in the stack size on AMD64 size += ARGUMENTREGISTERS_SIZE; #endif @@ -346,7 +346,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE //------------------------------------------------------------------------ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 UINT CbStackPop() { WRAPPER_NO_CONTRACT; @@ -376,7 +376,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE return m_dwFlags >> RETURN_FP_SIZE_SHIFT; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //========================================================================= // Indicates whether an argument is to be put in a register using the // default IL calling convention. 
This should be called on each parameter @@ -403,7 +403,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE return(FALSE); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE) @@ -419,9 +419,9 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE _ASSERTE(th.IsValueType()); size_t size = th.GetSize(); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return IsArgPassedByRef(size); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Composites greater than 16 bytes are passed by reference return ((size > ENREGISTERED_PARAMTYPE_MAXSIZE) && !th.IsHFA()); #else @@ -430,7 +430,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE #endif } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // This overload should only be used in AMD64-specific code only. static BOOL IsArgPassedByRef(size_t size) { @@ -445,14 +445,14 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE return (size > ENREGISTERED_PARAMTYPE_MAXSIZE) || ((size & (size-1)) != 0); #endif } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // This overload should be used for varargs only. static BOOL IsVarArgPassedByRef(size_t size) { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI PORTABILITY_ASSERT("ArgIteratorTemplate::IsVarArgPassedByRef"); return FALSE; @@ -469,9 +469,9 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return IsArgPassedByRef(m_argSize); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (m_argType == ELEMENT_TYPE_VALUETYPE) { _ASSERTE(!m_argTypeHandle.IsNull()); @@ -523,7 +523,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE void ForceSigWalk(); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Accessors for built in argument descriptions of the special implicit parameters not mentioned directly // in signatures (this pointer and the like). Whether or not these can be used successfully before all the // explicit arguments have been scanned is platform dependent. @@ -535,18 +535,18 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE void GetRetBuffArgLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetRetBuffArgOffset(), pLoc); } #endif -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 ArgLocDesc* GetArgLocDescForStructInRegs() { -#if defined(UNIX_AMD64_ABI) || defined (_TARGET_ARM64_) +#if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL; #else return NULL; #endif } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Get layout information for the argument that the ArgIterator is currently visiting. void GetArgLoc(int argOffset, ArgLocDesc *pLoc) { @@ -569,7 +569,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE } #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Get layout information for the argument that the ArgIterator is currently visiting. void GetArgLoc(int argOffset, ArgLocDesc *pLoc) { @@ -610,9 +610,9 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE pLoc->m_cStack = cSlots; } } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Get layout information for the argument that the ArgIterator is currently visiting. 
void GetArgLoc(int argOffset, ArgLocDesc *pLoc) { @@ -647,7 +647,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE cSlots = 1; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // Sanity check to make sure no caller is trying to get an ArgLocDesc that // describes the return buffer reg field that's in the TransitionBlock. _ASSERTE(argOffset != TransitionBlock::GetOffsetOfRetBuffArgReg()); @@ -664,9 +664,9 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE pLoc->m_cStack = cSlots; } } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Get layout information for the argument that the ArgIterator is currently visiting. void GetArgLoc(int argOffset, ArgLocDesc* pLoc) { @@ -726,7 +726,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE pLoc->m_cStack = (argOnStackSize + STACK_ELEM_SIZE - 1) / STACK_ELEM_SIZE; } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 protected: DWORD m_dwFlags; // Cached flags @@ -738,17 +738,17 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE CorElementType m_argType; int m_argSize; TypeHandle m_argTypeHandle; -#if (defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)) || defined(_TARGET_ARM64_) +#if (defined(TARGET_AMD64) && defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) ArgLocDesc m_argLocDescForStructInRegs; bool m_hasArgLocDescForStructInRegs; -#endif // (_TARGET_AMD64_ && UNIX_AMD64_ABI) || _TARGET_ARM64_ +#endif // (TARGET_AMD64 && UNIX_AMD64_ABI) || TARGET_ARM64 -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 int m_curOfs; // Current position of the stack iterator int m_numRegistersUsed; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI int m_idxGenReg; // Next general register to be assigned a value int m_idxStack; // Next stack slot to be assigned a value @@ -759,7 +759,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE #endif #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM int m_idxGenReg; // Next general register to be assigned a value int m_idxStack; // Next stack slot to be assigned a value @@ -767,7 +767,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE bool m_fRequires64BitAlignment; // Cached info about the current arg #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 int m_idxGenReg; // Next general register to be assigned a value int m_idxStack; // Next stack slot to be assigned a value int m_idxFPReg; // Next FP register to be assigned a value @@ -779,7 +779,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE RETURN_FLAGS_COMPUTED = 0x0004, RETURN_HAS_RET_BUFFER = 0x0008, // Cached value of HasRetBuffArg -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PARAM_TYPE_REGISTER_MASK = 0x0030, PARAM_TYPE_REGISTER_STACK = 0x0010, PARAM_TYPE_REGISTER_ECX = 0x0020, @@ -793,7 +793,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE void ComputeReturnFlags(); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 void GetSimpleLoc(int offset, ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; @@ -820,7 +820,7 @@ int ArgIteratorTemplate::GetThisOffset() // This pointer is in the first argument register by default int ret = TransitionBlock::GetOffsetOfArgumentRegisters(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // x86 is special as always ret += offsetof(ArgumentRegisters, ECX); #endif @@ -838,10 +838,10 @@ int ArgIteratorTemplate::GetRetBuffArgOffset() // RetBuf arg is in the second argument register by default int ret = TransitionBlock::GetOffsetOfArgumentRegisters(); -#if _TARGET_X86_ +#if TARGET_X86 // x86 is special as always ret += this->HasThis() ? 
offsetof(ArgumentRegisters, EDX) : offsetof(ArgumentRegisters, ECX); -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 ret = TransitionBlock::GetOffsetOfRetBuffArgReg(); #else if (this->HasThis()) @@ -858,7 +858,7 @@ int ArgIteratorTemplate::GetVASigCookieOffset() _ASSERTE(this->IsVarArg()); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // x86 is special as always return sizeof(TransitionBlock); #else @@ -897,7 +897,7 @@ int ArgIteratorTemplate::GetParamTypeArgOffset() _ASSERTE(this->HasParamType()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // x86 is special as always if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED)) ForceSigWalk(); @@ -959,14 +959,14 @@ int ArgIteratorTemplate::GetNextOffset() _ASSERTE(!this->IsVarArg() || !this->HasParamType()); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 if (this->IsVarArg() || this->HasParamType()) { numRegistersUsed++; } #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (this->IsVarArg()) { numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs @@ -997,7 +997,7 @@ int ArgIteratorTemplate::GetNextOffset() m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack(); #endif -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI m_idxGenReg = numRegistersUsed; m_idxStack = 0; @@ -1005,12 +1005,12 @@ int ArgIteratorTemplate::GetNextOffset() #else m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); #endif -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) m_idxGenReg = numRegistersUsed; m_idxStack = 0; m_wFPRegs = 0; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) m_idxGenReg = numRegistersUsed; m_idxStack = 0; @@ -1037,11 +1037,11 @@ int ArgIteratorTemplate::GetNextOffset() m_argSize = argSize; m_argTypeHandle = thValueType; -#if defined(UNIX_AMD64_ABI) || defined (_TARGET_ARM64_) +#if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) m_hasArgLocDescForStructInRegs = false; #endif -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #ifdef FEATURE_INTERPRETER if (m_fUnmanagedCallConv) { @@ -1058,7 +1058,7 @@ int ArgIteratorTemplate::GetNextOffset() m_curOfs -= StackElemSize(argSize); _ASSERTE(m_curOfs >= TransitionBlock::GetOffsetOfArgs()); return m_curOfs; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI m_fArgInRegisters = true; @@ -1162,7 +1162,7 @@ int ArgIteratorTemplate::GetNextOffset() m_curOfs += sizeof(void *); return argOfs; #endif -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // First look at the underlying type of the argument to determine some basic properties: // 1) The size of the argument in bytes (rounded up to the stack slot size of 4 if necessary). // 2) Whether the argument represents a floating point primitive (ELEMENT_TYPE_R4 or ELEMENT_TYPE_R8). 
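Aside: the ARM portion of GetNextOffset shown above starts by classifying the argument, and the comment lists two size rules (rounding to the 4-byte stack slot, plus the 64-bit alignment tracked by m_fRequires64BitAlignment). A rough standalone sketch of those two computations, with illustrative names rather than the iterator's own helpers, assuming ARM's 4-byte slot and 8-byte alignment for 64-bit primitives:

    #include <cstddef>

    // Round an argument's byte size up to ARM's 4-byte stack slot.
    static size_t RoundToStackSlot(size_t cbArg)
    {
        const size_t cbSlot = 4;
        return (cbArg + cbSlot - 1) & ~(cbSlot - 1);
    }

    // Align a stack offset to 8 bytes when the argument requires 64-bit
    // alignment (e.g. long long / double), as the comment above describes.
    static size_t AlignFor64BitArg(size_t offset, bool requires64BitAlignment)
    {
        return requires64BitAlignment ? ((offset + 7) & ~(size_t)7) : offset;
    }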
@@ -1330,7 +1330,7 @@ int ArgIteratorTemplate::GetNextOffset() m_idxStack += cArgSlots; return argOfs; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) int cFPRegs = 0; @@ -1541,7 +1541,7 @@ void ArgIteratorTemplate::ComputeReturnFlags() size_t size = thValueType.GetSize(); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // Return value types of size which are not powers of 2 using a RetBuffArg if ((size & (size-1)) != 0) { @@ -1583,7 +1583,7 @@ void ArgIteratorTemplate::ForceSigWalk() // This can be only used before the actual argument iteration started _ASSERTE((m_dwFlags & ITERATION_STARTED) == 0); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // // x86 is special as always // @@ -1664,7 +1664,7 @@ void ArgIteratorTemplate::ForceSigWalk() m_dwFlags |= paramTypeFlags; } -#else // _TARGET_X86_ +#else // TARGET_X86 int maxOffset = TransitionBlock::GetOffsetOfArgs(); @@ -1673,7 +1673,7 @@ void ArgIteratorTemplate::ForceSigWalk() { int stackElemSize; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI if (m_fArgInRegisters) { @@ -1687,13 +1687,13 @@ void ArgIteratorTemplate::ForceSigWalk() // than a stack slot are passed by reference. stackElemSize = STACK_ELEM_SIZE; #endif // UNIX_AMD64_ABI -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 stackElemSize = StackElemSize(GetArgSize()); #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE) if (IsArgPassedByRef()) stackElemSize = STACK_ELEM_SIZE; #endif -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 int endOfs = ofs + stackElemSize; if (endOfs > maxOffset) @@ -1718,12 +1718,12 @@ void ArgIteratorTemplate::ForceSigWalk() int nSizeOfArgStack = maxOffset - TransitionBlock::GetOffsetOfArgs(); -#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) +#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) nSizeOfArgStack = (nSizeOfArgStack > (int)sizeof(ArgumentRegisters)) ? 
(nSizeOfArgStack - sizeof(ArgumentRegisters)) : 0; #endif -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Cache the result m_nSizeOfArgStack = nSizeOfArgStack; @@ -1855,7 +1855,7 @@ inline BOOL HasRetBuffArgUnmanagedFixup(MetaSig * pSig) inline BOOL IsRetBuffPassedAsFirstArg() { WRAPPER_NO_CONTRACT; -#ifndef _TARGET_ARM64_ +#ifndef TARGET_ARM64 return TRUE; #else return FALSE; diff --git a/src/coreclr/src/vm/callsiteinspect.cpp b/src/coreclr/src/vm/callsiteinspect.cpp index 8a41ef6738ece..ee0de665c24c7 100644 --- a/src/coreclr/src/vm/callsiteinspect.cpp +++ b/src/coreclr/src/vm/callsiteinspect.cpp @@ -194,7 +194,7 @@ namespace ty = pSig->GetRetTypeHandleThrowing(); _ASSERTE((*src) != NULL || Nullable::IsNullableType(ty)); -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX // Unboxing on non-Windows ABIs must be special cased COMPlusThrowHR(COR_E_NOTSUPPORTED); #else @@ -289,7 +289,7 @@ namespace break; } -#if !defined(BIT64) && BIGENDIAN +#if !defined(HOST_64BIT) && BIGENDIAN if (fEndianessFixup) ret <<= 32; #endif diff --git a/src/coreclr/src/vm/castcache.cpp b/src/coreclr/src/vm/castcache.cpp index b1f14b43c70d1..5465d82ac0013 100644 --- a/src/coreclr/src/vm/castcache.cpp +++ b/src/coreclr/src/vm/castcache.cpp @@ -66,7 +66,7 @@ BASEARRAYREF CastCache::CreateCastCache(DWORD size) // Fibonacci hash reduces the value into desired range by shifting right by the number of leading zeroes in 'size-1' DWORD bitCnt; -#if BIT64 +#if HOST_64BIT BitScanReverse64(&bitCnt, size - 1); HashShift(table) = (BYTE)(63 - bitCnt); #else diff --git a/src/coreclr/src/vm/castcache.h b/src/coreclr/src/vm/castcache.h index 2cdc2d13bd50e..18d1564614bee 100644 --- a/src/coreclr/src/vm/castcache.h +++ b/src/coreclr/src/vm/castcache.h @@ -261,7 +261,7 @@ class CastCache // we do `rotl(source, ) ^ target` for mixing inputs. // then we use fibonacci hashing to reduce the value to desired size. 
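Aside: the castcache.cpp/castcache.h hunks above only swap BIT64 for HOST_64BIT; the hashing itself is unchanged. As a rough standalone sketch of that 64-bit mix-and-reduce step (the rotate-and-XOR mix followed by Fibonacci reduction), with an illustrative helper name rather than the runtime's:

    #include <cstdint>

    // Mix (source, target) as in the 64-bit path above: rotate source by 32 bits,
    // XOR with target, then reduce by multiplying with 2^64/phi and keeping the
    // top bits. For a table of 2^k buckets the shift is 64 - k, which the runtime
    // derives from BitScanReverse64(size - 1).
    static uint32_t CastCacheBucketSketch(uint64_t source, uint64_t target, uint8_t hashShift)
    {
        uint64_t hash = ((source << 32) | (source >> 32)) ^ target;
        return (uint32_t)((hash * 11400714819323198485ull) >> hashShift);
    }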
-#if BIT64 +#if HOST_64BIT UINT64 hash = (((UINT64)source << 32) | ((UINT64)source >> 32)) ^ (UINT64)target; return (DWORD)((hash * 11400714819323198485llu) >> HashShift(table)); #else diff --git a/src/coreclr/src/vm/ceeload.cpp b/src/coreclr/src/vm/ceeload.cpp index 0dce1be99d8ad..3ff65d67432e5 100644 --- a/src/coreclr/src/vm/ceeload.cpp +++ b/src/coreclr/src/vm/ceeload.cpp @@ -83,13 +83,13 @@ #pragma warning(disable:4244) #endif // _MSC_VER -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define COR_VTABLE_PTRSIZED COR_VTABLE_64BIT #define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_32BIT -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT #define COR_VTABLE_PTRSIZED COR_VTABLE_32BIT #define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_64BIT -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT #define CEE_FILE_GEN_GROWTH_COLLECTIBLE 2048 @@ -6268,7 +6268,7 @@ using GetTokenForVTableEntry_t = mdToken(STDMETHODCALLTYPE*)(HMODULE module, BYT static HMODULE GetIJWHostForModule(Module* module) { -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) PEDecoder* pe = module->GetFile()->GetLoadedIL(); BYTE* baseAddress = (BYTE*)module->GetFile()->GetIJWBase(); @@ -9868,7 +9868,7 @@ void Module::RestoreMethodTablePointerRaw(MethodTable ** ppMT, if (CORCOMPILE_IS_POINTER_TAGGED(fixup)) { -#ifdef BIT64 +#ifdef HOST_64BIT CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0); #endif @@ -10082,7 +10082,7 @@ PTR_Module Module::RestoreModulePointerIfLoaded(DPTR(RelativeFixupPointer>32) == 0); #endif @@ -10133,7 +10133,7 @@ void Module::RestoreModulePointer(RelativeFixupPointer * ppModule, M if (CORCOMPILE_IS_POINTER_TAGGED(fixup)) { -#ifdef BIT64 +#ifdef HOST_64BIT CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0); #endif @@ -10194,7 +10194,7 @@ void Module::RestoreTypeHandlePointerRaw(TypeHandle *pHandle, Module* pContainin if (CORCOMPILE_IS_POINTER_TAGGED(fixup)) { -#ifdef BIT64 +#ifdef HOST_64BIT CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0); #endif @@ -10313,7 +10313,7 @@ void Module::RestoreMethodDescPointerRaw(PTR_MethodDesc * ppMD, Module *pContain { GCX_PREEMP(); -#ifdef BIT64 +#ifdef HOST_64BIT CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0); #endif @@ -10410,7 +10410,7 @@ void Module::RestoreFieldDescPointer(RelativeFixupPointer * ppFD) if (CORCOMPILE_IS_POINTER_TAGGED(fixup)) { -#ifdef BIT64 +#ifdef HOST_64BIT CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0); #endif @@ -10650,12 +10650,12 @@ CORCOMPILE_DEBUG_ENTRY Module::GetMethodDebugInfoOffset(MethodDesc *pMD) DWORD codeRVA = GetNativeImage()-> GetDataRva((const TADDR)pMD->GetNativeCode()); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Since the Thumb Bit is set on ARM, the RVA calculated above will have it set as well // and will result in the failure of checks in the loop below. Hence, mask off the // bit before proceeding ahead. codeRVA = ThumbCodeToDataPointer(codeRVA); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM for (;;) { @@ -11798,7 +11798,7 @@ void SaveManagedCommandLine(LPCWSTR pwzAssemblyPath, int argc, LPCWSTR *argv) // Get the command line. LPCWSTR osCommandLine = GetCommandLineW(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // On Windows, osCommandLine contains the executable and all arguments. 
s_pCommandLine = osCommandLine; #else diff --git a/src/coreclr/src/vm/ceeload.h b/src/coreclr/src/vm/ceeload.h index e9fa1ebac0c87..f4a0b05120dd9 100644 --- a/src/coreclr/src/vm/ceeload.h +++ b/src/coreclr/src/vm/ceeload.h @@ -98,13 +98,13 @@ extern VerboseLevel g_CorCompileVerboseLevel; #define GUID_TO_TYPE_HASH_BUCKETS 16 // The native symbol reader dll name -#if defined(_AMD64_) +#if defined(HOST_AMD64) #define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.amd64.dll") -#elif defined(_X86_) +#elif defined(HOST_X86) #define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.x86.dll") -#elif defined(_ARM_) +#elif defined(HOST_ARM) #define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm.dll") -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) // Use diasymreader until the package has an arm64 version - issue #7360 //#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm64.dll") #define NATIVE_SYMBOL_READER_DLL W("diasymreader.dll") @@ -206,12 +206,12 @@ enum { kLookupMapIndexStride = 0x10, // The range of table entries covered by one index entry (power of two for faster hash lookup) kBitsPerRVA = sizeof(DWORD) * 8, // Bits in an (uncompressed) table value RVA (RVAs // currently still 32-bit even on 64-bit platforms) -#ifdef BIT64 +#ifdef HOST_64BIT kFlagBits = 3, // Number of bits at the bottom of a value // pointer that may be used for flags -#else // BIT64 +#else // HOST_64BIT kFlagBits = 2, -#endif // BIT64 +#endif // HOST_64BIT }; diff --git a/src/coreclr/src/vm/ceemain.cpp b/src/coreclr/src/vm/ceemain.cpp index 5173d9f27780b..05e420f22d78d 100644 --- a/src/coreclr/src/vm/ceemain.cpp +++ b/src/coreclr/src/vm/ceemain.cpp @@ -167,9 +167,9 @@ #include "disassembler.h" #include "jithost.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "dwreport.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #include "stringarraylist.h" #include "stubhelpers.h" @@ -217,10 +217,10 @@ #include "diagnosticserver.h" #include "eventpipe.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Included for referencing __security_cookie #include "process.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_GDBJIT #include "gdbjit.h" @@ -319,7 +319,7 @@ HRESULT EnsureEEStarted(COINITIEE flags) AppX::SetIsAppXProcess(!!(startupFlags & STARTUP_APPX_APP_MODEL)); #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // The sooner we do this, the sooner we avoid probing registry entries. // (Perf Optimization for VSWhidbey:113373.) REGUTIL::InitOptionalConfigCache(); @@ -399,7 +399,7 @@ HRESULT EnsureEEStarted(COINITIEE flags) #ifndef CROSSGEN_COMPILE -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This is our Ctrl-C, Ctrl-Break, etc. handler. 
static BOOL WINAPI DbgCtrlCHandler(DWORD dwCtrlType) { @@ -510,12 +510,12 @@ void InitGSCookie() volatile GSCookie * pGSCookiePtr = GetProcessGSCookiePtr(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // On Unix, the GS cookie is stored in a read only data segment DWORD newProtection = PAGE_READWRITE; -#else // FEATURE_PAL +#else // TARGET_UNIX DWORD newProtection = PAGE_EXECUTE_READWRITE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX DWORD oldProtection; if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), newProtection, &oldProtection)) @@ -523,12 +523,12 @@ void InitGSCookie() ThrowLastError(); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // PAL layer is unable to extract old protection for regions that were not allocated using VirtualAlloc oldProtection = PAGE_READONLY; -#endif // FEATURE_PAL +#endif // TARGET_UNIX -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // The GSCookie cannot be in a writeable page assert(((oldProtection & (PAGE_READWRITE|PAGE_WRITECOPY|PAGE_EXECUTE_READWRITE| PAGE_EXECUTE_WRITECOPY|PAGE_WRITECOMBINE)) == 0)); @@ -538,10 +538,10 @@ void InitGSCookie() pf = NULL; GSCookie val = (GSCookie)(__security_cookie ^ GetTickCount()); -#else // !FEATURE_PAL +#else // !TARGET_UNIX // REVIEW: Need something better for PAL... GSCookie val = (GSCookie)GetTickCount(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef _DEBUG // In _DEBUG, always use the same value to make it easier to search for the cookie @@ -610,7 +610,7 @@ do { \ #ifndef CROSSGEN_COMPILE -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX void EESocketCleanupHelper() { CONTRACTL @@ -630,7 +630,7 @@ void EESocketCleanupHelper() DiagnosticServer::Shutdown(); #endif // FEATURE_PERFTRACING } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #endif // CROSSGEN_COMPILE void EEStartupHelper(COINITIEE fFlags) @@ -657,7 +657,7 @@ void EEStartupHelper(COINITIEE fFlags) #ifndef CROSSGEN_COMPILE -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX ::SetConsoleCtrlHandler(DbgCtrlCHandler, TRUE/*add*/); #endif @@ -674,9 +674,9 @@ void EEStartupHelper(COINITIEE fFlags) // Need to do this as early as possible. Used by creating object handle // table inside Ref_Initialization() before GC is initialized. NumaNodeInfo::InitNumaNodeInfo(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CPUGroupInfo::EnsureInitialized(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Initialize global configuration settings based on startup flags // This needs to be done before the EE has started @@ -696,9 +696,9 @@ void EEStartupHelper(COINITIEE fFlags) #endif // FEATURE_PERFTRACING -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_SetShutdownCallback(EESocketCleanupHelper); -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_GDBJIT // Initialize gdbjit @@ -746,9 +746,9 @@ void EEStartupHelper(COINITIEE fFlags) STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "===================EEStartup Starting==================="); #ifndef CROSSGEN_COMPILE -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX IfFailGoLog(EnsureRtlFunctions()); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX InitEventStore(); #endif @@ -800,7 +800,7 @@ void EEStartupHelper(COINITIEE fFlags) // Cross-process named objects are not supported in PAL // (see CorUnix::InternalCreateEvent - src/pal/src/synchobj/event.cpp.272) -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) // Initialize the sweeper thread. 
if (g_pConfig->GetZapBBInstr() != NULL) { @@ -814,7 +814,7 @@ void EEStartupHelper(COINITIEE fFlags) _ASSERTE(hBBSweepThread); g_BBSweep.SetBBSweepThreadHandle(hBBSweepThread); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_INTERPRETER Interpreter::Initialize(); @@ -822,7 +822,7 @@ void EEStartupHelper(COINITIEE fFlags) StubManager::InitializeStubManagers(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX { // Record mscorwks geometry PEDecoder pe(g_pMSCorEE); @@ -831,7 +831,7 @@ void EEStartupHelper(COINITIEE fFlags) g_runtimeVirtualSize = (SIZE_T)pe.GetVirtualSize(); InitCodeAllocHint(g_runtimeLoadedBaseAddress, g_runtimeVirtualSize, GetRandomInt(64)); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // CROSSGEN_COMPILE @@ -884,12 +884,12 @@ void EEStartupHelper(COINITIEE fFlags) #ifndef CROSSGEN_COMPILE -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!RegisterOutOfProcessWatsonCallbacks()) { IfFailGo(E_FAIL); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef DEBUGGING_SUPPORTED if(!NingenEnabled()) @@ -1145,7 +1145,7 @@ HRESULT EEStartup(COINITIEE fFlags) { #ifndef CROSSGEN_COMPILE InitializeClrNotifications(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX InitializeJITNotificationTable(); DacGlobals::Initialize(); #endif @@ -2248,15 +2248,15 @@ static HRESULT GetThreadUICultureNames(__inout StringArrayList* pCultureNames) SIZE_T cchParentCultureName=LOCALE_NAME_MAX_LENGTH; sCulture.Set(id); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!::GetLocaleInfoEx((LPCWSTR)sCulture, LOCALE_SPARENT, sParentCulture.OpenUnicodeBuffer(static_cast(cchParentCultureName)),static_cast(cchParentCultureName))) { hr = HRESULT_FROM_GetLastError(); } sParentCulture.CloseBuffer(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX sParentCulture = sCulture; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // (LPCWSTR) to restrict the size to null terminated size pCultureNames->AppendIfNotThere((LPCWSTR)sCulture); @@ -2363,18 +2363,18 @@ static int GetThreadUICultureId(__out LocaleIDValue* pLocale) #endif if (Result == 0) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This thread isn't set up to use a non-default culture. Let's grab the default // one and return that. Result = ::GetUserDefaultLocaleName(*pLocale, LOCALE_NAME_MAX_LENGTH); _ASSERTE(Result != 0); -#else // !FEATURE_PAL +#else // !TARGET_UNIX static const WCHAR enUS[] = W("en-US"); memcpy(*pLocale, enUS, sizeof(enUS)); Result = sizeof(enUS); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } return Result; } diff --git a/src/coreclr/src/vm/cgensys.h b/src/coreclr/src/vm/cgensys.h index 623fbbb987c03..540ed6cc0d0d6 100644 --- a/src/coreclr/src/vm/cgensys.h +++ b/src/coreclr/src/vm/cgensys.h @@ -28,11 +28,11 @@ class ComPlusCallMethodDesc; void ResumeAtJit(PT_CONTEXT pContext, LPVOID oldFP); #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) void ResumeAtJitEH (CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, Thread *pThread, BOOL unwindStack); int CallJitEHFilter (CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj); void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel); -#endif // _TARGET_X86_ +#endif // TARGET_X86 //These are in util.cpp extern size_t GetLogicalProcessorCacheSizeFromOS(); @@ -86,7 +86,7 @@ extern "C" void STDCALL DelayLoad_Helper_ObjObj(); // Note that this information may be the least-common-denominator in the // case of a multi-proc machine. 
-#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo); #else inline void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo) @@ -97,9 +97,9 @@ inline void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo) cpuInfo->dwExtendedFeatures = 0; } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 -#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(CROSSGEN_COMPILE) +#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && !defined(CROSSGEN_COMPILE) extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]); extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]); extern "C" DWORD __stdcall xmmYmmStateSupport(); @@ -107,7 +107,7 @@ extern "C" DWORD __stdcall xmmYmmStateSupport(); inline bool TargetHasAVXSupport() { -#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(CROSSGEN_COMPILE) +#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && !defined(CROSSGEN_COMPILE) unsigned char buffer[16]; // All x86/AMD64 targets support cpuid. (void) getcpuid(1, buffer); @@ -115,7 +115,7 @@ inline bool TargetHasAVXSupport() // It returns the resulting eax, ebx, ecx and edx (in that order) in buffer[]. // The AVX feature is ECX bit 28. return ((buffer[11] & 0x10) != 0); -#endif // (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(CROSSGEN_COMPILE) +#endif // (defined(TARGET_X86) || defined(TARGET_AMD64)) && !defined(CROSSGEN_COMPILE) return false; } @@ -152,7 +152,7 @@ BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDes // class ResetProcessorStateHolder { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) ULONG m_mxcsr; #endif @@ -160,17 +160,17 @@ class ResetProcessorStateHolder ResetProcessorStateHolder () { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) m_mxcsr = _mm_getcsr(); _mm_setcsr(0x1f80); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } ~ResetProcessorStateHolder () { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) _mm_setcsr(m_mxcsr); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } }; diff --git a/src/coreclr/src/vm/class.cpp b/src/coreclr/src/vm/class.cpp index 17a2e92d35f55..2b123af9d02df 100644 --- a/src/coreclr/src/vm/class.cpp +++ b/src/coreclr/src/vm/class.cpp @@ -988,9 +988,9 @@ CorElementType EEClass::ComputeInternalCorElementTypeForValueType(MethodTable * if (pMT->GetNumInstanceFields() == 1 && (!pMT->HasLayout() || pMT->GetNumInstanceFieldBytes() == 4 -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT || pMT->GetNumInstanceFieldBytes() == 8 -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT )) // Don't do the optimization if we're getting specified anything but the trivial layout. { FieldDesc * pFD = pMT->GetApproxFieldDescListRaw(); @@ -1022,10 +1022,10 @@ CorElementType EEClass::ComputeInternalCorElementTypeForValueType(MethodTable * case ELEMENT_TYPE_U: case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { return type; @@ -1181,7 +1181,7 @@ int MethodTable::GetVectorSize() { // This is supported for finding HVA types for Arm64. In order to support the altjit, // we support this on 64-bit platforms (i.e. Arm64 and X64). 
-#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (IsIntrinsicType()) { LPCUTF8 namespaceName; @@ -1218,7 +1218,7 @@ int MethodTable::GetVectorSize() } } } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT return 0; } @@ -1334,7 +1334,7 @@ EEClass::CheckForHFA() { case ELEMENT_TYPE_VALUETYPE: { -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // hfa/hva types are unique by size, except for Vector64 which we can conveniently // treat as if it were a double for ABI purposes. However, it only qualifies as // an HVA if all fields are the same type. This will ensure that we only @@ -1359,7 +1359,7 @@ EEClass::CheckForHFA() } } else -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 { #if defined(FEATURE_HFA) fieldType = pByValueClassCache[i]->GetHFAType(); @@ -1419,7 +1419,7 @@ EEClass::CheckForHFA() case ELEMENT_TYPE_R8: elemSize = 8; break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case ELEMENT_TYPE_VALUETYPE: // Should already have set elemSize, but be conservative if (elemSize == 0) @@ -1533,7 +1533,7 @@ CorElementType EEClassLayoutInfo::GetNativeHFATypeRaw() { case ELEMENT_TYPE_R4: elemSize = sizeof(float); break; case ELEMENT_TYPE_R8: elemSize = sizeof(double); break; -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case ELEMENT_TYPE_VALUETYPE: elemSize = 16; break; #endif default: _ASSERTE(!"Invalid HFA Type"); @@ -3886,7 +3886,7 @@ namespace { // Safe cast - no primitive type is larger than 4gb! pManagedPlacementInfo->m_size = ((UINT32)CorTypeInfo::Size(corElemType)); - #if defined(_TARGET_X86_) && defined(UNIX_X86_ABI) + #if defined(TARGET_X86) && defined(UNIX_X86_ABI) switch (corElemType) { // The System V ABI for i386 defines different packing for these types. @@ -3904,7 +3904,7 @@ namespace break; } } - #else // _TARGET_X86_ && UNIX_X86_ABI + #else // TARGET_X86 && UNIX_X86_ABI pManagedPlacementInfo->m_alignment = pManagedPlacementInfo->m_size; #endif diff --git a/src/coreclr/src/vm/class.h b/src/coreclr/src/vm/class.h index 8fff516471e2b..f4304e63c8f6f 100644 --- a/src/coreclr/src/vm/class.h +++ b/src/coreclr/src/vm/class.h @@ -2332,7 +2332,7 @@ inline PCODE GetPreStubEntryPoint() return GetEEFuncEntryPoint(ThePreStub); } -#if defined(HAS_COMPACT_ENTRYPOINTS) && defined(_TARGET_ARM_) +#if defined(HAS_COMPACT_ENTRYPOINTS) && defined(TARGET_ARM) EXTERN_C void STDCALL ThePreStubCompactARM(); @@ -2341,7 +2341,7 @@ inline PCODE GetPreStubCompactARMEntryPoint() return GetEEFuncEntryPoint(ThePreStubCompactARM); } -#endif // defined(HAS_COMPACT_ENTRYPOINTS) && defined(_TARGET_ARM_) +#endif // defined(HAS_COMPACT_ENTRYPOINTS) && defined(TARGET_ARM) PCODE TheUMThunkPreStub(); diff --git a/src/coreclr/src/vm/clrtocomcall.cpp b/src/coreclr/src/vm/clrtocomcall.cpp index 4859e548603c1..a5f8f1270108a 100644 --- a/src/coreclr/src/vm/clrtocomcall.cpp +++ b/src/coreclr/src/vm/clrtocomcall.cpp @@ -1148,12 +1148,12 @@ BOOL ComPlusMethodFrame::TraceFrame(Thread *thread, BOOL fromPatch, // Get the call site info // -#if defined(BIT64) +#if defined(HOST_64BIT) // Interop debugging is currently not supported on WIN64, so we always return FALSE. // The result is that you can't step into an unmanaged frame or step out to one. You // also can't step a breakpoint in one. 
return FALSE; -#endif // BIT64 +#endif // HOST_64BIT TADDR ip, returnIP, returnSP; GetUnmanagedCallSite(&ip, &returnIP, &returnSP); @@ -1186,7 +1186,7 @@ BOOL ComPlusMethodFrame::TraceFrame(Thread *thread, BOOL fromPatch, } #endif //CROSSGEN_COMPILE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #ifndef DACCESS_COMPILE @@ -1254,4 +1254,4 @@ LPVOID ComPlusCall::GetRetThunk(UINT numStackBytes) #endif // !DACCESS_COMPILE -#endif // _TARGET_X86_ +#endif // TARGET_X86 diff --git a/src/coreclr/src/vm/clrtocomcall.h b/src/coreclr/src/vm/clrtocomcall.h index dca83703d2709..4ada3cb362557 100644 --- a/src/coreclr/src/vm/clrtocomcall.h +++ b/src/coreclr/src/vm/clrtocomcall.h @@ -34,14 +34,14 @@ class ComPlusCall static MethodDesc *GetWinRTFactoryMethodForCtor(MethodDesc *pMDCtor, BOOL *pComposition); static MethodDesc *GetWinRTFactoryMethodForStatic(MethodDesc *pMDStatic); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static void Init(); static LPVOID GetRetThunk(UINT numStackBytes); -#endif // _TARGET_X86_ +#endif // TARGET_X86 private: ComPlusCall(); // prevent "new"'s on this class -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 struct RetThunkCacheElement { RetThunkCacheElement() @@ -68,7 +68,7 @@ class ComPlusCall static SHash *s_pRetThunkCache; static CrstStatic s_RetThunkCacheCrst; -#endif // _TARGET_X86_ +#endif // TARGET_X86 }; #endif // __COMPLUSCALL_H__ diff --git a/src/coreclr/src/vm/clrvarargs.cpp b/src/coreclr/src/vm/clrvarargs.cpp index 56f6c41364a5d..05194582e17c1 100644 --- a/src/coreclr/src/vm/clrvarargs.cpp +++ b/src/coreclr/src/vm/clrvarargs.cpp @@ -22,7 +22,7 @@ DWORD VARARGS::CalcVaListSize(VARARGS *data) // the value since it counts the fixed args as well as the varargs. But that's harmless. DWORD dwVaListSize = data->ArgCookie->sizeOfArgs; -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 dwVaListSize += ARGUMENTREGISTERS_SIZE; #endif return dwVaListSize; @@ -32,7 +32,7 @@ void VARARGS::MarshalToManagedVaList(va_list va, VARARGS *dataout) { WRAPPER_NO_CONTRACT -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX _ASSERTE(dataout != NULL); dataout->SigPtr = SigPointer(NULL, 0); dataout->ArgCookie = NULL; @@ -49,7 +49,7 @@ void VARARGS::MarshalToUnmanagedVaList( va_list va, DWORD cbVaListSize, const VARARGS * data) { -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX BYTE * pdstbuffer = (BYTE *)va; int remainingArgs = data->RemainingArgs; @@ -84,14 +84,14 @@ VARARGS::MarshalToUnmanagedVaList( cbSize = sizeof(void*); #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (cbSize == 8) { // 64-bit primitives come from and must be copied to 64-bit aligned locations. 
psrc = (BYTE*)ALIGN_UP(psrc, 8); pdst = (BYTE*)ALIGN_UP(pdst, 8); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef STACK_GROWS_DOWN_ON_ARGS_WALK psrc -= cbSize; diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp index 88c8c00865694..9faaa002cfc79 100644 --- a/src/coreclr/src/vm/codeman.cpp +++ b/src/coreclr/src/vm/codeman.cpp @@ -33,10 +33,10 @@ #include "configuration.h" -#ifdef BIT64 +#ifdef HOST_64BIT #define CHECK_DUPLICATED_STRUCT_LAYOUTS #include "../debug/daccess/fntableaccess.h" -#endif // BIT64 +#endif // HOST_64BIT #ifdef FEATURE_PERFMAP #include "perfmap.h" @@ -82,7 +82,7 @@ unsigned ExecutionManager::m_LCG_JumpStubBlockFullCount; #endif // DACCESS_COMPILE -#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) // We don't do this on ARM just amd64 +#if defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) // We don't do this on ARM just amd64 // Support for new style unwind information (to allow OS to stack crawl JIT compiled code). @@ -122,7 +122,7 @@ bool InitUnwindFtns() NOTHROW; } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!RtlUnwindFtnsInited) { HINSTANCE hNtdll = WszGetModuleHandle(W("ntdll.dll")); @@ -154,9 +154,9 @@ bool InitUnwindFtns() RtlUnwindFtnsInited = true; } return (pRtlAddGrowableFunctionTable != NULL); -#else // !FEATURE_PAL +#else // !TARGET_UNIX return false; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } /****************************************************************************/ @@ -537,7 +537,7 @@ extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst; } EX_END_CATCH(SwallowAllExceptions); } -#endif // defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) +#endif // defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) /*----------------------------------------------------------------------------- This is a listing of which methods uses which synchronization mechanism @@ -817,7 +817,7 @@ ExecutionManager::DeleteRangeHelper //----------------------------------------------------------------------------- -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) #define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS #endif @@ -837,7 +837,7 @@ BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry) _ASSERTE((pFunctionEntry->UnwindData & 3) == 0); // The unwind data must be an RVA; we don't support packed unwind format DWORD unwindHeader = *(PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData); _ASSERTE((0 == ((unwindHeader >> 18) & 3)) || !"unknown unwind data format, version != 0"); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // On ARM, It's assumed that the prolog is always at the beginning of the function and cannot be split. // Given that, there are 4 possible ways to fragment a function: @@ -852,7 +852,7 @@ BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry) _ASSERTE((pFunctionEntry->BeginAddress & THUMB_CODE) == THUMB_CODE); // Sanity check: it's a thumb address DWORD Fbit = (unwindHeader >> 22) & 0x1; // F "fragment" bit return (Fbit == 1); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // ARM64 is a little bit more flexible, in the sense that it supports partial prologs. However only one of the // prolog regions are allowed to alter SP and that's the Host Record. 
Partial prologs are used in ShrinkWrapping @@ -995,7 +995,7 @@ PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFuncti { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) PTR_UNWIND_INFO pUnwindInfo(dac_cast(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction))); *pSize = ALIGN_UP(offsetof(UNWIND_INFO, UnwindCode) + @@ -1005,14 +1005,14 @@ PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFuncti return pUnwindInfo; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) PTR_UNWIND_INFO pUnwindInfo(dac_cast(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction))); *pSize = sizeof(UNWIND_INFO); return pUnwindInfo; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // if this function uses packed unwind data then at least one of the two least significant bits // will be non-zero. if this is the case then there will be no xdata record to enumerate. @@ -1049,7 +1049,7 @@ PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFuncti *pSize = size; return xdata; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // if this function uses packed unwind data then at least one of the two least significant bits // will be non-zero. if this is the case then there will be no xdata record to enumerate. _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0); @@ -1099,7 +1099,7 @@ TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo) { PTR_RUNTIME_FUNCTION pFunctionEntry = pCodeInfo->GetFunctionEntry(); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0); #endif @@ -1215,10 +1215,10 @@ EEJitManager::EEJitManager() m_pCodeHeap = NULL; m_jit = NULL; m_JITCompiler = NULL; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_pEmergencyJumpStubReserveList = NULL; #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) m_JITCompilerOther = NULL; #endif @@ -1231,13 +1231,13 @@ EEJitManager::EEJitManager() m_cleanupList = NULL; } -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) bool DoesOSSupportAVX() { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)(); PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL; @@ -1258,12 +1258,12 @@ bool DoesOSSupportAVX() { return FALSE; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return TRUE; } -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) void EEJitManager::SetCpuInfo() { @@ -1276,7 +1276,7 @@ void EEJitManager::SetCpuInfo() CORJIT_FLAGS CPUCompileFlags; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // NOTE: if you're adding any flags here, you probably should also be doing it // for ngen (zapper.cpp) CORINFO_CPU cpuInfo; @@ -1302,9 +1302,9 @@ void EEJitManager::SetCpuInfo() { CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // NOTE: The below checks are based on the information reported by // Intel® 64 and IA-32 Architectures Software Developer’s Manual. 
Volume 2 // and @@ -1475,17 +1475,17 @@ void EEJitManager::SetCpuInfo() CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_LZCNT); } } -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) static ConfigDWORD fFeatureSIMD; if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0) { CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD); } -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) PAL_GetJitCpuCapabilityFlags(&CPUCompileFlags); -#elif defined(BIT64) +#elif defined(HOST_64BIT) // FP and SIMD support are enabled by default CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD); CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP); @@ -1501,8 +1501,8 @@ void EEJitManager::SetCpuInfo() { CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_CRC32); } -#endif // BIT64 -#endif // _TARGET_ARM64_ +#endif // HOST_64BIT +#endif // TARGET_ARM64 m_CPUCompileFlags = CPUCompileFlags; } @@ -1744,7 +1744,7 @@ BOOL EEJitManager::LoadJIT() #else // !FEATURE_MERGE_JIT_AND_ENGINE m_JITCompiler = NULL; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) m_JITCompilerOther = NULL; #endif @@ -1984,7 +1984,7 @@ void ThrowOutOfMemoryWithinRange() EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE)); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE * EEJitManager::AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize) { CONTRACTL { @@ -2098,13 +2098,13 @@ VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SI m_pEmergencyJumpStubReserveList = pNewReserve.Extract(); } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize) { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) // // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce // chance that we won't be able allocate jump stub because of lack of suitable address space. 
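// Illustrative sketch (hypothetical helper, not part of this change): on x64 a
// rel32 call or jump encodes a signed 32-bit displacement from the end of the
// instruction, so it can only reach targets within roughly +/-2GB. Calls to
// anything farther away must be routed through a jump stub allocated near the
// calling code, which is what the reserve described above keeps room for
// (ARM64 has an analogous, tighter direct-branch range).
#include <stdint.h>

static bool FitsInRel32(uintptr_t nextInstructionAddr, uintptr_t targetAddr)
{
    // The displacement is measured from the byte that follows the call/jmp.
    int64_t delta = (int64_t)targetAddr - (int64_t)nextInstructionAddr;
    return delta >= INT32_MIN && delta <= INT32_MAX;
}
// When this returns false, the runtime points the rel32 at a nearby stub that
// performs an absolute 64-bit jump to the real target instead.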
@@ -2177,14 +2177,14 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap // Conserve emergency jump stub reserve until when it is really needed if (!pInfo->getThrowOnOutOfMemoryWithinRange()) RETURN NULL; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize); if (!pBaseAddr) ThrowOutOfMemoryWithinRange(); fAllocatedFromEmergencyJumpStubReserve = true; #else ThrowOutOfMemoryWithinRange(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } } else @@ -2224,9 +2224,9 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap DBG_ADDR(pHp->startAddress), DBG_ADDR(pHp->startAddress+pHp->maxCodeHeapSize) )); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT pCodeHeap.SuppressRelease(); RETURN pHp; @@ -2271,7 +2271,7 @@ void CodeHeapRequestInfo::Init() #ifdef FEATURE_EH_FUNCLETS -#ifdef BIT64 +#ifdef HOST_64BIT extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG64 ControlPc, IN PVOID Context) #else @@ -2325,7 +2325,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi size_t initialRequestSize = pInfo->getRequestSize(); size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) -#ifdef BIT64 +#ifdef HOST_64BIT if (pInfo->m_hiAddr == 0) { if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD) @@ -2556,7 +2556,7 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t re alignment = max(alignment, 16); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // when not optimizing for code size, 8-byte align the method entry point, so that // the JIT can in turn 8-byte align the loop entry headers. else if ((g_pConfig->GenOptimizeType() != OPT_SIZE)) @@ -2971,7 +2971,7 @@ void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment HeapList *pCodeHeap = NULL; CodeHeapRequestInfo requestInfo(NULL, pLoaderAllocator, NULL, NULL); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // CodeFragments are pretty much always Precodes that may need to be patched with jump stubs at some point in future // We will assume the worst case that every FixupPrecode will need to be patched and reserve the jump stubs accordingly requestInfo.setReserveForJumpStubs((blockSize / 8) * JUMP_ALLOCATE_SIZE); @@ -3179,10 +3179,10 @@ void EEJitManager::RemoveJitData (CodeHeader * pCHdr, size_t GCinfo_len, size_t pResolver->m_recordCodePointer = NULL; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Remove the unwind information (if applicable) UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)codeStart); -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)codeStart); FreeCodeMemory(pHeap, codeStart); @@ -3882,7 +3882,7 @@ PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo) if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress)) { -#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && defined(_TARGET_ARM64_) +#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && defined(TARGET_ARM64) // If we might have fragmented unwind, and we're on ARM64, make sure // to returning the root record, as the trailing records don't have // prolog unwind codes. 
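// Illustrative sketch (hypothetical names, not part of this change): the renames
// above split "host" from "target". HOST_* macros describe the platform this
// binary runs on, TARGET_* macros describe the platform the runtime generates
// or inspects code for, and the two only differ in cross-targeting tools such
// as crossgen or an altjit.
#include <stdint.h>

#if defined(HOST_64BIT)
typedef uint64_t HostPointerSizedUInt;        // pointer-sized on the machine running this code
#else
typedef uint32_t HostPointerSizedUInt;
#endif

#if defined(TARGET_64BIT)
static const unsigned TargetPointerSize = 8;  // pointer size in the code being targeted
#else
static const unsigned TargetPointerSize = 4;
#endif
// An x64-hosted compiler producing 32-bit ARM code would build with HOST_64BIT
// defined and TARGET_64BIT undefined, a distinction the old BIT64 spelling did
// not make explicit.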
@@ -4715,9 +4715,9 @@ void ExecutionManager::AddRangeHelper(TADDR pStartRange, pnewrange->flags = flags; pnewrange->pLastUsed = NULL; pnewrange->pHeapListOrZapModule = pHeapListOrZapModule; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) pnewrange->pUnwindInfoTable = NULL; -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) { CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList @@ -4828,10 +4828,10 @@ void ExecutionManager::DeleteRange(TADDR pStartRange) // if (pCurr != NULL) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) if (pCurr->pUnwindInfoTable != 0) delete pCurr->pUnwindInfoTable; -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) delete pCurr; } } @@ -5131,7 +5131,7 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target, numJumpStubs = 4; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Note this these values are not requirements, instead we are // just confirming the values that are mentioned in the comments. _ASSERTE(BACK_TO_BACK_JUMP_ALLOCATE_SIZE == 12); @@ -5170,7 +5170,7 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target, _ASSERTE((curBlock->m_used < curBlock->m_allocated)); -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // 8-byte alignment is required on ARM64 _ASSERTE(((UINT_PTR)jumpStub & 7) == 0); #endif @@ -5254,7 +5254,7 @@ static void GetFuncletStartOffsetsHelper(PCODE pCodeStart, SIZE_T size, SIZE_T o // Entries are sorted and terminated by sentinel value (DWORD)-1 for (; RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) < endAddress; pFunctionEntry++) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0); #endif @@ -5695,12 +5695,12 @@ BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, #ifdef FEATURE_EH_FUNCLETS PTR_RUNTIME_FUNCTION RawColdFunctionEntry = ColdFunctionTable + RawColdMethodIndex; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ((RawColdFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0) { RawColdFunctionEntry = PTR_RUNTIME_FUNCTION(ImageBase + (RawColdFunctionEntry->UnwindData & ~RUNTIME_FUNCTION_INDIRECT)); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 pCodeInfo->m_pFunctionEntry = RawColdFunctionEntry; #endif } @@ -5804,7 +5804,7 @@ TADDR NativeImageJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo) { LIMITED_METHOD_DAC_CONTRACT; -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo(); if (pLayoutInfo->m_CodeSections[2].IsInRange(pCodeInfo->GetCodeAddress())) @@ -5847,7 +5847,7 @@ DWORD NativeImageJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodTok // There are no funclets in cold section on ARM yet // @ARMTODO: support hot/cold splitting in functions with EH -#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM) && !defined(TARGET_ARM64) if (regionInfo.coldSize != NULL) { NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo(); @@ -5862,7 +5862,7 @@ DWORD NativeImageJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodTok _ASSERTE(regionInfo.coldStartAddress == moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry)); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Skip cold part of the method body if ((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0) pFunctionEntry++; @@ -5872,7 +5872,7 @@ 
DWORD NativeImageJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodTok pFunctionEntry, moduleBase, &nFunclets, pStartFuncletOffsets, dwLength); } -#endif // !_TARGET_ARM_ && !_TARGET_ARM64 +#endif // !TARGET_ARM && !_TARGET_ARM64 return nFunclets; } @@ -6188,7 +6188,7 @@ int NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(DWORD RelativePc, } CONTRACTL_END; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM RelativePc |= THUMB_CODE; #endif diff --git a/src/coreclr/src/vm/codeman.h b/src/coreclr/src/vm/codeman.h index 009c88ff6b243..1fef7fcd195cc 100644 --- a/src/coreclr/src/vm/codeman.h +++ b/src/coreclr/src/vm/codeman.h @@ -478,9 +478,9 @@ typedef struct _HeapList size_t maxCodeHeapSize;// Size of the entire contiguous block of memory size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) BYTE CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE]; // jump thunk to personality routine -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) UINT32 CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE/sizeof(UINT32)]; // jump thunk to personality routine #endif @@ -533,7 +533,7 @@ class LoaderCodeHeap : CodeHeap #endif }; -#if defined(BIT64) +#if defined(HOST_64BIT) // On non X86 platforms, the OS defined UnwindInfo (accessed from RUNTIME_FUNCTION // structures) to support the ability unwind the stack. Unfortunatey the pre-Win8 // APIs defined a callback API for publishing this data dynamically that ETW does @@ -594,7 +594,7 @@ class UnwindInfoTable { int cDeletedEntries; // Number of slots we removed. }; -#endif // defined(BIT64) +#endif // defined(HOST_64BIT) //----------------------------------------------------------------------------- // The ExecutionManager uses RangeSection as the abstraction of a contiguous @@ -636,9 +636,9 @@ struct RangeSection // PTR_Module pZapModule; // valid if RANGE_SECTION_HEAP is not set // }; TADDR pHeapListOrZapModule; -#if defined(BIT64) +#if defined(HOST_64BIT) PTR_UnwindInfoTable pUnwindInfoTable; // Points to unwind information for this memory range. -#endif // defined(BIT64) +#endif // defined(HOST_64BIT) }; /*****************************************************************************/ @@ -1168,7 +1168,7 @@ private : CUnorderedArray m_DynamicDomainCodeHeaps; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 private: // // List of reserved memory blocks to be used for jump stub allocation if no suitable memory block is found @@ -1191,7 +1191,7 @@ private : public: ICorJitCompiler * m_jit; HINSTANCE m_JITCompiler; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded. 
#endif @@ -1281,7 +1281,7 @@ class ExecutionManager BOOL Acquired(); }; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT static ULONG GetCLRPersonalityRoutineValue() { LIMITED_METHOD_CONTRACT; @@ -1289,7 +1289,7 @@ class ExecutionManager (size_t)((ULONG)offsetof(HeapList, CLRPersonalityRoutine))); return offsetof(HeapList, CLRPersonalityRoutine); } -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT static EEJitManager * GetEEJitManager() { @@ -1463,7 +1463,7 @@ class ExecutionManager static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; -#ifdef BIT64 +#ifdef HOST_64BIT return (count_t) ((size_t) k ^ ((size_t) k >> 32)); #else return (count_t)(size_t)k; @@ -1851,9 +1851,9 @@ class EECodeInfo EECodeInfo GetMainFunctionInfo(); ULONG GetFixedStackSize(); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) BOOL HasFrameRegister(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #else // FEATURE_EH_FUNCLETS ULONG GetFixedStackSize() @@ -1863,14 +1863,14 @@ class EECodeInfo } #endif // FEATURE_EH_FUNCLETS -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) void GetOffsetsFromUnwindInfo(ULONG* pRSPOffset, ULONG* pRBPOffset); #if defined(_DEBUG) && defined(HAVE_GCCOVER) // Find first funclet inside (pvFuncletStart, pvFuncletStart + cbCode) static LPVOID findNextFunclet (LPVOID pvFuncletStart, SIZE_T cbCode, LPVOID *ppvFuncletEnd); #endif // _DEBUG && HAVE_GCCOVER -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 private: PCODE m_codeAddress; @@ -1882,10 +1882,10 @@ class EECodeInfo PTR_RUNTIME_FUNCTION m_pFunctionEntry; #endif // FEATURE_EH_FUNCLETS -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Simple helper to return a pointer to the UNWIND_INFO given the offset to the unwind info. UNWIND_INFO * GetUnwindInfoHelper(ULONG unwindInfoOffset); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 }; #include "codeman.inl" diff --git a/src/coreclr/src/vm/comcallablewrapper.cpp b/src/coreclr/src/vm/comcallablewrapper.cpp index 29f4be4368e1a..c621eeb74304d 100644 --- a/src/coreclr/src/vm/comcallablewrapper.cpp +++ b/src/coreclr/src/vm/comcallablewrapper.cpp @@ -587,7 +587,7 @@ extern "C" PCODE ComPreStubWorker(ComPrestubMethodFrame *pPFrame, UINT64 *pError UINT_PTR* ppofs = (UINT_PTR*) (((BYTE*)pCMD) - COMMETHOD_CALL_PRESTUB_SIZE + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 *ppofs = ((UINT_PTR)pStub - (size_t)pCMD); #else *ppofs = ((UINT_PTR)pStub); @@ -617,7 +617,7 @@ extern "C" PCODE ComPreStubWorker(ComPrestubMethodFrame *pPFrame, UINT64 *pError else _ASSERTE(pCMD->IsNativeVoidRetVal()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Number of bytes to pop is upper half of the return value on x86 *(((INT32 *)pErrorReturn) + 1) = pCMD->GetNumStackBytes(); #endif @@ -2221,7 +2221,7 @@ ComCallWrapper* ComCallWrapper::CopyFromTemplate(ComCallWrapperTemplate* pTempla // alloc wrapper, aligned to cache line NewCCWHolder pStartWrapper(pWrapperCache); pStartWrapper = (ComCallWrapper*)pWrapperCache->GetCacheLineAllocator()-> -#ifdef BIT64 +#ifdef HOST_64BIT GetCacheLine64(); _ASSERTE(sizeof(ComCallWrapper) <= 64); #else @@ -2262,7 +2262,7 @@ ComCallWrapper* ComCallWrapper::CopyFromTemplate(ComCallWrapperTemplate* pTempla { // alloc wrapper, aligned 32 bytes ComCallWrapper* pNewWrapper = (ComCallWrapper*)pWrapperCache->GetCacheLineAllocator()-> -#ifdef BIT64 +#ifdef HOST_64BIT GetCacheLine64(); _ASSERTE(sizeof(ComCallWrapper) <= 64); #else @@ -2541,18 +2541,18 @@ void ComCallWrapper::FreeWrapper(ComCallWrapperCache *pWrapperCache) while (pWrap2 != NULL) { ComCallWrapper* 
pTempWrap = GetNext(pWrap2); - #ifdef BIT64 + #ifdef HOST_64BIT pWrapperCache->GetCacheLineAllocator()->FreeCacheLine64(pWrap2); - #else //BIT64 + #else //HOST_64BIT pWrapperCache->GetCacheLineAllocator()->FreeCacheLine32(pWrap2); - #endif //BIT64 + #endif //HOST_64BIT pWrap2 = pTempWrap; } - #ifdef BIT64 + #ifdef HOST_64BIT pWrapperCache->GetCacheLineAllocator()->FreeCacheLine64(this); - #else //BIT64 + #else //HOST_64BIT pWrapperCache->GetCacheLineAllocator()->FreeCacheLine32(this); - #endif //BIT64 + #endif //HOST_64BIT } // release ccw mgr diff --git a/src/coreclr/src/vm/comcallablewrapper.h b/src/coreclr/src/vm/comcallablewrapper.h index 693395727c225..51b6f88d87b86 100644 --- a/src/coreclr/src/vm/comcallablewrapper.h +++ b/src/coreclr/src/vm/comcallablewrapper.h @@ -983,7 +983,7 @@ class ComCallWrapper enum { NumVtablePtrs = 5, -#ifdef BIT64 +#ifdef HOST_64BIT enum_ThisMask = ~0x3f, // mask on IUnknown ** to get at the OBJECT-REF handle #else enum_ThisMask = ~0x1f, // mask on IUnknown ** to get at the OBJECT-REF handle @@ -1418,7 +1418,7 @@ public : #define GET_COM_REF(x) ((ULONG)((x) & SimpleComCallWrapper::COM_REFCOUNT_MASK)) #define GET_EXT_COM_REF(x) ((ULONG)((x) & SimpleComCallWrapper::EXT_COM_REFCOUNT_MASK)) -#ifdef BIT64 +#ifdef HOST_64BIT #define READ_REF(x) (x) #else #define READ_REF(x) (::InterlockedCompareExchange64((LONGLONG *)&x, 0, 0)) diff --git a/src/coreclr/src/vm/comdelegate.cpp b/src/coreclr/src/vm/comdelegate.cpp index 33ed446725bae..a2f0fd7e3f695 100644 --- a/src/coreclr/src/vm/comdelegate.cpp +++ b/src/coreclr/src/vm/comdelegate.cpp @@ -35,7 +35,7 @@ #ifndef DACCESS_COMPILE -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Return an encoded shuffle entry describing a general register or stack offset that needs to be shuffled. static UINT16 ShuffleOfs(INT ofs, UINT stackSizeDelta = 0) @@ -321,7 +321,7 @@ BOOL GenerateShuffleArrayPortable(MethodDesc* pMethodSrc, MethodDesc *pMethodDst UINT stackSizeDelta = 0; -#if defined(_TARGET_X86_) && !defined(UNIX_X86_ABI) +#if defined(TARGET_X86) && !defined(UNIX_X86_ABI) { UINT stackSizeSrc = sArgPlacerSrc.SizeOfArgStack(); UINT stackSizeDst = sArgPlacerDst.SizeOfArgStack(); @@ -336,7 +336,7 @@ BOOL GenerateShuffleArrayPortable(MethodDesc* pMethodSrc, MethodDesc *pMethodDst stackSizeDelta = stackSizeSrc - stackSizeDst; } -#endif // Callee pop architectures - defined(_TARGET_X86_) && !defined(UNIX_X86_ABI) +#endif // Callee pop architectures - defined(TARGET_X86) && !defined(UNIX_X86_ABI) INT ofsSrc; INT ofsDst; @@ -381,7 +381,7 @@ BOOL GenerateShuffleArrayPortable(MethodDesc* pMethodSrc, MethodDesc *pMethodDst { // The return buffer argument is implicit in both signatures. -#if !defined(_TARGET_ARM64_) || !defined(CALLDESCR_RETBUFFARGREG) +#if !defined(TARGET_ARM64) || !defined(CALLDESCR_RETBUFFARGREG) // The ifdef above disables this code if the ret buff arg is always in the same register, which // means that we don't need to do any shuffling for it. @@ -390,7 +390,7 @@ BOOL GenerateShuffleArrayPortable(MethodDesc* pMethodSrc, MethodDesc *pMethodDst if (!AddNextShuffleEntryToArray(sArgSrc, sArgDst, pShuffleEntryArray, shuffleType)) return FALSE; -#endif // !defined(_TARGET_ARM64_) || !defined(CALLDESCR_RETBUFFARGREG) +#endif // !defined(TARGET_ARM64) || !defined(CALLDESCR_RETBUFFARGREG) } // Iterate all the regular arguments. 
mapping source registers and stack locations to the corresponding @@ -548,7 +548,7 @@ VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArrayGetLoaderAllocator()->GetUMEntryThunkCache()->GetUMEntryThunk(pMD); -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) // System.Runtime.InteropServices.NativeCallableAttribute BYTE* pData = NULL; @@ -1193,7 +1193,7 @@ PCODE COMDelegate::ConvertToCallback(MethodDesc* pMD) pUMThunkMarshalInfo->SetCallingConvention(callConv); } } -#endif //_TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif //TARGET_X86 && !FEATURE_STUBS_AS_IL pCode = (PCODE)pUMEntryThunk->GetCode(); _ASSERTE(pCode != NULL); @@ -1427,7 +1427,7 @@ OBJECTREF COMDelegate::ConvertToDelegate(LPVOID pCallback, MethodTable* pMT) delObj->SetInvocationCount(DELEGATE_MARKER_UNMANAGEDFPTR); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) GCPROTECT_BEGIN(delObj); Stub *pInterceptStub = NULL; @@ -1450,7 +1450,7 @@ OBJECTREF COMDelegate::ConvertToDelegate(LPVOID pCallback, MethodTable* pMT) } GCPROTECT_END(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 return delObj; } @@ -1701,10 +1701,10 @@ FCIMPL3(PCODE, COMDelegate::AdjustTarget, Object* refThisUNSAFE, Object* targetU } FCIMPLEND -#if defined(_MSC_VER) && !defined(FEATURE_PAL) +#if defined(_MSC_VER) && !defined(TARGET_UNIX) // VC++ Compiler intrinsic. extern "C" void * _ReturnAddress(void); -#endif // _MSC_VER && !FEATURE_PAL +#endif // _MSC_VER && !TARGET_UNIX // This is the single constructor for all Delegates. The compiler // doesn't provide an implementation of the Delegate constructor. We @@ -2037,7 +2037,7 @@ PCODE COMDelegate::TheDelegateInvokeStub() } CONTRACT_END; -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) static PCODE s_pInvokeStub; if (s_pInvokeStub == NULL) @@ -2057,7 +2057,7 @@ PCODE COMDelegate::TheDelegateInvokeStub() RETURN s_pInvokeStub; #else RETURN GetEEFuncEntryPoint(SinglecastDelegateInvokeStub); -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL } // Get the cpu stub for a delegate invoke. @@ -2168,7 +2168,7 @@ BOOL COMDelegate::NeedsWrapperDelegate(MethodDesc* pTargetMD) { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // For arm VSD expects r4 to contain the indirection cell. However r4 is a non-volatile register // and its value must be preserved. So we need to erect a frame and store indirection cell in r4 before calling // virtual stub dispatch. 
Erecting frame is already done by wrapper delegates so the Wrapper Delegate infrastructure diff --git a/src/coreclr/src/vm/common.h b/src/coreclr/src/vm/common.h index 9e5083ce1f08b..cbaa63ba2f68a 100644 --- a/src/coreclr/src/vm/common.h +++ b/src/coreclr/src/vm/common.h @@ -11,7 +11,7 @@ #ifndef _common_h_ #define _common_h_ -#if defined(_MSC_VER) && defined(_X86_) && !defined(FPO_ON) +#if defined(_MSC_VER) && defined(HOST_X86) && !defined(FPO_ON) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #define FPO_ON 1 #define COMMON_TURNED_FPO_ON 1 @@ -211,7 +211,7 @@ EXTERN_C AppDomain* STDCALL GetAppDomain(); inline void RetailBreak() { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 __asm int 3 #else DebugBreak(); @@ -242,11 +242,11 @@ FORCEINLINE void* memcpyUnsafe(void *dest, const void *src, size_t len) //If memcpy has been defined to PAL_memcpy, we undefine it so that this case //can be covered by the if !defined(memcpy) block below - #ifdef FEATURE_PAL + #ifdef TARGET_UNIX #if IS_REDEFINED_IN_PAL(memcpy) #undef memcpy #endif //IS_REDEFINED_IN_PAL - #endif //FEATURE_PAL + #endif //TARGET_UNIX // You should be using CopyValueClass if you are doing an memcpy // in the CG heap. @@ -254,11 +254,11 @@ FORCEINLINE void* memcpyUnsafe(void *dest, const void *src, size_t len) FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) { WRAPPER_NO_CONTRACT; - #ifndef FEATURE_PAL + #ifndef TARGET_UNIX return memcpy(dest, src, len); - #else //FEATURE_PAL + #else //TARGET_UNIX return PAL_memcpy(dest, src, len); - #endif //FEATURE_PAL + #endif //TARGET_UNIX } extern "C" void * __cdecl GCSafeMemCpy(void *, const void *, size_t); @@ -375,7 +375,7 @@ namespace Loader HRESULT EnsureRtlFunctions(); HINSTANCE GetModuleInst(); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // // Strong memory model. No memory barrier necessary before writing object references into GC heap. // diff --git a/src/coreclr/src/vm/commtmemberinfomap.cpp b/src/coreclr/src/vm/commtmemberinfomap.cpp index b114ca5968960..be3e77b6757c8 100644 --- a/src/coreclr/src/vm/commtmemberinfomap.cpp +++ b/src/coreclr/src/vm/commtmemberinfomap.cpp @@ -176,7 +176,7 @@ DWORD EEModuleTokenHashTableHelper::Hash(EEModuleTokenPair *pKey) CONTRACTL_END; size_t val = (size_t) ((DWORD_PTR)pKey->m_tk + (DWORD_PTR)pKey->m_pModule); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return (DWORD)val; #else // @TODO IA64: Is this a good hashing mechanism on IA64? diff --git a/src/coreclr/src/vm/compile.cpp b/src/coreclr/src/vm/compile.cpp index 42904e3cbf9fb..84212341c27fa 100644 --- a/src/coreclr/src/vm/compile.cpp +++ b/src/coreclr/src/vm/compile.cpp @@ -273,17 +273,17 @@ HRESULT CEECompileInfo::LoadAssemblyByPath( // by LoadAssembly then we can blame it on bitness mismatch. 
We do the check here // and not in the CATCH to distinguish between the COR_IMAGE_ERROR that can be thrown by // VerifyIsAssembly (not necessarily a bitness mismatch) and that from LoadAssembly -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (pImage->Has32BitNTHeaders()) { hrProcessLibraryBitnessMismatch = PEFMT_E_32BIT; } -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT if (!pImage->Has32BitNTHeaders()) { hrProcessLibraryBitnessMismatch = PEFMT_E_64BIT; } -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT AssemblySpec spec; spec.InitializeSpec(TokenFromRid(1, mdtAssembly), pImage->GetMDImport(), NULL); @@ -932,7 +932,7 @@ void CEECompileInfo::GetCallRefMap(CORINFO_METHOD_HANDLE hMethod, GCRefMapBuilde UINT nStackSlots; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 UINT cbStackPop = argit.CbStackPop(); pBuilder->WriteStackPop(cbStackPop / sizeof(TADDR)); @@ -945,7 +945,7 @@ void CEECompileInfo::GetCallRefMap(CORINFO_METHOD_HANDLE hMethod, GCRefMapBuilde { int ofs; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 ofs = (pos < NUM_ARGUMENT_REGISTERS) ? (TransitionBlock::GetOffsetOfArgumentRegisters() + ARGUMENTREGISTERS_SIZE - (pos + 1) * sizeof(TADDR)) : (TransitionBlock::GetOffsetOfArgs() + (pos - NUM_ARGUMENT_REGISTERS) * sizeof(TADDR)); @@ -977,7 +977,7 @@ void CEECompileInfo::GetCallRefMap(CORINFO_METHOD_HANDLE hMethod, GCRefMapBuilde GCRefMapDecoder decoder((BYTE *)pBlob + dwInitialLength); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(decoder.ReadStackPop() * sizeof(TADDR) == cbStackPop); #endif @@ -988,7 +988,7 @@ void CEECompileInfo::GetCallRefMap(CORINFO_METHOD_HANDLE hMethod, GCRefMapBuilde int ofs; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 ofs = (pos < NUM_ARGUMENT_REGISTERS) ? (TransitionBlock::GetOffsetOfArgumentRegisters() + ARGUMENTREGISTERS_SIZE - (pos + 1) * sizeof(TADDR)) : (TransitionBlock::GetOffsetOfArgs() + (pos - NUM_ARGUMENT_REGISTERS) * sizeof(TADDR)); @@ -6164,11 +6164,11 @@ void CEEPreloader::GenerateMethodStubs( if (IsReadyToRunCompilation() && (!GetAppDomain()->ToCompilationDomain()->GetTargetModule()->IsSystem() || !pMD->IsNDirect())) return; -#if defined(_TARGET_ARM_) && defined(FEATURE_PAL) +#if defined(TARGET_ARM) && defined(TARGET_UNIX) // Cross-bitness compilation of il stubs does not work. Disable here. 
if (IsReadyToRunCompilation()) return; -#endif // defined(_TARGET_ARM_) && defined(FEATURE_PAL) +#endif // defined(TARGET_ARM) && defined(TARGET_UNIX) DWORD dwNGenStubFlags = NDIRECTSTUB_FL_NGENEDSTUB; @@ -6289,10 +6289,10 @@ void CEEPreloader::GenerateMethodStubs( { EX_TRY { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // on x86, we call the target directly if Invoke has a no-marshal signature if (NDirect::MarshalingRequired(pMD)) -#endif // _TARGET_X86_ +#endif // TARGET_X86 { PInvokeStaticSigInfo sigInfo(pMD); pStubMD = UMThunkMarshInfo::GetILStubMethodDesc(pMD, &sigInfo, NDIRECTSTUB_FL_DELEGATE | dwNGenStubFlags); diff --git a/src/coreclr/src/vm/comsynchronizable.cpp b/src/coreclr/src/vm/comsynchronizable.cpp index 02c1e3784d009..37839ebbed488 100644 --- a/src/coreclr/src/vm/comsynchronizable.cpp +++ b/src/coreclr/src/vm/comsynchronizable.cpp @@ -28,7 +28,7 @@ #include "appdomain.hpp" #include "appdomain.inl" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "utilcode.h" #endif @@ -734,7 +734,7 @@ UINT64 QCALLTYPE ThreadNative::GetCurrentOSThreadId() UINT64 threadId; BEGIN_QCALL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX threadId = (UINT64) GetCurrentThreadId(); #else threadId = (UINT64) PAL_GetCurrentOSThreadId(); @@ -1450,12 +1450,12 @@ FCIMPL0(INT32, ThreadNative::GetCurrentProcessorNumber) { FCALL_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PROCESSOR_NUMBER proc_no_cpu_group; GetCurrentProcessorNumberEx(&proc_no_cpu_group); return (proc_no_cpu_group.Group << 6) | proc_no_cpu_group.Number; #else return ::GetCurrentProcessorNumber(); -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX } FCIMPLEND; diff --git a/src/coreclr/src/vm/comthreadpool.cpp b/src/coreclr/src/vm/comthreadpool.cpp index 12e8a93aa3f39..b726736639919 100644 --- a/src/coreclr/src/vm/comthreadpool.cpp +++ b/src/coreclr/src/vm/comthreadpool.cpp @@ -684,7 +684,7 @@ void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode, WRAPPER_NO_CONTRACT; BindIoCompletionCallbackStubEx(ErrorCode, numBytesTransferred, lpOverlapped, TRUE); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX extern Volatile g_fCompletionPortDrainNeeded; Thread *pThread = GetThread(); @@ -697,7 +697,7 @@ void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode, pThread->MarkCompletionPortDrained(); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } FCIMPL1(FC_BOOL_RET, ThreadPoolNative::CorBindIoCompletionCallback, HANDLE fileHandle) diff --git a/src/coreclr/src/vm/comtoclrcall.cpp b/src/coreclr/src/vm/comtoclrcall.cpp index 387089e96692e..0bb90dc652d30 100644 --- a/src/coreclr/src/vm/comtoclrcall.cpp +++ b/src/coreclr/src/vm/comtoclrcall.cpp @@ -45,7 +45,7 @@ #if !defined(DACCESS_COMPILE) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static PCODE g_pGenericComCallStubFields = NULL; static PCODE g_pGenericComCallStub = NULL; #endif @@ -64,7 +64,7 @@ static void SetupGenericStubs() { STANDARD_VM_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ( (g_pGenericComCallStubFields != NULL) && (g_pGenericComCallStub != NULL)) return; @@ -85,7 +85,7 @@ static void SetupGenericStubs() if (InterlockedCompareExchangeT(&g_pGenericComCallStubFields, candidateFields->GetEntryPoint(), 0) == 0) candidateFields.SuppressRelease(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #ifdef PROFILING_SUPPORTED @@ -158,7 +158,7 @@ extern "C" HRESULT STDCALL StubRareDisableHRWorker(Thread *pThread) return hr; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // defined in i386\asmhelpers.asm extern "C" ARG_SLOT __fastcall COMToCLRDispatchHelper( @@ -205,7 +205,7 @@ inline static void 
InvokeStub(ComCallMethodDesc *pCMD, PCODE pManagedTarget, OBJ pThread->GetFrame()); // pCurFrame } -#else // _TARGET_X86_ +#else // TARGET_X86 // defined in amd64\GenericComCallStubs.asm extern "C" ARG_SLOT COMToCLRDispatchHelper( @@ -247,7 +247,7 @@ inline static void InvokeStub(ComCallMethodDesc *pCMD, PCODE pManagedTarget, OBJ dangerousThis); // pDangerousThis } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if defined(_MSC_VER) && !defined(_DEBUG) #pragma optimize("t", on) // optimize for speed @@ -545,7 +545,7 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra { NOTHROW; // Although CSE can be thrown GC_TRIGGERS; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) MODE_COOPERATIVE; // X86 sets up COOP in stublinker-generated stub #else // This contract is disabled because user code can illegally reenter here through no fault of the @@ -563,7 +563,7 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra ComCallMethodDesc* pCMD = pFrame->GetComCallMethodDesc(); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // // The following code is a transcription of the code that is generated by CreateGenericComCallStub. The // idea is that we needn't really do this work either in static assembly code nor in dynamically @@ -602,7 +602,7 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra // Link frame into the chain. pFrame->Push(pThread); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 _ASSERTE(pThread); @@ -611,13 +611,13 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra _ASSERTE(pThread->PreemptiveGCDisabled()); { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 if (pCMD->IsFieldCall()) { retVal = FieldCallWorker(pThread, pFrame); } else -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 { IUnknown **pip = (IUnknown **)pFrame->GetPointerToArguments(); IUnknown *pUnk = (IUnknown *)*pip; @@ -629,7 +629,7 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra } } -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Note: the EH subsystem will handle reseting the frame chain and setting // the correct GC mode on exception. 
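    // Illustrative note (not part of this change): on this non-x86 path the frame
    // protocol is written out in C++ rather than emitted by a stublinker stub; the
    // shape is simply
    //     pFrame->Push(pThread);    // link the ComMethodFrame into the thread's chain
    //     /* dispatch the field or method call */
    //     pFrame->Pop(pThread);     // unlink on the normal return path
    // with the EH subsystem, as noted above, taking care of the unlink and GC mode
    // when an exception unwinds through instead. The Pop below is that normal path.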
pFrame->Pop(pThread); @@ -649,7 +649,7 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra } return retVal; -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 ErrorExit: if (pThread != NULL && pThread->PreemptiveGCDisabled()) pThread->EnablePreemptiveGC(); @@ -678,7 +678,7 @@ extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFra } return retVal; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #if defined(_MSC_VER) && !defined(_DEBUG) @@ -852,10 +852,10 @@ PCODE ComCallMethodDesc::CreateCOMToCLRStub(DWORD dwStubFlags, MethodDesc **ppSt *ppStubMD = pStubMD; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // make sure our native stack computation in code:ComCallMethodDesc.InitNativeInfo is right _ASSERTE(HasMarshalError() || !pStubMD->IsILStub() || pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() == m_StackBytes); -#else // _TARGET_X86_ +#else // TARGET_X86 if (pStubMD->IsILStub()) { m_StackBytes = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize(); @@ -865,7 +865,7 @@ PCODE ComCallMethodDesc::CreateCOMToCLRStub(DWORD dwStubFlags, MethodDesc **ppSt { m_StackBytes = pStubMD->SizeOfArgStack(); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 RETURN JitILStub(pStubMD); } @@ -882,7 +882,7 @@ void ComCallMethodDesc::InitRuntimeNativeInfo(MethodDesc *pStubMD) } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Parse the stub signature to figure out how we're going to transform the incoming arguments // into stub arguments (i.e. ECX and possibly EDX get enregisterable args, stack gets reversed). @@ -978,7 +978,7 @@ void ComCallMethodDesc::InitRuntimeNativeInfo(MethodDesc *pStubMD) *(SHORT *)&pMethodDescMemory[1] = nativeArgSize; FlushInstructionCache(GetCurrentProcess(), pMethodDescMemory, sizeof pMethodDescMemory[0] + sizeof(SHORT)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #endif //CROSSGEN_COMPILE @@ -999,10 +999,10 @@ void ComCallMethodDesc::InitMethod(MethodDesc *pMD, MethodDesc *pInterfaceMD, BO m_pInterfaceMD = PTR_MethodDesc(pInterfaceMD); m_pILStub = NULL; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 m_dwSlotInfo = 0; m_pwStubStackSlotOffsets = NULL; -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (fRedirectedInterface) m_flags |= enum_IsWinRTRedirected; @@ -1049,10 +1049,10 @@ void ComCallMethodDesc::InitField(FieldDesc* pFD, BOOL isGetter) m_pFD = pFD; m_pILStub = NULL; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 m_dwSlotInfo = 0; m_pwStubStackSlotOffsets = NULL; -#endif // _TARGET_X86_ +#endif // TARGET_X86 m_flags = enum_IsFieldCall; // mark the attribute as a field m_flags |= isGetter ? enum_IsGetter : 0; @@ -1082,7 +1082,7 @@ void ComCallMethodDesc::InitNativeInfo() EX_TRY { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86, this method has to compute size of arguments because we need to know size of the native stack // to be able to return back to unmanaged code UINT16 nativeArgSize; @@ -1101,7 +1101,7 @@ void ComCallMethodDesc::InitNativeInfo() CONSISTENCY_CHECK_MSGF(false, ("BreakOnComToClrNativeInfoInit: '%s' ", szDebugName)); #endif // _DEBUG -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 MetaSig fsig(pFD); fsig.NextArg(); @@ -1130,7 +1130,7 @@ void ComCallMethodDesc::InitNativeInfo() // setter takes 'this' and the input argument by-value nativeArgSize = sizeof(void *) + info.GetNativeArgSize(); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Field calls always return HRESULTs. 
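        // Illustrative note (hypothetical signatures, not part of this change): a
        // "native HRESULT return" means the COM-visible member reports success or
        // failure through an HRESULT and surfaces any managed result via a trailing
        // [out, retval] parameter, roughly:
        //     managed:  int GetValue();
        //     COM view: HRESULT GetValue(int* pRetVal);
        // Field getters and setters exposed to COM always take this shape, hence the
        // unconditional flag below.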
m_flags |= enum_NativeHResultRetVal; @@ -1159,7 +1159,7 @@ void ComCallMethodDesc::InitNativeInfo() // Determine if we need to do HRESULT munging for this method. BOOL fPreserveSig = IsMiPreserveSig(dwImplFlags); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 if (!fPreserveSig) { // PreserveSig=false methods always return HRESULTs. @@ -1170,7 +1170,7 @@ void ComCallMethodDesc::InitNativeInfo() MetaSig msig(pMD); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 if (msig.IsReturnTypeVoid()) { // The method has a void return type on the native side. @@ -1199,7 +1199,7 @@ void ComCallMethodDesc::InitNativeInfo() mdParamDef *params = (mdParamDef*)_alloca((numArgs+1) * sizeof(mdParamDef)); CollateParamTokens(pInternalImport, md, numArgs, params); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // If this is a method call then check to see if we need to do LCID conversion. int iLCIDArg = GetLCIDParameterIndex(pMD); if (iLCIDArg != -1) @@ -1248,14 +1248,14 @@ void ComCallMethodDesc::InitNativeInfo() // Check to see if this is the parameter after which we need to read the LCID from. if (iArg == iLCIDArg) nativeArgSize += StackElemSize(sizeof(LCID)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 // // Return value // -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Handled above _ASSERTE(!msig.IsReturnTypeVoid()); #else @@ -1274,7 +1274,7 @@ void ComCallMethodDesc::InitNativeInfo() goto Done; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 { MarshalInfo info(msig.GetModule(), msig.GetReturnProps(), msig.GetSigTypeContext(), params[0], @@ -1286,7 +1286,7 @@ void ComCallMethodDesc::InitNativeInfo() #endif ); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Handled above _ASSERTE(fPreserveSig); #else @@ -1306,7 +1306,7 @@ void ComCallMethodDesc::InitNativeInfo() goto Done; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Ignore the secret return buffer argument - we don't allow returning // structures by value in COM interop. @@ -1348,7 +1348,7 @@ void ComCallMethodDesc::InitNativeInfo() Done: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // The above algorithm to compute nativeArgSize is x86-specific. We will compute // the correct value later for other platforms. m_StackBytes = nativeArgSize; @@ -1457,7 +1457,7 @@ void ComCall::PopulateComCallMethodDesc(ComCallMethodDesc *pCMD, DWORD *pdwStubF } #ifndef CROSSGEN_COMPILE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //--------------------------------------------------------- // Creates the generic ComCall stub. // @@ -1525,7 +1525,7 @@ Stub* ComCall::CreateGenericComCallStub(BOOL isFieldAccess) // Process-wide stubs that never unload. RETURN (psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap())); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 //--------------------------------------------------------- // Either creates or retrieves from the cache, a stub to @@ -1563,7 +1563,7 @@ PCODE ComCall::GetComCallMethodStub(ComCallMethodDesc *pCMD) InterlockedCompareExchangeT(pCMD->GetAddrOfILStubField(), pTempILStub, NULL); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Finally, we need to build a stub that represents the entire call. This // is always generic. RETURN (pCMD->IsFieldCall() ? 
g_pGenericComCallStubFields : g_pGenericComCallStub); diff --git a/src/coreclr/src/vm/comtoclrcall.h b/src/coreclr/src/vm/comtoclrcall.h index affc8a0b4e09b..f39f5a2c0a848 100644 --- a/src/coreclr/src/vm/comtoclrcall.h +++ b/src/coreclr/src/vm/comtoclrcall.h @@ -399,10 +399,10 @@ class ComCallMethodDesc } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (m_pwStubStackSlotOffsets != NULL) delete [] m_pwStubStackSlotOffsets; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } static void ReleaseComCallMethodDesc(ComCallMethodDesc *pCMD) @@ -433,7 +433,7 @@ class ComCallMethodDesc PCODE m_pILStub; // IL stub for COM to CLR call, invokes GetCallMethodDesc() // Platform specific data needed for efficient IL stub invocation: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 union { struct @@ -451,7 +451,7 @@ class ComCallMethodDesc // This is an array of m_wStubStackSlotCount numbers where each element is the offset // on the source stack where the particular stub stack slot should be copied from. UINT16 *m_pwStubStackSlotOffsets; -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Number of stack bytes pushed by the unmanaged caller. UINT16 m_StackBytes; diff --git a/src/coreclr/src/vm/comutilnative.cpp b/src/coreclr/src/vm/comutilnative.cpp index bcab78beb411b..1b31fff645917 100644 --- a/src/coreclr/src/vm/comutilnative.cpp +++ b/src/coreclr/src/vm/comutilnative.cpp @@ -625,7 +625,7 @@ void QCALLTYPE Buffer::Clear(void *dst, size_t length) { QCALL_CONTRACT; -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) if (length > 0x100) { // memset ends up calling rep stosb if the hardware claims to support it efficiently. rep stosb is up to 2x slower @@ -1073,7 +1073,7 @@ FCIMPL1(INT64, GCInterface::GetTotalAllocatedBytes, CLR_BOOL precise) if (!precise) { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT uint64_t unused_bytes = Thread::dead_threads_non_alloc_bytes; #else // As it could be noticed we read 64bit values that may be concurrently updated. @@ -1343,11 +1343,11 @@ void GCInterface::AddMemoryPressure(UINT64 bytesAllocated) } } -#ifdef BIT64 +#ifdef HOST_64BIT const unsigned MIN_MEMORYPRESSURE_BUDGET = 4 * 1024 * 1024; // 4 MB -#else // BIT64 +#else // HOST_64BIT const unsigned MIN_MEMORYPRESSURE_BUDGET = 3 * 1024 * 1024; // 3 MB -#endif // BIT64 +#endif // HOST_64BIT const unsigned MAX_MEMORYPRESSURE_RATIO = 10; // 40 MB or 30 MB diff --git a/src/coreclr/src/vm/contractimpl.h b/src/coreclr/src/vm/contractimpl.h index b44050a3f45d7..5937c3c1c16c4 100644 --- a/src/coreclr/src/vm/contractimpl.h +++ b/src/coreclr/src/vm/contractimpl.h @@ -156,7 +156,7 @@ struct DispatchToken // IMPORTANT: This is the ONLY member of this class. 
UINT_PTR m_token; -#ifndef BIT64 +#ifndef HOST_64BIT // NOTE: On 32-bit, we use the uppermost bit to indicate that the // token is really a DispatchTokenFat*, and to recover the pointer // we just shift left by 1; correspondingly, when storing a @@ -172,7 +172,7 @@ struct DispatchToken #endif // FAT_DISPATCH_TOKENS static const UINT_PTR INVALID_TOKEN = 0x7FFFFFFF; -#else //BIT64 +#else //HOST_64BIT static const UINT_PTR MASK_TYPE_ID = UI64(0x000000007FFFFFFF); static const UINT_PTR MASK_SLOT_NUMBER = UI64(0x000000000000FFFF); @@ -184,7 +184,7 @@ struct DispatchToken #endif // FAT_DISPATCH_TOKENS static const UINT_PTR INVALID_TOKEN = 0x7FFFFFFFFFFFFFFF; -#endif //BIT64 +#endif //HOST_64BIT #ifdef FAT_DISPATCH_TOKENS //------------------------------------------------------------------------ @@ -242,7 +242,7 @@ struct DispatchToken public: #ifdef FAT_DISPATCH_TOKENS -#if !defined(BIT64) +#if !defined(HOST_64BIT) static const UINT32 MAX_TYPE_ID_SMALL = 0x00007FFF; #else static const UINT32 MAX_TYPE_ID_SMALL = 0x7FFFFFFF; diff --git a/src/coreclr/src/vm/coreassemblyspec.cpp b/src/coreclr/src/vm/coreassemblyspec.cpp index 4a8c79e284c13..99ae244064758 100644 --- a/src/coreclr/src/vm/coreassemblyspec.cpp +++ b/src/coreclr/src/vm/coreassemblyspec.cpp @@ -243,14 +243,14 @@ STDAPI BinderHasNativeHeader(PEImage *pPEImage, BOOL* result) { *result = false; -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) // PAL_LOADLoadPEFile may fail while loading IL masquerading as NI. // This will result in a ThrowHR(E_FAIL). Suppress the error. if(hr == E_FAIL) { hr = S_OK; } -#endif // defined(FEATURE_PAL) +#endif // defined(TARGET_UNIX) } return hr; diff --git a/src/coreclr/src/vm/corhost.cpp b/src/coreclr/src/vm/corhost.cpp index c7ad7c7fa0405..c5fb057baa4cf 100644 --- a/src/coreclr/src/vm/corhost.cpp +++ b/src/coreclr/src/vm/corhost.cpp @@ -35,9 +35,9 @@ #include "finalizerthread.h" #include "threadsuspend.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "dwreport.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_COMINTEROP #include "winrttypenameconverter.h" @@ -51,11 +51,11 @@ EXTERN_C __declspec(thread) ThreadLocalInfo gCurrentThreadInfo; #else // !__GNUC__ EXTERN_C __thread ThreadLocalInfo gCurrentThreadInfo; #endif // !__GNUC__ -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX EXTERN_C UINT32 _tls_index; -#else // FEATURE_PAL +#else // TARGET_UNIX UINT32 _tls_index = 0; -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifndef DACCESS_COMPILE @@ -1143,12 +1143,12 @@ HRESULT CorHost2::QueryInterface(REFIID riid, void **ppUnk) *ppUnk = static_cast(this); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX else if (riid == IID_IPrivateManagedExceptionReporting) { *ppUnk = static_cast(this); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX else return (E_NOINTERFACE); AddRef(); @@ -1156,7 +1156,7 @@ HRESULT CorHost2::QueryInterface(REFIID riid, void **ppUnk) } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT CorHost2::GetBucketParametersForCurrentException(BucketParameters *pParams) { CONTRACTL @@ -1179,7 +1179,7 @@ HRESULT CorHost2::GetBucketParametersForCurrentException(BucketParameters *pPara return hr; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX HRESULT CorHost2::CreateObject(REFIID riid, void **ppUnk) { @@ -2028,11 +2028,11 @@ SIZE_T STDMETHODCALLTYPE CExecutionEngine::ClrVirtualQuery(LPCVOID lpAddress, } #define ClrVirtualQuery EEVirtualQuery -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) static VolatilePtr s_pStartOfUEFSection = NULL; static 
VolatilePtr s_pEndOfUEFSectionBoundary = NULL; static Volatile s_dwProtection = 0; -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX #undef ClrVirtualProtect @@ -2077,7 +2077,7 @@ BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualProtect(LPVOID lpAddress, // // We assert if either of the two conditions above are true. -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) // We do this check in debug/checked builds only // Do we have the UEF details? @@ -2147,7 +2147,7 @@ BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualProtect(LPVOID lpAddress, "Do not virtual protect the section in which UEF lives!"); } } -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX return EEVirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect); } diff --git a/src/coreclr/src/vm/crossgencompile.cpp b/src/coreclr/src/vm/crossgencompile.cpp index 656a53a1d0382..7030d9d7f39d1 100644 --- a/src/coreclr/src/vm/crossgencompile.cpp +++ b/src/coreclr/src/vm/crossgencompile.cpp @@ -246,7 +246,7 @@ PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags return 0x321; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod, LoaderAllocator *pLoaderAllocator /* = NULL */, bool throwOnOutOfMemoryWithinRange /*= true*/) { diff --git a/src/coreclr/src/vm/customattribute.h b/src/coreclr/src/vm/customattribute.h index 35729b4535e18..4aa3f227a121f 100644 --- a/src/coreclr/src/vm/customattribute.h +++ b/src/coreclr/src/vm/customattribute.h @@ -45,7 +45,7 @@ struct CustomAttributeType struct CustomAttributeValue { -#ifdef BIT64 +#ifdef HOST_64BIT // refs come before longs on win64 CaValueArrayREF m_value; STRINGREF m_enumOrTypeName; @@ -65,7 +65,7 @@ struct CustomAttributeValue struct CustomAttributeArgument { CustomAttributeType m_type; -#if (!defined(BIT64) && (DATA_ALIGNMENT > 4)) || defined(FEATURE_64BIT_ALIGNMENT) +#if (!defined(HOST_64BIT) && (DATA_ALIGNMENT > 4)) || defined(FEATURE_64BIT_ALIGNMENT) DWORD m_padding; #endif CustomAttributeValue m_value; @@ -76,11 +76,11 @@ struct CustomAttributeNamedArgument STRINGREF m_argumentName; CorSerializationType m_propertyOrField; CorSerializationType m_padding; -#if !defined(BIT64) && (DATA_ALIGNMENT > 4) +#if !defined(HOST_64BIT) && (DATA_ALIGNMENT > 4) DWORD m_padding2; #endif CustomAttributeType m_type; -#if !defined(BIT64) && (DATA_ALIGNMENT > 4) +#if !defined(HOST_64BIT) && (DATA_ALIGNMENT > 4) DWORD m_padding3; #endif CustomAttributeValue m_value; diff --git a/src/coreclr/src/vm/dataimage.cpp b/src/coreclr/src/vm/dataimage.cpp index a7aaade461d67..c1387157b16da 100644 --- a/src/coreclr/src/vm/dataimage.cpp +++ b/src/coreclr/src/vm/dataimage.cpp @@ -375,11 +375,11 @@ static void EncodeTargetOffset(PVOID pLocation, SSIZE_T targetOffset, ZapRelocat *(UNALIGNED TADDR *)pLocation = 0; break; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) case IMAGE_REL_BASED_REL32: *(UNALIGNED INT32 *)pLocation = (INT32)targetOffset; break; -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 default: _ASSERTE(0); @@ -402,10 +402,10 @@ static SSIZE_T DecodeTargetOffset(PVOID pLocation, ZapRelocationType type) _ASSERTE(*(UNALIGNED TADDR *)pLocation == 0); return 0; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) case IMAGE_REL_BASED_REL32: return *(UNALIGNED INT32 *)pLocation; -#endif 
// _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 default: _ASSERTE(0); @@ -1198,7 +1198,7 @@ ZapNode * DataImage::GetGenericSignature(PVOID signature, BOOL fMethod) return pGenericSignature; } -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) class ZapStubPrecode : public ZapNode { @@ -1319,7 +1319,7 @@ void DataImage::SavePrecode(PVOID ptr, MethodDesc * pMD, PrecodeType t, ItemKind AddStructureInOrder(pNode); } -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 void DataImage::FixupModulePointer(Module * pModule, PVOID p, SSIZE_T offset, ZapRelocationType type) { @@ -2307,7 +2307,7 @@ class ZapCompressedLookupMap : public ZapNode { LIMITED_METHOD_CONTRACT; -#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && defined(_MSC_VER) +#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && defined(_MSC_VER) // This this operation could impact the performance of ngen (we call this a *lot*) we'll try and // optimize this where we can. x86 and amd64 actually have instructions to find the least and most @@ -2318,7 +2318,7 @@ class ZapCompressedLookupMap : public ZapNode else return 1; -#else // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER +#else // (TARGET_X86 || TARGET_AMD64) && _MSC_VER // Otherwise we'll calculate this the slow way. Pick off the 32-bit case first due to avoid the // usual << problem (x << 32 == x, not 0). @@ -2331,7 +2331,7 @@ class ZapCompressedLookupMap : public ZapNode return cBits; -#endif // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER +#endif // (TARGET_X86 || TARGET_AMD64) && _MSC_VER } // Sort the given input array (of kLookupMapLengthEntries entries, where the last entry is already sorted) diff --git a/src/coreclr/src/vm/dataimage.h b/src/coreclr/src/vm/dataimage.h index a7a98d0b72682..9ba2d98c69cec 100644 --- a/src/coreclr/src/vm/dataimage.h +++ b/src/coreclr/src/vm/dataimage.h @@ -8,11 +8,11 @@ #define _DATAIMAGE_H_ // IMAGE_REL_BASED_PTR is architecture specific reloc of virtual address -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define IMAGE_REL_BASED_PTR IMAGE_REL_BASED_DIR64 -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT #define IMAGE_REL_BASED_PTR IMAGE_REL_BASED_HIGHLOW -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT // Special NGEN-specific relocation type for relative pointer (used to make NGen relocation section smaller) #define IMAGE_REL_BASED_RELPTR 0x7D diff --git a/src/coreclr/src/vm/debughelp.cpp b/src/coreclr/src/vm/debughelp.cpp index 1a70337e80aef..afa3714d62c78 100644 --- a/src/coreclr/src/vm/debughelp.cpp +++ b/src/coreclr/src/vm/debughelp.cpp @@ -23,11 +23,11 @@ BOOL isMemoryReadable(const TADDR start, unsigned len) } CONTRACTL_END; -#if !defined(DACCESS_COMPILE) && defined(FEATURE_PAL) +#if !defined(DACCESS_COMPILE) && defined(TARGET_UNIX) return PAL_ProbeMemory((PVOID)start, len, FALSE); -#else // !DACCESS_COMPILE && FEATURE_PAL +#else // !DACCESS_COMPILE && TARGET_UNIX // // To accomplish this in a no-throw way, we have to touch each and every page @@ -92,7 +92,7 @@ BOOL isMemoryReadable(const TADDR start, unsigned len) } return 1; -#endif // !DACCESS_COMPILE && FEATURE_PAL +#endif // !DACCESS_COMPILE && TARGET_UNIX } @@ -207,7 +207,7 @@ void *DumpEnvironmentBlock(void) return WszGetEnvironmentStrings(); } -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) /*******************************************************************/ // Dump the SEH chain to stderr void PrintSEHChain(void) 
@@ -227,7 +227,7 @@ void PrintSEHChain(void) pEHR = pEHR->Next; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /*******************************************************************/ MethodDesc* IP2MD(ULONG_PTR IP) @@ -600,7 +600,7 @@ int DumpCurrentStack() } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 BYTE* top = (BYTE *)GetCurrentSP(); // go back at most 64K, it will stop if we go off the @@ -609,7 +609,7 @@ int DumpCurrentStack() #else _ASSERTE(!"@NYI - DumpCurrentStack(DebugHelp.cpp)"); return 0; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } /*******************************************************************/ @@ -1012,7 +1012,7 @@ void PrintDomainName(size_t ob) } } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #include "gcdump.h" diff --git a/src/coreclr/src/vm/debuginfostore.cpp b/src/coreclr/src/vm/debuginfostore.cpp index b395abce23c40..295dbe5f201bc 100644 --- a/src/coreclr/src/vm/debuginfostore.cpp +++ b/src/coreclr/src/vm/debuginfostore.cpp @@ -92,7 +92,7 @@ class TransferWriter MODE_ANY; } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(dwOffset % sizeof(DWORD) == 0); // should be dword aligned. That'll save us 2 bits. m_w.WriteEncodedI32(dwOffset / sizeof(DWORD)); #else @@ -175,7 +175,7 @@ class TransferReader void DoEncodedStackOffset(signed & dwOffset) { SUPPORTS_DAC; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 dwOffset = m_r.ReadEncodedI32() * sizeof(DWORD); #else // Non x86 platforms don't need it to be dword aligned. diff --git a/src/coreclr/src/vm/diagnosticserver.cpp b/src/coreclr/src/vm/diagnosticserver.cpp index a60a10f0f88a6..c1a8aa7acf649 100644 --- a/src/coreclr/src/vm/diagnosticserver.cpp +++ b/src/coreclr/src/vm/diagnosticserver.cpp @@ -9,9 +9,9 @@ #include "profilerdiagnosticprotocolhelper.h" #include "diagnosticsprotocol.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "pal.h" -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_AUTO_TRACE #include "autotrace.h" @@ -76,7 +76,7 @@ DWORD WINAPI DiagnosticServer::DiagnosticsServerThread(LPVOID) EventPipeProtocolHelper::HandleIpcMessage(message, pStream); break; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX case DiagnosticsIpc::DiagnosticServerCommandSet::Dump: DumpDiagnosticProtocolHelper::HandleIpcMessage(message, pStream); break; diff --git a/src/coreclr/src/vm/disassembler.cpp b/src/coreclr/src/vm/disassembler.cpp index 77497f66da6d0..3d5eb16032ce3 100755 --- a/src/coreclr/src/vm/disassembler.cpp +++ b/src/coreclr/src/vm/disassembler.cpp @@ -20,17 +20,17 @@ DisasmInstruction_t *Disassembler::External_DisasmInstruction = nullptr; Disassembler::ExternalDisassembler *Disassembler::s_availableExternalDisassembler = nullptr; -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#if defined(TARGET_AMD64) || defined(TARGET_X86) // static bool Disassembler::IsRexPrefix(UINT8 potentialRexByte) { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return (potentialRexByte & 0xf0) == REX_PREFIX_BASE; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 return false; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } // static @@ -53,7 +53,7 @@ UINT8 Disassembler::DecodeRmFromModRm(UINT8 modRm) LIMITED_METHOD_CONTRACT; return modRm & 0x7; } -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#endif // defined(TARGET_AMD64) || defined(TARGET_X86) // static bool Disassembler::IsAvailable() @@ -176,11 +176,11 @@ Disassembler::Disassembler() // - A string of the form "x86_64-pc-win32" externalDisassembler = External_InitDisasm(Target_Host); #elif 
USE_MSVC_DISASSEMBLER - #ifdef _TARGET_X86_ + #ifdef TARGET_X86 externalDisassembler = ExternalDisassembler::PdisNew(ExternalDisassembler::distX86); - #elif defined(_TARGET_AMD64_) + #elif defined(TARGET_AMD64) externalDisassembler = ExternalDisassembler::PdisNew(ExternalDisassembler::distX8664); - #endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) + #endif // defined(TARGET_X86) || defined(TARGET_AMD64) #endif // USE_COREDISTOOLS_DISASSEMBLER || USE_MSVC_DISASSEMBLER } @@ -263,9 +263,9 @@ InstructionType Disassembler::DetermineInstructionType( switch (instructionCode[i]) { case 0xe8: // call near rel - #ifdef _TARGET_X86_ + #ifdef TARGET_X86 case 0x9a: // call far ptr - #endif // _TARGET_X86_ + #endif // TARGET_X86 return InstructionType::Call_DirectUnconditional; case 0xff: diff --git a/src/coreclr/src/vm/disassembler.h b/src/coreclr/src/vm/disassembler.h index 5a7976a44f443..07447d659b0ee 100644 --- a/src/coreclr/src/vm/disassembler.h +++ b/src/coreclr/src/vm/disassembler.h @@ -13,7 +13,7 @@ // COREDISTOOLS disassembler only supports amd64 and x86, so if this is // CoreCLR but not amd64 and not x86, we will fall out of this check and not // set USE_DISASSEMBLER. - #if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) + #if defined(TARGET_AMD64) || defined(TARGET_X86) #undef USE_COREDISTOOLS_DISASSEMBLER #define USE_COREDISTOOLS_DISASSEMBLER 1 #endif @@ -66,13 +66,13 @@ class Disassembler typedef DIS ExternalDisassembler; #endif // USE_COREDISTOOLS_DISASSEMBLER || USE_MSVC_DISASSEMBLER -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#if defined(TARGET_AMD64) || defined(TARGET_X86) public: static bool IsRexPrefix(UINT8 potentialRexByte); static UINT8 DecodeModFromModRm(UINT8 modRm); static UINT8 DecodeRegOrOpCodeFromModRm(UINT8 modRm); static UINT8 DecodeRmFromModRm(UINT8 modRm); -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#endif // defined(TARGET_AMD64) || defined(TARGET_X86) public: static bool IsAvailable(); diff --git a/src/coreclr/src/vm/dispparammarshaler.cpp b/src/coreclr/src/vm/dispparammarshaler.cpp index 9289a4cd3ba0e..98ddd13cb008e 100644 --- a/src/coreclr/src/vm/dispparammarshaler.cpp +++ b/src/coreclr/src/vm/dispparammarshaler.cpp @@ -465,7 +465,7 @@ void DispParamDelegateMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECT switch(V_VT(pSrcVar)) { -#ifdef BIT64 +#ifdef HOST_64BIT case VT_I8: pDelegate = reinterpret_cast(static_cast(V_I8(pSrcVar))); break; @@ -508,7 +508,7 @@ void DispParamDelegateMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARI SafeVariantClear(pDestVar); // Convert to VARIANT -#ifdef BIT64 +#ifdef HOST_64BIT V_VT(pDestVar) = VT_I8; #else V_VT(pDestVar) = VT_I4; @@ -517,7 +517,7 @@ void DispParamDelegateMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARI // ConvertToCallback automatically takes care of the pSrcObj == NULL case void *pDelegate = (void*) COMDelegate::ConvertToCallback(*pSrcObj); -#ifdef BIT64 +#ifdef HOST_64BIT V_I8(pDestVar) = static_cast(reinterpret_cast(pDelegate)); #else V_I4(pDestVar) = static_cast(reinterpret_cast(pDelegate)); diff --git a/src/coreclr/src/vm/dllimport.cpp b/src/coreclr/src/vm/dllimport.cpp index 41d800811823d..c17f4a0ca2d79 100644 --- a/src/coreclr/src/vm/dllimport.cpp +++ b/src/coreclr/src/vm/dllimport.cpp @@ -629,7 +629,7 @@ class ILStubState : public StubState pStubMD->SetStatic(); } -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // we store the real managed argument stack size in the stub MethodDesc on non-X86 UINT stackSize = pStubMD->SizeOfNativeArgStack(); @@ -637,7 
+637,7 @@ class ILStubState : public StubState COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX); pStubMD->AsDynamicMethodDesc()->SetNativeStackArgSize(static_cast(stackSize)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } DWORD cbTempModuleIndependentSigLength; @@ -871,7 +871,7 @@ class ILStubState : public StubState pcsDispatch->EmitCALL(METHOD__STUBHELPERS__SET_LAST_ERROR, 0, 0); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (SF_IsForwardDelegateStub(m_dwStubFlags)) { // the delegate may have an intercept stub attached to its sync block so we should @@ -879,7 +879,7 @@ class ILStubState : public StubState pcsDispatch->EmitLoadThis(); pcsDispatch->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0); } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) #ifdef VERIFY_HEAP if (SF_IsForwardStub(m_dwStubFlags) && g_pConfig->InteropValidatePinnedObjects()) @@ -972,7 +972,7 @@ class ILStubState : public StubState { // Struct marshal stubs don't actually call anything so they do not need the secrect parameter. } -#ifndef BIT64 +#ifndef HOST_64BIT else if (SF_IsForwardDelegateStub(m_dwStubFlags) || (SF_IsForwardCOMStub(m_dwStubFlags) && SF_IsWinRTDelegateStub(m_dwStubFlags))) { @@ -980,7 +980,7 @@ class ILStubState : public StubState // don't use the secret parameter. Except for AMD64 where we use the secret // argument to pass the real target to the stub-for-host. } -#endif // !BIT64 +#endif // !HOST_64BIT else { // All other IL stubs will need to use the secret parameter. @@ -1609,7 +1609,7 @@ class CLRToCOM_ILStubState : public ILStubState // convert 'this' to COM IP and the target method entry point m_slIL.EmitLoadRCWThis(pcsDispatch, m_dwStubFlags); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT if (SF_IsWinRTDelegateStub(m_dwStubFlags)) { // write the stub context (EEImplMethodDesc representing the Invoke) @@ -1622,7 +1622,7 @@ class CLRToCOM_ILStubState : public ILStubState pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1); } else -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT { m_slIL.EmitLoadStubContext(pcsDispatch, dwStubFlags); } @@ -1879,7 +1879,7 @@ void NDirectStubLinker::SetCallingConvention(CorPinvokeMap unmngCallConv, BOOL f LIMITED_METHOD_CONTRACT; ULONG uNativeCallingConv = 0; -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) if (fIsVarArg) { // The JIT has to use a different calling convention for unmanaged vararg targets on 64-bit and ARM: @@ -1887,7 +1887,7 @@ void NDirectStubLinker::SetCallingConvention(CorPinvokeMap unmngCallConv, BOOL f uNativeCallingConv = CORINFO_CALLCONV_NATIVEVARARG; } else -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 { switch (unmngCallConv) { @@ -2404,15 +2404,15 @@ void NDirectStubLinker::DoNDirect(ILCodeStream *pcsEmit, DWORD dwStubFlags, Meth // get the delegate unmanaged target - we call a helper instead of just grabbing // the _methodPtrAux field because we may need to intercept the call for host, etc. 
pcsEmit->EmitLoadThis(); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // on AMD64 GetDelegateTarget will return address of the generic stub for host when we are hosted // and update the secret argument with real target - the secret arg will be embedded in the // InlinedCallFrame by the JIT and fetched via TLS->Thread->Frame->Datum by the stub for host pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT_ADDR, 0, 1); -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT // we don't need to do this on x86 because stub for host is generated dynamically per target pcsEmit->EmitLDNULL(); -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_DELEGATE_TARGET, 2, 1); } else // direct invocation @@ -2425,7 +2425,7 @@ void NDirectStubLinker::DoNDirect(ILCodeStream *pcsEmit, DWORD dwStubFlags, Meth // for managed-to-unmanaged CALLI that requires marshaling, the target is passed // as the secret argument to the stub by GenericPInvokeCalliHelper (asmhelpers.asm) EmitLoadStubContext(pcsEmit, dwStubFlags); -#ifdef BIT64 +#ifdef HOST_64BIT // the secret arg has been shifted to left and ORed with 1 (see code:GenericPInvokeCalliHelper) pcsEmit->EmitLDC(1); pcsEmit->EmitSHR_UN(); @@ -2970,7 +2970,7 @@ PInvokeStaticSigInfo::PInvokeStaticSigInfo(MethodDesc* pMD, ThrowOnError throwOn case nltUnicode: nlt = nltUnicode; break; case nltAuto: -#ifdef PLATFORM_WINDOWS +#ifdef TARGET_WINDOWS nlt = nltUnicode; #else nlt = nltAnsi; // We don't have a utf8 charset in metadata yet, but ANSI == UTF-8 off-Windows @@ -3112,7 +3112,7 @@ void PInvokeStaticSigInfo::DllImportInit(MethodDesc* pMD, LPCUTF8 *ppLibName, LP } else if (charSetMask == pmCharSetAuto) { -#ifdef PLATFORM_WINDOWS +#ifdef TARGET_WINDOWS SetCharSet(nltUnicode); #else SetCharSet(nltAnsi); // We don't have a utf8 charset in metadata yet, but ANSI == UTF-8 off-Windows @@ -3127,7 +3127,7 @@ void PInvokeStaticSigInfo::DllImportInit(MethodDesc* pMD, LPCUTF8 *ppLibName, LP #if !defined(CROSSGEN_COMPILE) // IJW // This function would work, but be unused on Unix. Ifdefing out to avoid build errors due to the unused function. 
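// Hedged sketch of the encoding the GenericPInvokeCalliHelper comment above mentions: on
// 64-bit hosts the secret argument arrives shifted left by one and ORed with 1, and the
// stub recovers the original value with the emitted LDC 1 + SHR_UN (an unsigned right
// shift by one). The helper names below are invented purely for illustration.
#include <cstdint>

static uintptr_t TagSecretCookie(uintptr_t cookie)  { return (cookie << 1) | 1; }  // set the low tag bit
static bool      IsTaggedCookie(uintptr_t arg)      { return (arg & 1) != 0;    }
static uintptr_t UntagSecretCookie(uintptr_t arg)   { return arg >> 1;          }  // what LDC 1 + SHR_UN does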
-#if !defined (FEATURE_PAL) +#if !defined (TARGET_UNIX) static LPBYTE FollowIndirect(LPBYTE pTarget) { CONTRACT(LPBYTE) @@ -3145,12 +3145,12 @@ static LPBYTE FollowIndirect(LPBYTE pTarget) { AVInRuntimeImplOkayHolder AVOkay; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (pTarget != NULL && !(pTarget[0] != 0xff || pTarget[1] != 0x25)) { pRet = **(LPBYTE**)(pTarget + 2); } -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) if (pTarget != NULL && !(pTarget[0] != 0xff || pTarget[1] != 0x25)) { INT64 rva = *(INT32*)(pTarget + 2); @@ -3166,7 +3166,7 @@ static LPBYTE FollowIndirect(LPBYTE pTarget) RETURN pRet; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX BOOL HeuristicDoesThisLookLikeAGetLastErrorCall(LPBYTE pTarget) { @@ -3178,7 +3178,7 @@ BOOL HeuristicDoesThisLookLikeAGetLastErrorCall(LPBYTE pTarget) } CONTRACTL_END; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) static LPBYTE pGetLastError = NULL; if (!pGetLastError) { @@ -3213,7 +3213,7 @@ BOOL HeuristicDoesThisLookLikeAGetLastErrorCall(LPBYTE pTarget) // jmp [xxxx] - could be an import thunk return pTarget2 == pGetLastError; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return FALSE; } @@ -3266,11 +3266,11 @@ void PInvokeStaticSigInfo::BestGuessNDirectDefaults(MethodDesc* pMD) inline CorPinvokeMap GetDefaultCallConv(BOOL bIsVarArg) { -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX return pmCallConvCdecl; -#else // PLATFORM_UNIX +#else // TARGET_UNIX return bIsVarArg ? pmCallConvCdecl : pmCallConvStdcall; -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX } void PInvokeStaticSigInfo::InitCallConv(CorPinvokeMap callConv, BOOL bIsVarArg) @@ -3756,13 +3756,13 @@ static MarshalInfo::MarshalType DoMarshalReturnValue(MetaSig& msig, static inline UINT GetStackOffsetFromStackSize(UINT stackSize, bool fThisCall) { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (fThisCall) { // -1 means that the argument is not on the stack return (stackSize >= sizeof(SLOT) ? (stackSize - sizeof(SLOT)) : (UINT)-1); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 return stackSize; } @@ -3971,7 +3971,7 @@ static void CreateNDirectStubWorker(StubState* pss, { // We cannot just use pSig.GetReturnType() here since it will return ELEMENT_TYPE_VALUETYPE for enums. bool isReturnTypeValueType = msig.GetRetTypeHandleThrowing().GetVerifierCorElementType() == ELEMENT_TYPE_VALUETYPE; -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) // JIT32 has problems in generating code for pinvoke ILStubs which do a return in return buffer. // Therefore instead we change the signature of calli to return void and make the return buffer as first // argument. This matches the ABI i.e. return buffer is passed as first arg. So native target will get the @@ -3982,15 +3982,15 @@ static void CreateNDirectStubWorker(StubState* pss, #ifdef UNIX_X86_ABI // For functions with value type class, managed and unmanaged calling convention differ fMarshalReturnValueFirst = HasRetBuffArgUnmanagedFixup(&msig); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) fMarshalReturnValueFirst = HasRetBuffArg(&msig); #else // On Windows-X86, the native signature might need a return buffer when the managed doesn't (specifically when the native signature is a member function). 
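// Hedged sketch of the instruction pattern FollowIndirect (above) looks for: FF 25 is
// "jmp [mem]". On x86 the four bytes after the opcode are an absolute address of the
// import slot; on amd64 they are a RIP-relative displacement from the end of the
// six-byte instruction. This only computes the slot address and is illustrative,
// not the runtime's code.
#include <cstdint>
#include <cstring>

static uint8_t* IndirectJumpSlotSketch(uint8_t* pCode, bool targetIsAmd64)
{
    if (pCode == nullptr || pCode[0] != 0xFF || pCode[1] != 0x25)
        return nullptr;                              // not an indirect jmp

    int32_t operand;
    memcpy(&operand, pCode + 2, sizeof(operand));    // 4-byte operand after FF 25

    if (targetIsAmd64)
        return pCode + 6 + operand;                  // RIP-relative displacement
    return reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(static_cast<uint32_t>(operand))); // absolute address
}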
fMarshalReturnValueFirst = HasRetBuffArg(&msig) || (isInstanceMethod && isReturnTypeValueType); #endif // UNIX_X86_ABI -#elif defined(_TARGET_AMD64_) || defined (_TARGET_ARM64_) +#elif defined(TARGET_AMD64) || defined (TARGET_ARM64) fMarshalReturnValueFirst = isInstanceMethod && isReturnTypeValueType; -#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_ARM) #ifdef _WIN32 fReverseWithReturnBufferArg = fMarshalReturnValueFirst && SF_IsReverseStub(dwStubFlags); #endif @@ -4249,13 +4249,13 @@ static void CreateNDirectStubWorker(StubState* pss, // to sharing we come here only for the first call with given signature and // the target MD may even be NULL. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (fThisCall) { _ASSERTE(nativeStackSize >= sizeof(SLOT)); nativeStackSize -= sizeof(SLOT); } -#else // _TARGET_X86_ +#else // TARGET_X86 // // The algorithm to compute nativeStackSize on the fly is x86-specific. // Recompute the correct size for other platforms from the stub signature. @@ -4271,7 +4271,7 @@ static void CreateNDirectStubWorker(StubState* pss, { // native stack size is updated in code:ILStubState.SwapStubSignatures } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (!FitsInU2(nativeStackSize)) COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX); @@ -4309,7 +4309,7 @@ static CorNativeLinkType GetLinkTypeOfMethodTable(MethodTable* pMT) } else if (IsTdAutoClass(clFlags)) { -#ifdef PLATFORM_WINDOWS +#ifdef TARGET_WINDOWS nltType = nltUnicode; #else nltType = nltAnsi; // We don't have a utf8 charset in metadata yet, but ANSI == UTF-8 off-Windows @@ -4838,7 +4838,7 @@ void NDirect::PopulateNDirectMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSi pNMD->ndirect.m_pszEntrypointName.SetValueMaybeNull(szEntryPointName); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (ndirectflags & NDirectMethodDesc::kStdCall) { // Compute the kStdCallWithRetBuf flag which is needed at link time for entry point mangling. @@ -4853,7 +4853,7 @@ void NDirect::PopulateNDirectMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSi } } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Call this exactly ONCE per thread. Do not publish incomplete prestub flags // or you will introduce a race condition. 
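// Small sketch of the CharSet.Auto rule that appears in several hunks above
// (PInvokeStaticSigInfo, DllImportInit, GetLinkTypeOfMethodTable): "Auto" resolves to
// Unicode on Windows and to ANSI everywhere else, with ANSI treated as UTF-8 off-Windows.
// The enum below merely stands in for the runtime's CorNativeLinkType values.
enum class CharsetSketch { Ansi, Unicode };

static CharsetSketch ResolveAutoCharset()
{
#ifdef TARGET_WINDOWS
    return CharsetSketch::Unicode;   // Auto == Unicode on Windows
#else
    return CharsetSketch::Ansi;      // no UTF-8 charset in metadata yet; ANSI == UTF-8 off-Windows
#endif
}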
@@ -5441,7 +5441,7 @@ MethodDesc* CreateInteropILStub( ilStubCreatorHelper.SuppressRelease(); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (SF_IsForwardStub(dwStubFlags) && pTargetMD != NULL && !pTargetMD->IsVarArg()) { // copy the stack arg byte count from the stub MD to the target MD - this number is computed @@ -5472,7 +5472,7 @@ MethodDesc* CreateInteropILStub( } #endif // FEATURE_COMINTEROP } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) RETURN pStubMD; } @@ -6239,7 +6239,7 @@ class LoadLibErrorTracker DWORD priority; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX SetMessage(PAL_GetLoadLibraryError()); #else @@ -6292,7 +6292,7 @@ class LoadLibErrorTracker #if defined(__APPLE__) COMPlusThrow(kDllNotFoundException, IDS_EE_NDIRECT_LOADLIB_MAC, libraryNameOrPath.GetUnicode(), GetMessage()); -#elif defined(FEATURE_PAL) +#elif defined(TARGET_UNIX) COMPlusThrow(kDllNotFoundException, IDS_EE_NDIRECT_LOADLIB_LINUX, libraryNameOrPath.GetUnicode(), GetMessage()); #else // __APPLE__ HRESULT theHRESULT = GetHR(); @@ -6306,7 +6306,7 @@ class LoadLibErrorTracker GetHRMsg(theHRESULT, hrString); COMPlusThrow(kDllNotFoundException, IDS_EE_NDIRECT_LOADLIB_WIN, libraryNameOrPath.GetUnicode(), hrString); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX __UNREACHABLE(); } @@ -6340,7 +6340,7 @@ static NATIVE_LIBRARY_HANDLE LocalLoadLibraryHelper( LPCWSTR name, DWORD flags, NATIVE_LIBRARY_HANDLE hmod = NULL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if ((flags & 0xFFFFFF00) != 0) { @@ -6360,9 +6360,9 @@ static NATIVE_LIBRARY_HANDLE LocalLoadLibraryHelper( LPCWSTR name, DWORD flags, hmod = CLRLoadLibraryEx(name, NULL, flags & 0xFF); -#else // !FEATURE_PAL +#else // !TARGET_UNIX hmod = PAL_LoadLibraryDirect(name); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (hmod == NULL) { @@ -6375,13 +6375,13 @@ static NATIVE_LIBRARY_HANDLE LocalLoadLibraryHelper( LPCWSTR name, DWORD flags, #define TOLOWER(a) (((a) >= W('A') && (a) <= W('Z')) ? (W('a') + (a - W('A'))) : (a)) #define TOHEX(a) ((a)>=10 ? W('a')+(a)-10 : W('0')+(a)) -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_W PAL_SHLIB_SUFFIX_W #define PLATFORM_SHARED_LIB_PREFIX_W PAL_SHLIB_PREFIX_W -#else // !FEATURE_PAL +#else // !TARGET_UNIX #define PLATFORM_SHARED_LIB_SUFFIX_W W(".dll") #define PLATFORM_SHARED_LIB_PREFIX_W W("") -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // The Bit 0x2 has different semantics in DllImportSearchPath and LoadLibraryExA flags. // In DllImportSearchPath enum, bit 0x2 represents SearchAssemblyDirectory -- which is performed by CLR. 
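// A cautious reading of the comment above, as a sketch: bit 0x2 means
// SearchAssemblyDirectory in the DllImportSearchPath enum and is handled by the runtime
// itself, whereas the same bit value in LoadLibraryEx flags is LOAD_LIBRARY_AS_DATAFILE,
// so the bit cannot be passed through unchanged when translating one flag set into the
// other. Illustrative only; not the runtime's translation code.
#include <cstdint>

static uint32_t StripSearchAssemblyDirectoryBit(uint32_t dllImportSearchPathFlags)
{
    const uint32_t searchAssemblyDirectoryBit = 0x2;     // performed by the CLR, not the OS loader
    return dllImportSearchPathFlags & ~searchAssemblyDirectoryBit;
}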
@@ -6521,11 +6521,11 @@ void NDirect::FreeNativeLibrary(NATIVE_LIBRARY_HANDLE handle) STANDARD_VM_CONTRACT; _ASSERTE(handle != NULL); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL retVal = FreeLibrary(handle); -#else // !FEATURE_PAL +#else // !TARGET_UNIX BOOL retVal = PAL_FreeLibraryDirect(handle); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (retVal == 0) COMPlusThrow(kInvalidOperationException, W("Arg_InvalidOperationException")); @@ -6544,20 +6544,20 @@ INT_PTR NDirect::GetNativeLibraryExport(NATIVE_LIBRARY_HANDLE handle, LPCWSTR sy MAKE_UTF8PTR_FROMWIDE(lpstr, symbolName); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX INT_PTR address = reinterpret_cast(GetProcAddress((HMODULE)handle, lpstr)); if ((address == NULL) && throwOnError) COMPlusThrow(kEntryPointNotFoundException, IDS_EE_NDIRECT_GETPROCADDR_WIN_DLL, symbolName); -#else // !FEATURE_PAL +#else // !TARGET_UNIX INT_PTR address = reinterpret_cast(PAL_GetProcAddressDirect(handle, lpstr)); if ((address == NULL) && throwOnError) COMPlusThrow(kEntryPointNotFoundException, IDS_EE_NDIRECT_GETPROCADDR_UNIX_SO, symbolName); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return address; } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX BOOL IsWindowsAPISet(PCWSTR wszLibName) { STANDARD_VM_CONTRACT; @@ -6566,7 +6566,7 @@ BOOL IsWindowsAPISet(PCWSTR wszLibName) return SString::_wcsnicmp(wszLibName, W("api-"), 4) == 0 || SString::_wcsnicmp(wszLibName, W("ext-"), 4) == 0; } -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX // static NATIVE_LIBRARY_HANDLE NDirect::LoadLibraryModuleViaHost(NDirectMethodDesc * pMD, PCWSTR wszLibName) @@ -6575,13 +6575,13 @@ NATIVE_LIBRARY_HANDLE NDirect::LoadLibraryModuleViaHost(NDirectMethodDesc * pMD, //Dynamic Pinvoke Support: //Check if we need to provide the host a chance to provide the unmanaged dll -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX if (IsWindowsAPISet(wszLibName)) { // Prevent Overriding of Windows API sets. return NULL; } -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX NATIVE_LIBRARY_HANDLE hmod = NULL; AppDomain* pDomain = GetAppDomain(); @@ -6823,7 +6823,7 @@ NATIVE_LIBRARY_HANDLE NDirect::LoadFromNativeDllSearchDirectories(LPCWSTR libNam return hmod; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static const int MaxVariationCount = 4; static void DetermineLibNameVariations(const WCHAR** libNameVariations, int* numberOfVariations, const SString& libName, bool libNameIsRelativePath) { @@ -6884,7 +6884,7 @@ static void DetermineLibNameVariations(const WCHAR** libNameVariations, int* num *numberOfVariations = varCount; } -#else // FEATURE_PAL +#else // TARGET_UNIX static const int MaxVariationCount = 2; static void DetermineLibNameVariations(const WCHAR** libNameVariations, int* numberOfVariations, const SString& libName, bool libNameIsRelativePath) { @@ -6921,7 +6921,7 @@ static void DetermineLibNameVariations(const WCHAR** libNameVariations, int* num *numberOfVariations = varCount; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Search for the library and variants of its name in probing directories. //static @@ -6933,7 +6933,7 @@ NATIVE_LIBRARY_HANDLE NDirect::LoadLibraryModuleBySearch(Assembly *callingAssemb NATIVE_LIBRARY_HANDLE hmod = NULL; -#if defined(FEATURE_CORESYSTEM) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_CORESYSTEM) && !defined(TARGET_UNIX) // Try to go straight to System32 for Windows API sets. This is replicating quick check from // the OS implementation of api sets. 
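// Hedged sketch of the probing idea behind DetermineLibNameVariations above: with
// TARGET_UNIX the runtime tries up to four spellings of a DllImport name (combinations
// of the platform prefix/suffix, e.g. "lib" and ".so"), while on Windows it tries at
// most two (the name as given and with ".dll" appended). The exact conditions and
// ordering in the runtime differ; this only shows the shape.
#include <string>
#include <vector>

static std::vector<std::wstring> LibNameVariationsSketch(const std::wstring& name, bool isUnix)
{
    std::vector<std::wstring> variations;
    if (isUnix)
    {
        variations.push_back(name);                     // as given
        variations.push_back(L"lib" + name + L".so");   // prefix + suffix
        variations.push_back(name + L".so");            // suffix only
        variations.push_back(L"lib" + name);            // prefix only
    }
    else
    {
        variations.push_back(name);                     // as given
        variations.push_back(name + L".dll");           // canonical Windows suffix
    }
    return variations;
}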
if (IsWindowsAPISet(wszLibName)) @@ -6944,7 +6944,7 @@ NATIVE_LIBRARY_HANDLE NDirect::LoadLibraryModuleBySearch(Assembly *callingAssemb return hmod; } } -#endif // FEATURE_CORESYSTEM && !FEATURE_PAL +#endif // FEATURE_CORESYSTEM && !TARGET_UNIX AppDomain* pDomain = GetAppDomain(); DWORD loadWithAlteredPathFlags = GetLoadWithAlteredSearchPathFlag(); @@ -7170,7 +7170,7 @@ VOID NDirect::NDirectLink(NDirectMethodDesc *pMD) wszEPName[0] = W('?'); wszEPName[1] = W('\0'); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX COMPlusThrow(kEntryPointNotFoundException, IDS_EE_NDIRECT_GETPROCADDRESS_UNIX, ssLibName.GetUnicode(), wszEPName); #else COMPlusThrow(kEntryPointNotFoundException, IDS_EE_NDIRECT_GETPROCADDRESS_WIN, ssLibName.GetUnicode(), wszEPName); diff --git a/src/coreclr/src/vm/dllimportcallback.cpp b/src/coreclr/src/vm/dllimportcallback.cpp index 2f6c009d052b5..dd41b4d575753 100644 --- a/src/coreclr/src/vm/dllimportcallback.cpp +++ b/src/coreclr/src/vm/dllimportcallback.cpp @@ -105,7 +105,7 @@ class UMEntryThunkFreeList static UMEntryThunkFreeList s_thunkFreeList(DEFAULT_THUNK_FREE_LIST_THRESHOLD); -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) EXTERN_C VOID __cdecl UMThunkStubRareDisable(); EXTERN_C Thread* __stdcall CreateThreadBlockThrow(); @@ -754,7 +754,7 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat return pcpusl->Link(pLoaderHeap); } -#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#else // TARGET_X86 && !FEATURE_STUBS_AS_IL PCODE UMThunkMarshInfo::GetExecStubEntryPoint() { @@ -763,7 +763,7 @@ PCODE UMThunkMarshInfo::GetExecStubEntryPoint() return GetEEFuncEntryPoint(UMThunkStub); } -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL UMEntryThunkCache::UMEntryThunkCache(AppDomain *pDomain) : m_crst(CrstUMEntryThunkCache), @@ -1038,7 +1038,7 @@ UMThunkMarshInfo::~UMThunkMarshInfo() } CONTRACTL_END; -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) if (m_pExecStub) m_pExecStub->DecRef(); #endif @@ -1100,7 +1100,7 @@ VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc m_pModule = pModule; m_sig = sig; -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) INDEBUG(m_cbRetPop = 0xcccc;) #endif } @@ -1143,7 +1143,7 @@ VOID UMThunkMarshInfo::RunTimeInit() pFinalILStub = GetStubForInteropMethod(pMD, dwStubFlags, &pStubMD); } -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) PInvokeStaticSigInfo sigInfo; if (pMD != NULL) @@ -1190,7 +1190,7 @@ VOID UMThunkMarshInfo::RunTimeInit() pFinalExecStub->DecRef(); } -#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#else // TARGET_X86 && !FEATURE_STUBS_AS_IL if (pFinalILStub == NULL) { @@ -1223,7 +1223,7 @@ VOID UMThunkMarshInfo::RunTimeInit() } } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) MetaSig sig(pMD); int numRegistersUsed = 0; UINT16 cbRetPop = 0; @@ -1277,22 +1277,22 @@ VOID UMThunkMarshInfo::RunTimeInit() // For all the other calling convention except cdecl, callee pops the stack arguments m_cbRetPop = cbRetPop + static_cast(m_cbActualArgSize); } -#else // _TARGET_X86_ +#else // TARGET_X86 // // m_cbActualArgSize gets the number of arg bytes for the NATIVE signature // m_cbActualArgSize = (pStubMD != NULL) ? 
pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL // Must be the last thing we set! InterlockedCompareExchangeT(&m_pILStub, pFinalILStub, (PCODE)1); } -#if defined(_TARGET_X86_) && defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && defined(FEATURE_STUBS_AS_IL) VOID UMThunkMarshInfo::SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst) { MethodDesc *pMD = GetMethod(); @@ -1370,7 +1370,7 @@ EXTERN_C VOID STDCALL UMThunkStubSetupArgumentsWorker(UMThunkMarshInfo *pMarshIn { pMarshInfo->SetupArguments(pSrc, pArgRegs, pDst); } -#endif // _TARGET_X86_ && FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && FEATURE_STUBS_AS_IL #ifdef _DEBUG void STDCALL LogUMTransition(UMEntryThunk* thunk) diff --git a/src/coreclr/src/vm/dllimportcallback.h b/src/coreclr/src/vm/dllimportcallback.h index 6fe9e6441f1a2..0b3414ffc1696 100644 --- a/src/coreclr/src/vm/dllimportcallback.h +++ b/src/coreclr/src/vm/dllimportcallback.h @@ -23,10 +23,10 @@ enum UMThunkStubFlags umtmlThisCall = 0x0002, umtmlThisCallHiddenArg = 0x0004, umtmlFpu = 0x0008, -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // the signature is trivial so stub need not be generated and the target can be called directly umtmlSkipStub = 0x0080, -#endif // _TARGET_X86_ +#endif // TARGET_X86 }; #include @@ -109,7 +109,7 @@ class UMThunkMarshInfo return m_pMD; } -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) PCODE GetExecStubEntryPoint() { WRAPPER_NO_CONTRACT; @@ -198,13 +198,13 @@ class UMThunkMarshInfo return (UINT32)offsetof(UMThunkMarshInfo, m_pILStub); } -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) // Compiles an unmanaged to managed thunk for the given signature. The thunk // will call the stub or, if fNoStub == TRUE, directly the managed target. 
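// Minimal sketch of the x86 stack-cleanup rule behind m_cbRetPop in the RunTimeInit hunk
// above: with cdecl the caller pops the arguments, so only the base pop amount remains,
// while with the other conventions (stdcall, thiscall) the callee pops the argument bytes
// as well. Names below are illustrative, not the runtime's.
static unsigned RetPopBytesSketch(bool isCdecl, unsigned basePopBytes, unsigned stackArgBytes)
{
    return isCdecl ? basePopBytes                    // caller cleans up the arguments
                   : basePopBytes + stackArgBytes;   // callee cleans up the arguments too
}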
Stub *CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub); -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL -#if defined(_TARGET_X86_) && defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && defined(FEATURE_STUBS_AS_IL) struct ArgumentRegisters { UINT32 Ecx; @@ -212,7 +212,7 @@ class UMThunkMarshInfo }; VOID SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst); -#endif // _TARGET_X86_ && FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && FEATURE_STUBS_AS_IL private: PCODE m_pILStub; // IL stub for marshaling @@ -220,7 +220,7 @@ class UMThunkMarshInfo // On non-x86, the managed entrypoint for no-delegate no-marshal signatures UINT32 m_cbActualArgSize; // caches m_pSig.SizeOfFrameArgumentArray() // On x86/Linux we have to augment with numRegistersUsed * STACK_ELEM_SIZE -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) UINT16 m_cbRetPop; // stack bytes popped by callee (for UpdateRegDisplay) #if defined(FEATURE_STUBS_AS_IL) UINT32 m_cbStackArgSize; // stack bytes pushed for managed code @@ -228,7 +228,7 @@ class UMThunkMarshInfo Stub* m_pExecStub; // UMEntryThunk jumps directly here UINT16 m_callConv; // unmanaged calling convention and flags (CorPinvokeMap) #endif // FEATURE_STUBS_AS_IL -#endif // _TARGET_X86_ +#endif // TARGET_X86 MethodDesc * m_pMD; // maybe null Module * m_pModule; @@ -264,7 +264,7 @@ class UMEntryThunk static UMEntryThunk* CreateUMEntryThunk(); static VOID FreeUMEntryThunk(UMEntryThunk* p); -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) // Compiles an unmanaged to managed thunk with the given calling convention adaptation. // - psrcofsregs are stack offsets that should be loaded to argument registers (ECX, EDX) // - psrcofs are stack offsets that should be repushed for the managed target @@ -279,7 +279,7 @@ class UMEntryThunk UINT *psrcofsregs, UINT *psrcofs, UINT retbufofs); -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL #ifndef DACCESS_COMPILE VOID LoadTimeInit(PCODE pManagedTarget, @@ -538,20 +538,20 @@ class UMEntryThunkCache AppDomain *m_pDomain; }; -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) //------------------------------------------------------------------------- // One-time creation of special prestub to initialize UMEntryThunks. 
//------------------------------------------------------------------------- Stub *GenerateUMThunkPrestub(); -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL //------------------------------------------------------------------------- // NExport stub //------------------------------------------------------------------------- -#if !defined(BIT64) && !defined(DACCESS_COMPILE) && !defined(CROSS_COMPILE) +#if !defined(HOST_64BIT) && !defined(DACCESS_COMPILE) && !defined(CROSS_COMPILE) EXCEPTION_HANDLER_DECL(FastNExportExceptHandler); EXCEPTION_HANDLER_DECL(UMThunkPrestubHandler); -#endif // BIT64 +#endif // HOST_64BIT extern "C" void TheUMEntryPrestub(void); extern "C" PCODE TheUMEntryPrestubWorker(UMEntryThunk * pUMEntryThunk); diff --git a/src/coreclr/src/vm/domainfile.cpp b/src/coreclr/src/vm/domainfile.cpp index 0a30958815194..2adb813cf16ef 100644 --- a/src/coreclr/src/vm/domainfile.cpp +++ b/src/coreclr/src/vm/domainfile.cpp @@ -1641,7 +1641,7 @@ void GetNGenCpuInfo(CORINFO_CPU * cpuInfo) { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static CORINFO_CPU ngenCpuInfo = { @@ -1653,11 +1653,11 @@ void GetNGenCpuInfo(CORINFO_CPU * cpuInfo) // We always generate P3-compatible code on CoreCLR *cpuInfo = ngenCpuInfo; -#else // _TARGET_X86_ +#else // TARGET_X86 cpuInfo->dwCPUType = 0; cpuInfo->dwFeatures = 0; cpuInfo->dwExtendedFeatures = 0; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } // -------------------------------------------------------------------------------- @@ -1681,7 +1681,7 @@ void DomainAssembly::GetCurrentVersionInfo(CORCOMPILE_VERSION_INFO *pNativeVersi &fForceProfiling, &fForceInstrument); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pNativeVersionInfo->wOSPlatformID = VER_PLATFORM_WIN32_NT; #else pNativeVersionInfo->wOSPlatformID = VER_PLATFORM_UNIX; diff --git a/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.cpp b/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.cpp index 032b94f825d0c..e4bc7d527d8ec 100644 --- a/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.cpp +++ b/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.cpp @@ -10,7 +10,7 @@ #ifdef FEATURE_PERFTRACING -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX void DumpDiagnosticProtocolHelper::HandleIpcMessage(DiagnosticsIpc::IpcMessage& message, IpcStream* pStream) { @@ -115,6 +115,6 @@ void DumpDiagnosticProtocolHelper::GenerateCoreDump(DiagnosticsIpc::IpcMessage& delete pStream; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #endif // FEATURE_PERFTRACING diff --git a/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.h b/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.h index a5e0aef89deed..47be69d02625f 100644 --- a/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.h +++ b/src/coreclr/src/vm/dumpdiagnosticprotocolhelper.h @@ -41,7 +41,7 @@ class DumpDiagnosticProtocolHelper { public: // IPC event handlers. 
-#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static void GenerateCoreDump(DiagnosticsIpc::IpcMessage& message, IpcStream *pStream); // `dotnet-dump collect` static void HandleIpcMessage(DiagnosticsIpc::IpcMessage& message, IpcStream* pStream); #endif diff --git a/src/coreclr/src/vm/dynamicmethod.cpp b/src/coreclr/src/vm/dynamicmethod.cpp index b25dfb73e2774..da8cf538d8a39 100644 --- a/src/coreclr/src/vm/dynamicmethod.cpp +++ b/src/coreclr/src/vm/dynamicmethod.cpp @@ -456,7 +456,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) pHp->maxCodeHeapSize = m_TotalBytesAvailable - pTracker->size; pHp->reserveForJumpStubs = 0; -#ifdef BIT64 +#ifdef HOST_64BIT emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException); #endif @@ -986,10 +986,10 @@ void LCGMethodResolver::Destroy() if (m_recordCodePointer) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Remove the unwind information (if applicable) UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)m_recordCodePointer); -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) HostCodeHeap *pHeap = HostCodeHeap::GetCodeHeap((TADDR)m_recordCodePointer); LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", this, pHeap, *(size_t*)pHeap)); diff --git a/src/coreclr/src/vm/ecall.cpp b/src/coreclr/src/vm/ecall.cpp index 7c3fbf2d0c341..4d0ad323f2307 100644 --- a/src/coreclr/src/vm/ecall.cpp +++ b/src/coreclr/src/vm/ecall.cpp @@ -501,7 +501,7 @@ PCODE ECall::GetFCallImpl(MethodDesc * pMD, BOOL * pfSharedOrDynamicFCallImpl /* // Use the ECFunc address as a unique fake entrypoint to make the entrypoint<->MethodDesc mapping work PCODE pImplementation = (PCODE)ret; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM pImplementation |= THUMB_CODE; #endif diff --git a/src/coreclr/src/vm/ecalllist.h b/src/coreclr/src/vm/ecalllist.h index 667f328e6e2c1..4f1b50c12058a 100644 --- a/src/coreclr/src/vm/ecalllist.h +++ b/src/coreclr/src/vm/ecalllist.h @@ -146,12 +146,12 @@ FCFuncStart(gDiagnosticsStackTrace) FCFuncEnd() FCFuncStart(gDateTimeFuncs) -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) FCFuncElement("GetSystemTimeWithLeapSecondsHandling", SystemNative::GetSystemTimeWithLeapSecondsHandling) FCFuncElement("ValidateSystemTime", SystemNative::ValidateSystemTime) FCFuncElement("FileTimeToSystemTime", SystemNative::FileTimeToSystemTime) FCFuncElement("SystemTimeToFileTime", SystemNative::SystemTimeToFileTime) -#endif // FEATURE_PAL +#endif // TARGET_UNIX FCFuncElement("GetSystemTimeAsFileTime", SystemNative::__GetSystemTimeAsFileTime) FCFuncEnd() @@ -503,7 +503,7 @@ FCFuncStart(gAssemblyLoadContextFuncs) #ifdef FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION QCFuncElement("LoadTypeForWinRTTypeNameInContextInternal", AssemblyNative::LoadTypeForWinRTTypeNameInContext) #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX QCFuncElement("LoadFromInMemoryModuleInternal", AssemblyNative::LoadFromInMemoryModule) #endif QCFuncElement("GetLoadContextForAssembly", AssemblyNative::GetLoadContextForAssembly) @@ -1037,9 +1037,9 @@ FCFuncStart(gStubHelperFuncs) FCFuncElement("ValidateByref", StubHelpers::ValidateByref) FCFuncElement("LogPinnedArgument", StubHelpers::LogPinnedArgument) FCIntrinsic("GetStubContext", StubHelpers::GetStubContext, CORINFO_INTRINSIC_StubHelpers_GetStubContext) -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT FCIntrinsic("GetStubContextAddr", StubHelpers::GetStubContextAddr, CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr) -#endif // _TARGET_64BIT_ +#endif // 
TARGET_64BIT #ifdef FEATURE_ARRAYSTUB_AS_IL FCFuncElement("ArrayTypeCheck", StubHelpers::ArrayTypeCheck) #endif //FEATURE_ARRAYSTUB_AS_IL @@ -1115,7 +1115,7 @@ FCFuncStart(gWeakReferenceOfTFuncs) FCFuncElement("IsTrackResurrection", WeakReferenceOfTNative::IsTrackResurrection) FCFuncEnd() -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX FCFuncStart(gPalKernel32Funcs) QCFuncElement("CloseHandle", CloseHandle) QCFuncElement("CreateEvent", CreateEventW) @@ -1234,7 +1234,7 @@ FCClassElement("IReflect", "System.Reflection", gStdMngIReflectFuncs) FCClassElement("InterfaceMarshaler", "System.StubHelpers", gInterfaceMarshalerFuncs) #endif FCClassElement("Interlocked", "System.Threading", gInterlockedFuncs) -#if FEATURE_PAL +#if TARGET_UNIX FCClassElement("Kernel32", "", gPalKernel32Funcs) #endif FCClassElement("LoaderAllocatorScout", "System.Reflection", gLoaderAllocatorFuncs) @@ -1264,7 +1264,7 @@ FCClassElement("Object", "System", gObjectFuncs) #ifdef FEATURE_COMINTEROP FCClassElement("ObjectMarshaler", "System.StubHelpers", gObjectMarshalerFuncs) #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX FCClassElement("Ole32", "", gPalOle32Funcs) FCClassElement("OleAut32", "", gPalOleAut32Funcs) #endif diff --git a/src/coreclr/src/vm/eeconfig.cpp b/src/coreclr/src/vm/eeconfig.cpp index 909e9c687fd46..597c4476165b8 100644 --- a/src/coreclr/src/vm/eeconfig.cpp +++ b/src/coreclr/src/vm/eeconfig.cpp @@ -281,11 +281,11 @@ HRESULT EEConfig::Init() pZapSet = DEFAULT_ZAP_SET; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) dwDisableStackwalkCache = 0; -#else // _TARGET_X86_ +#else // TARGET_X86 dwDisableStackwalkCache = 1; -#endif // _TARGET_X86_ +#endif // TARGET_X86 szZapBBInstr = NULL; szZapBBInstrDir = NULL; @@ -320,7 +320,7 @@ HRESULT EEConfig::Init() fSuppressLockViolationsOnReentryFromOS = false; #endif -#if defined(_DEBUG) && defined(_TARGET_AMD64_) +#if defined(_DEBUG) && defined(TARGET_AMD64) // For determining if we should force generation of long jump dispatch stubs. 
m_cGenerateLongJumpDispatchStubRatio = (size_t)(-1); m_cDispatchStubsGenerated = 0; @@ -804,7 +804,7 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ #endif //STRESS_HEAP -#ifdef BIT64 +#ifdef HOST_64BIT iGCAffinityMask = GetConfigULONGLONG_DontUse_(CLRConfig::EXTERNAL_GCHeapAffinitizeMask, iGCAffinityMask); if (!iGCAffinityMask) iGCAffinityMask = Configuration::GetKnobULONGLONGValue(W("System.GC.HeapAffinitizeMask")); if (!iGCSegmentSize) iGCSegmentSize = GetConfigULONGLONG_DontUse_(CLRConfig::UNSUPPORTED_GCSegmentSize, iGCSegmentSize); @@ -814,7 +814,7 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ if (!iGCAffinityMask) iGCAffinityMask = Configuration::GetKnobDWORDValue(W("System.GC.HeapAffinitizeMask"), 0); if (!iGCSegmentSize) iGCSegmentSize = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCSegmentSize, iGCSegmentSize); if (!iGCgen0size) iGCgen0size = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCgen0size, iGCgen0size); -#endif //BIT64 +#endif //HOST_64BIT const ULONGLONG ullHeapHardLimit = Configuration::GetKnobULONGLONGValue(W("System.GC.HeapHardLimit")); iGCHeapHardLimit = FitsIn(ullHeapHardLimit) @@ -846,7 +846,7 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ iGCConservative = (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_gcConservative) != 0); #endif // FEATURE_CONSERVATIVE_GC -#ifdef BIT64 +#ifdef HOST_64BIT iGCAllowVeryLargeObjects = (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_gcAllowVeryLargeObjects) != 0); #endif @@ -998,7 +998,7 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ iJitOptimizeType = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_JitOptimizeType, iJitOptimizeType); if (iJitOptimizeType > OPT_RANDOM) iJitOptimizeType = OPT_DEFAULT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 fPInvokeRestoreEsp = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Jit_NetFx40PInvokeStackResilience); #endif @@ -1176,7 +1176,7 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ } -#if defined(_DEBUG) && defined(_TARGET_AMD64_) +#if defined(_DEBUG) && defined(TARGET_AMD64) m_cGenerateLongJumpDispatchStubRatio = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_GenerateLongJumpDispatchStubRatio, static_cast(m_cGenerateLongJumpDispatchStubRatio)); #endif @@ -1222,11 +1222,11 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ tieredCompilation_CallCountingDelayMs = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_CallCountingDelayMs); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX bool hadSingleProcessorAtStartup = CPUGroupInfo::HadSingleProcessorAtStartup(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX bool hadSingleProcessorAtStartup = g_SystemInfo.dwNumberOfProcessors == 1; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (hadSingleProcessorAtStartup) { DWORD delayMultiplier = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TC_DelaySingleProcMultiplier); diff --git a/src/coreclr/src/vm/eeconfig.h b/src/coreclr/src/vm/eeconfig.h index e3772cb46d190..8f3e19e5947a7 100644 --- a/src/coreclr/src/vm/eeconfig.h +++ b/src/coreclr/src/vm/eeconfig.h @@ -626,7 +626,7 @@ class EEConfig #ifdef FEATURE_CONSERVATIVE_GC bool GetGCConservative() const {LIMITED_METHOD_CONTRACT; return iGCConservative;} #endif -#ifdef BIT64 +#ifdef HOST_64BIT bool GetGCAllowVeryLargeObjects() const {LIMITED_METHOD_CONTRACT; return iGCAllowVeryLargeObjects;} #endif #ifdef _DEBUG @@ -931,9 +931,9 @@ class EEConfig #ifdef 
FEATURE_CONSERVATIVE_GC bool iGCConservative; #endif // FEATURE_CONSERVATIVE_GC -#ifdef BIT64 +#ifdef HOST_64BIT bool iGCAllowVeryLargeObjects; -#endif // BIT64 +#endif // HOST_64BIT bool fGCBreakOnOOM; @@ -1063,7 +1063,7 @@ class EEConfig #endif #if defined(_DEBUG) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) private: // Defaults to 0, which means we will not generate long jump dispatch stubs. @@ -1088,7 +1088,7 @@ class EEConfig { return FALSE; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // _DEBUG #if defined(_DEBUG) diff --git a/src/coreclr/src/vm/eehash.h b/src/coreclr/src/vm/eehash.h index 536058e510ba5..20a1c3494ac5a 100644 --- a/src/coreclr/src/vm/eehash.h +++ b/src/coreclr/src/vm/eehash.h @@ -332,7 +332,7 @@ class EEPtrPlusIntHashTableHelper LIMITED_METHOD_CONTRACT; return (DWORD)ppiKey.iValue ^ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 (DWORD)(size_t) ppiKey.pValue; #else // IA64: Is this a good hashing mechanism on IA64? @@ -529,7 +529,7 @@ class EEPtrHashTableHelper LIMITED_METHOD_CONTRACT; SUPPORTS_DAC; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return (DWORD)(size_t) dac_cast(pKey); #else // IA64: Is this a good hashing mechanism on IA64? diff --git a/src/coreclr/src/vm/eepolicy.cpp b/src/coreclr/src/vm/eepolicy.cpp index 8bf682327a47c..e1de20a54b25c 100644 --- a/src/coreclr/src/vm/eepolicy.cpp +++ b/src/coreclr/src/vm/eepolicy.cpp @@ -21,9 +21,9 @@ #include "typestring.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "dwreport.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #include "eventtrace.h" #undef ExitProcess @@ -505,7 +505,7 @@ void SafeExitProcess(UINT exitCode, BOOL fAbort = FALSE, ShutdownCompleteAction // Watson code CONTRACT_VIOLATION(ThrowsViolation); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (fAbort) { TerminateProcess(GetCurrentProcess(), exitCode); @@ -888,7 +888,7 @@ void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage GetClrInstanceId()); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Write an event log entry. We do allocate some resources here (spread between the stack and maybe the heap for longer // messages), so it's possible for the event write to fail. If needs be we can use a more elaborate scheme here in the future // (maybe trying multiple approaches and backing off on failure, falling back on a limited size static buffer as a last @@ -958,7 +958,7 @@ void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage { } EX_END_CATCH(SwallowAllExceptions) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef _DEBUG // If we're native-only (Win32) debugging this process, we'd love to break now. @@ -1107,7 +1107,7 @@ void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pE fef.InitAndLink(pExceptionInfo->ContextRecord); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (IsWatsonEnabled() && (g_pDebugInterface != NULL)) { _ASSERTE(pExceptionInfo != NULL); @@ -1117,7 +1117,7 @@ void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pE param.pExceptionRecord = pExceptionInfo->ExceptionRecord; g_pDebugInterface->RequestFavor(ResetWatsonBucketsFavorWorker, reinterpret_cast(¶m)); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX WatsonLastChance(pThread, pExceptionInfo, (fTreatAsNativeUnhandledException == FALSE)? 
TypeOfReportedError::UnhandledException: TypeOfReportedError::NativeThreadUnhandledException); @@ -1127,7 +1127,7 @@ void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pE UNREACHABLE(); } -#if defined(_TARGET_X86_) && defined(PLATFORM_WINDOWS) +#if defined(TARGET_X86) && defined(TARGET_WINDOWS) // This noinline method is required to ensure that RtlCaptureContext captures // the context of HandleFatalError. On x86 RtlCaptureContext will not capture // the current method's context @@ -1141,7 +1141,7 @@ int NOINLINE WrapperClrCaptureContext(CONTEXT* context) return 0; } #pragma optimize("", on) -#endif // defined(_TARGET_X86_) && defined(PLATFORM_WINDOWS) +#endif // defined(TARGET_X86) && defined(TARGET_WINDOWS) // This method must return a value to avoid getting non-actionable dumps on x86. // If this method were a DECLSPEC_NORETURN then dumps would not provide the necessary @@ -1164,10 +1164,10 @@ int NOINLINE EEPolicy::HandleFatalError(UINT exitCode, UINT_PTR address, LPCWSTR ZeroMemory(&context, sizeof(context)); context.ContextFlags = CONTEXT_CONTROL; -#if defined(_TARGET_X86_) && defined(PLATFORM_WINDOWS) +#if defined(TARGET_X86) && defined(TARGET_WINDOWS) // Add a frame to ensure that the context captured is this method and not the caller WrapperClrCaptureContext(&context); -#else // defined(_TARGET_X86_) && defined(PLATFORM_WINDOWS) +#else // defined(TARGET_X86) && defined(TARGET_WINDOWS) ClrCaptureContext(&context); #endif @@ -1265,9 +1265,9 @@ void EEPolicy::HandleCodeContractFailure(LPCWSTR pMessage, LPCWSTR pCondition, L // Since we have no exception object, make sure // UE tracker is clean so that RetrieveManagedBucketParameters // does not take any bucket details. -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pThread->GetExceptionState()->GetUEWatsonBucketTracker()->ClearWatsonBucketDetails(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX pPolicy->HandleFatalError(COR_E_CODECONTRACTFAILED, NULL, pMessage); break; } diff --git a/src/coreclr/src/vm/eetoprofinterfaceimpl.cpp b/src/coreclr/src/vm/eetoprofinterfaceimpl.cpp index f0ce71bb23f8a..bdd615a6094ec 100644 --- a/src/coreclr/src/vm/eetoprofinterfaceimpl.cpp +++ b/src/coreclr/src/vm/eetoprofinterfaceimpl.cpp @@ -979,7 +979,7 @@ EEToProfInterfaceImpl::~EEToProfInterfaceImpl() if (m_pSavedAllocDataBlock) { -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE((UINT_PTR)m_pSavedAllocDataBlock != 0xFFFFFFFFFFFFFFFF); #else _ASSERTE((UINT_PTR)m_pSavedAllocDataBlock != 0xFFFFFFFF); @@ -5753,7 +5753,7 @@ HRESULT EEToProfInterfaceImpl::MovedReferences(GCReferencesData *pData) return hr; } -#ifdef BIT64 +#ifdef HOST_64BIT // Recompute sizes as ULONGs for legacy callback for (ULONG i = 0; i < pData->curIdx; i++) pData->arrULONG[i] = (pData->arrMemBlockSize[i] > UINT32_MAX) ? UINT32_MAX : (ULONG)pData->arrMemBlockSize[i]; @@ -5783,7 +5783,7 @@ HRESULT EEToProfInterfaceImpl::MovedReferences(GCReferencesData *pData) return hr; } -#ifdef BIT64 +#ifdef HOST_64BIT // Recompute sizes as ULONGs for legacy callback for (ULONG i = 0; i < pData->curIdx; i++) pData->arrULONG[i] = (pData->arrMemBlockSize[i] > UINT32_MAX) ? UINT32_MAX : (ULONG)pData->arrMemBlockSize[i]; diff --git a/src/coreclr/src/vm/eetwain.cpp b/src/coreclr/src/vm/eetwain.cpp index ad01bab3a9cc3..d55d22bdf8370 100644 --- a/src/coreclr/src/vm/eetwain.cpp +++ b/src/coreclr/src/vm/eetwain.cpp @@ -841,7 +841,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, // Grab a copy of the context before the EnC update. 
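// Sketch of the 64-bit clamping the MovedReferences hunks above perform before invoking
// the legacy profiler callback: region sizes are tracked as SIZE_T, but the old callback
// takes ULONG, so anything larger than UINT32_MAX is saturated rather than truncated.
#include <cstddef>
#include <cstdint>

static uint32_t ClampSizeForLegacyCallback(size_t regionSize)
{
    return regionSize > UINT32_MAX ? UINT32_MAX
                                   : static_cast<uint32_t>(regionSize);
}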
T_CONTEXT oldCtx = *pCtx; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) LOG((LF_CORDB, LL_INFO100, "EECM::FixContextForEnC\n")); /* Extract the necessary information from the info block header */ @@ -910,7 +910,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: Checks out\n")); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // Strategy for zeroing out the frame on x64: // @@ -1199,7 +1199,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, * EnC_FAIL, as this should be a transacted commit, **=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Zero out all the registers as some may hold new variables. pCtx->Eax = pCtx->Ecx = pCtx->Edx = pCtx->Ebx = pCtx->Esi = pCtx->Edi = 0; @@ -1219,7 +1219,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, _ASSERTE( frameHeaderSize <= oldInfo.stackSize ); _ASSERTE( GetSizeOfFrameHeaderForEnC( &oldInfo ) == frameHeaderSize ); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // Next few statements zero out all registers that may end up holding new variables. @@ -1294,7 +1294,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, // Sanity-check that the range we're clearing contains all of the stack variables -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) const ICorDebugInfo::VarLoc &varLoc = newMethodVarsSortedBase[i].loc; if( varLoc.vlType == ICorDebugInfo::VLT_STK ) { @@ -1314,7 +1314,7 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, // and so (since the stack grows towards 0) can't easily determine where the end of // the local lies. } -#elif defined (_TARGET_AMD64_) +#elif defined (TARGET_AMD64) switch(newMethodVarsSortedBase[i].loc.vlType) { default: @@ -1368,9 +1368,9 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx, // Clear the local and temporary stack space -#if defined (_TARGET_X86_) +#if defined (TARGET_X86) memset((void*)(size_t)(pCtx->Esp), 0, newInfo.stackSize - frameHeaderSize ); -#elif defined (_TARGET_AMD64_) +#elif defined (TARGET_AMD64) memset((void*)newStackBase, 0, newFixedStackSize - frameHeaderSize); // On AMD64, after zeroing out the stack, restore the security object and PSPSym... 
@@ -1456,7 +1456,7 @@ bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo, return gcInfoDecoder.IsInterruptible(); } -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) bool EECodeManager::HasTailCalls( EECodeInfo *pCodeInfo) { CONTRACTL { @@ -1474,9 +1474,9 @@ bool EECodeManager::HasTailCalls( EECodeInfo *pCodeInfo) return gcInfoDecoder.HasTailCalls(); } -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 -#if defined(_TARGET_AMD64_) && defined(_DEBUG) +#if defined(TARGET_AMD64) && defined(_DEBUG) struct FindEndOfLastInterruptibleRegionState { @@ -1553,7 +1553,7 @@ unsigned EECodeManager::FindEndOfLastInterruptibleRegion(unsigned curOffset, #endif // #ifndef DACCESS_COMPILE } -#endif // _TARGET_AMD64_ && _DEBUG +#endif // TARGET_AMD64 && _DEBUG #else // !USE_GC_INFO_DECODER @@ -3195,7 +3195,7 @@ void EECodeManager::QuickUnwindStackFrame(PREGDISPLAY pRD, StackwalkCacheEntry * _ASSERTE(pCacheEntry); _ASSERTE(GetControlPC(pRD) == (PCODE)(pCacheEntry->IP)); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) _ASSERTE(flag == UnwindCurrentStackFrame); _ASSERTE(!pCacheEntry->fUseEbp || pCacheEntry->fUseEbpAsFrameReg); @@ -3219,7 +3219,7 @@ void EECodeManager::QuickUnwindStackFrame(PREGDISPLAY pRD, StackwalkCacheEntry * pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr); pRD->SP += sizeof(void*) + pCacheEntry->argSize; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) if (pRD->IsCallerContextValid) { pRD->pCurrentContext->Rbp = pRD->pCallerContext->Rbp; @@ -3265,14 +3265,14 @@ void EECodeManager::QuickUnwindStackFrame(PREGDISPLAY pRD, StackwalkCacheEntry * pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary. } -#else // !_TARGET_X86_ && !_TARGET_AMD64_ +#else // !TARGET_X86 && !TARGET_AMD64 PORTABILITY_ASSERT("EECodeManager::QuickUnwindStackFrame is not implemented on this platform."); -#endif // !_TARGET_X86_ && !_TARGET_AMD64_ +#endif // !TARGET_X86 && !TARGET_AMD64 } #endif // HAS_QUICKUNWIND /*****************************************************************************/ -#ifdef _TARGET_X86_ // UnwindStackFrame +#ifdef TARGET_X86 // UnwindStackFrame /*****************************************************************************/ const RegMask CALLEE_SAVED_REGISTERS_MASK[] = @@ -4115,10 +4115,10 @@ bool UnwindStackFrame(PREGDISPLAY pContext, return true; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifdef FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 size_t EECodeManager::GetResumeSp( PCONTEXT pContext ) { PCODE currentPc = PCODE(pContext->Eip); @@ -4162,7 +4162,7 @@ size_t EECodeManager::GetResumeSp( PCONTEXT pContext ) const size_t curEBP = (size_t)(pContext->Ebp); return GetOutermostBaseFP(curEBP, info); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS #ifndef CROSSGEN_COMPILE @@ -4184,9 +4184,9 @@ bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext, CodeManState *pState, StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 return ::UnwindStackFrame(pContext, pCodeInfo, flags, pState, pUnwindInfo); -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("EECodeManager::UnwindStackFrame"); return false; #endif // _TARGET_???_ @@ -4207,7 +4207,7 @@ bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext, GC_NOTRIGGER; } CONTRACTL_END; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // To avoid unnecessary computation, we only crack the unwind info if 
pUnwindInfo is not NULL, which only happens // if the LIGHTUNWIND flag is passed to StackWalkFramesEx(). if (pUnwindInfo != NULL) @@ -4215,7 +4215,7 @@ bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext, pCodeInfo->GetOffsetsFromUnwindInfo(&(pUnwindInfo->RSPOffsetFromUnwindInfo), &(pUnwindInfo->RBPOffset)); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 _ASSERTE(pCodeInfo != NULL); Thread::VirtualUnwindCallFrame(pContext, pCodeInfo); @@ -4246,7 +4246,7 @@ void promoteVarArgs(PTR_BYTE argsStart, PTR_VASigCookie varArgSig, GCCONTEXT* ct ArgIterator argit(&msig); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // For the X86 target the JIT does not report any of the fixed args for a varargs method // So we report the fixed args via the promoteArgs call below bool skipFixedArgs = false; @@ -5008,11 +5008,11 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD, unsigned curOffs = pCodeInfo->GetRelOffset(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM, the low-order bit of an instruction pointer indicates Thumb vs. ARM mode. // Mask this off; all instructions are two-byte aligned. curOffs &= (~THUMB_CODE); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #ifdef _DEBUG // Get the name of the current method @@ -5109,11 +5109,11 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD, curOffs = relOffsetOverride; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM, the low-order bit of an instruction pointer indicates Thumb vs. ARM mode. // Mask this off; all instructions are two-byte aligned. curOffs &= (~THUMB_CODE); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM LOG((LF_GCINFO, LL_INFO1000, "Adjusted GC reporting offset to provided override offset. Now reporting GC refs for %s at offset %04x.\n", methodName, curOffs)); @@ -5178,7 +5178,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD, // This does not apply to x86 because of how it handles varargs (it never // reports the arguments from the explicit method signature). // -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // // SPECIAL CASE: // IL marshaling stubs have signatures that are marked as vararg, @@ -5198,7 +5198,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD, { return true; } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 LOG((LF_GCINFO, LL_INFO100, "Reporting incoming vararg GC refs\n")); @@ -5236,7 +5236,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD, #endif // USE_GC_INFO_DECODER #endif // !CROSSGEN_COMPILE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /***************************************************************************** * * Return the address of the local security object reference @@ -5255,7 +5255,7 @@ OBJECTREF* EECodeManager::GetAddrOfSecurityObjectFromCachedInfo(PREGDISPLAY pRD, // _ASSERTE(stackwalkCacheUnwindInfo->fUseEbpAsFrameReg); return (OBJECTREF *) (size_t) (GetRegdisplayFP(pRD) - (securityObjectOffset * sizeof(void*))); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF) @@ -5623,19 +5623,19 @@ PTR_VOID EECodeManager::GetExactGenericsToken(SIZE_T baseStackSlot, INT32 spOffsetPSPSym = gcInfoDecoder.GetPSPSymStackSlot(); _ASSERTE(spOffsetPSPSym != NO_PSP_SYM); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // On AMD64 the spOffsetPSPSym is relative to the "Initial SP": the stack // pointer at the end of the prolog before and dynamic allocations, so it // can be the same for funclets and the main function. 
// However, we have a caller SP, so we need to convert baseStackSlot -= pCodeInfo->GetFixedStackSize(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // For funclets we have to do an extra dereference to get the PSPSym first. TADDR newBaseStackSlot = *PTR_TADDR(baseStackSlot + spOffsetPSPSym); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // On AMD64 the PSPSym stores the "Initial SP": the stack pointer at the end of // prolog, before any dynamic allocations. // However, the GenericsContext offset is relative to the caller SP for all @@ -5643,7 +5643,7 @@ PTR_VOID EECodeManager::GetExactGenericsToken(SIZE_T baseStackSlot, // But we have to be careful to use the main function's EECodeInfo, not the // funclet's EECodeInfo because they have different stack sizes! newBaseStackSlot += pCodeInfo->GetMainFunctionInfo().GetFixedStackSize(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 taSlot = (TADDR)( spOffsetGenericsContext + newBaseStackSlot ); } @@ -6001,7 +6001,7 @@ void EECodeManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) #endif // #ifdef DACCESS_COMPILE -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* * GetAmbientSP * @@ -6098,7 +6098,7 @@ TADDR EECodeManager::GetAmbientSP(PREGDISPLAY pContext, return baseSP; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 /* Get the number of bytes used for stack parameters. @@ -6114,7 +6114,7 @@ ULONG32 EECodeManager::GetStackParameterSize(EECodeInfo * pCodeInfo) SUPPORTS_DAC; } CONTRACTL_END; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #if defined(FEATURE_EH_FUNCLETS) if (pCodeInfo->IsFunclet()) { @@ -6142,6 +6142,6 @@ ULONG32 EECodeManager::GetStackParameterSize(EECodeInfo * pCodeInfo) #else return 0; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } diff --git a/src/coreclr/src/vm/encee.cpp b/src/coreclr/src/vm/encee.cpp index 0c5aef93b6506..ac104d2d7f9cf 100644 --- a/src/coreclr/src/vm/encee.cpp +++ b/src/coreclr/src/vm/encee.cpp @@ -727,7 +727,7 @@ NOINLINE void EditAndContinueModule::FixContextAndResume( memcpy(&context, pContext, sizeof(CONTEXT)); pContext = &context; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Since we made a copy of the incoming CONTEXT in context, clear any new flags we // don't understand (like XSAVE), since we'll eventually be passing a CONTEXT based // on this copy to RtlRestoreContext, and this copy doesn't have the extra info @@ -736,7 +736,7 @@ NOINLINE void EditAndContinueModule::FixContextAndResume( // FUTURE: No reason to ifdef this for amd64-only, except to make this late fix as // surgical as possible. Would be nice to enable this on x86 early in the next cycle. pContext->ContextFlags &= CONTEXT_ALL; -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) EECodeInfo oldCodeInfo; memcpy(&oldCodeInfo, pOldCodeInfo, sizeof(EECodeInfo)); @@ -757,7 +757,7 @@ NOINLINE void EditAndContinueModule::FixContextAndResume( g_pDebugInterface->GetVarInfo(pMD, oldDebuggerFuncHandle, &oldVarInfoCount, &pOldVarInfo); g_pDebugInterface->GetVarInfo(pMD, NULL, &newVarInfoCount, &pNewVarInfo); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // save the frame pointer as FixContextForEnC might step on it. LPVOID oldSP = dac_cast(GetSP(pContext)); @@ -802,7 +802,7 @@ NOINLINE void EditAndContinueModule::FixContextAndResume( // and return because we are potentially writing new vars onto the stack. 
pCurThread->SetFilterContext( NULL ); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) ResumeAtJit(pContext, oldSP); #else RtlRestoreContext(pContext, NULL); diff --git a/src/coreclr/src/vm/eventpipe.cpp b/src/coreclr/src/vm/eventpipe.cpp index 4712aff6a6c10..0c4581fe523e0 100644 --- a/src/coreclr/src/vm/eventpipe.cpp +++ b/src/coreclr/src/vm/eventpipe.cpp @@ -22,9 +22,9 @@ #include "win32threadpool.h" #include "ceemain.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "pal.h" -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_PERFTRACING @@ -34,13 +34,13 @@ EventPipeConfiguration EventPipe::s_config; EventPipeEventSource *EventPipe::s_pEventSource = nullptr; VolatilePtr EventPipe::s_pSessions[MaxNumberOfSessions]; Volatile EventPipe::s_allowWrite = 0; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX unsigned int * EventPipe::s_pProcGroupOffsets = nullptr; #endif Volatile EventPipe::s_numberOfSessions(0); // This function is auto-generated from /src/scripts/genEventPipe.py -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX extern "C" #endif void InitProvidersAndEvents(); @@ -78,7 +78,7 @@ void EventPipe::Initialize() if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EventPipeProcNumbers) != 0) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // setup the windows processor group offset table WORD numGroups = ::GetActiveProcessorGroupCount(); s_pProcGroupOffsets = new (nothrow) unsigned int[numGroups]; diff --git a/src/coreclr/src/vm/eventpipe.h b/src/coreclr/src/vm/eventpipe.h index f6a60a9784647..aa1c10cfd9445 100644 --- a/src/coreclr/src/vm/eventpipe.h +++ b/src/coreclr/src/vm/eventpipe.h @@ -137,7 +137,7 @@ class EventPipe // running on. If for any reason we can't tell then return 0xFFFFFFFF. static unsigned int GetCurrentProcessorNumber() { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (s_pProcGroupOffsets) { PROCESSOR_NUMBER procNum; @@ -219,7 +219,7 @@ class EventPipe //! Bitmask tracking EventPipe active sessions. // in all groups preceding it. For example if there are three groups with sizes: // 1, 7, 6 the table would be 0, 1, 8 -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static unsigned int * s_pProcGroupOffsets; #endif static Volatile s_numberOfSessions; diff --git a/src/coreclr/src/vm/eventpipebuffer.cpp b/src/coreclr/src/vm/eventpipebuffer.cpp index 69eec1e268759..eeed5246cd0e8 100644 --- a/src/coreclr/src/vm/eventpipebuffer.cpp +++ b/src/coreclr/src/vm/eventpipebuffer.cpp @@ -93,7 +93,7 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeSession &session, Eve event, procNumber, (pThread == NULL) ? 
-#ifdef FEATURE_PAL +#ifdef TARGET_UNIX ::PAL_GetCurrentOSThreadId() #else ::GetCurrentThreadId() diff --git a/src/coreclr/src/vm/eventpipeconfiguration.cpp b/src/coreclr/src/vm/eventpipeconfiguration.cpp index 412c447b261de..69c0dd29840d9 100644 --- a/src/coreclr/src/vm/eventpipeconfiguration.cpp +++ b/src/coreclr/src/vm/eventpipeconfiguration.cpp @@ -470,7 +470,7 @@ EventPipeEventInstance *EventPipeConfiguration::BuildEventMetadataEvent(EventPip EventPipeEventInstance *pInstance = new EventPipeEventInstance( *m_pMetadataEvent, EventPipe::GetCurrentProcessorNumber(), -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_GetCurrentOSThreadId(), #else GetCurrentThreadId(), diff --git a/src/coreclr/src/vm/eventpipeinternal.cpp b/src/coreclr/src/vm/eventpipeinternal.cpp index 17c383d0e9393..de3186c4af363 100644 --- a/src/coreclr/src/vm/eventpipeinternal.cpp +++ b/src/coreclr/src/vm/eventpipeinternal.cpp @@ -11,9 +11,9 @@ #include "eventpipesession.h" #include "eventpipesessionprovider.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #include "pal.h" -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_PERFTRACING diff --git a/src/coreclr/src/vm/eventpipesession.cpp b/src/coreclr/src/vm/eventpipesession.cpp index 5a47bef1fb268..3c699b7d44581 100644 --- a/src/coreclr/src/vm/eventpipesession.cpp +++ b/src/coreclr/src/vm/eventpipesession.cpp @@ -133,12 +133,12 @@ static void PlatformSleep() // Wait until it's time to sample again. const uint32_t PeriodInNanoSeconds = 100000000; // 100 msec. -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_nanosleep(PeriodInNanoSeconds); -#else //FEATURE_PAL +#else //TARGET_UNIX const uint32_t NUM_NANOSECONDS_IN_1_MS = 1000000; ClrSleepEx(PeriodInNanoSeconds / NUM_NANOSECONDS_IN_1_MS, FALSE); -#endif //FEATURE_PAL +#endif //TARGET_UNIX } DWORD WINAPI EventPipeSession::ThreadProc(void *args) diff --git a/src/coreclr/src/vm/eventpipethread.cpp b/src/coreclr/src/vm/eventpipethread.cpp index f02eec15db7ef..24c678658fbab 100644 --- a/src/coreclr/src/vm/eventpipethread.cpp +++ b/src/coreclr/src/vm/eventpipethread.cpp @@ -117,7 +117,7 @@ EventPipeThread::EventPipeThread() m_lock.Init(LOCK_TYPE_DEFAULT); m_refCount = 0; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_osThreadId = ::PAL_GetCurrentOSThreadId(); #else m_osThreadId = ::GetCurrentThreadId(); diff --git a/src/coreclr/src/vm/eventreporter.cpp b/src/coreclr/src/vm/eventreporter.cpp index 35b800916757f..d516d76847dc2 100644 --- a/src/coreclr/src/vm/eventreporter.cpp +++ b/src/coreclr/src/vm/eventreporter.cpp @@ -453,7 +453,7 @@ void EventReporter::Report() { // If the event log file was neither corrupt nor full, then assert, // since something is wrong! -#ifndef _TARGET_ARM_ +#ifndef TARGET_ARM //ARMTODO: Event reporting is currently non-functional on winpe. 
_ASSERTE(!"EventReporter::Report - Unable to log details to event log!"); #endif diff --git a/src/coreclr/src/vm/eventtrace.cpp b/src/coreclr/src/vm/eventtrace.cpp index c794dac8e6458..dfe2ff9a74801 100644 --- a/src/coreclr/src/vm/eventtrace.cpp +++ b/src/coreclr/src/vm/eventtrace.cpp @@ -50,7 +50,7 @@ BOOL g_fEEHostedStartup = FALSE; #include "eventtracepriv.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context = { &MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_EVENTPIPE_Context }; DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_DOTNET_Context = { &MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_EVENTPIPE_Context }; DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_DOTNET_Context = { &MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context, MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_EVENTPIPE_Context }; @@ -60,7 +60,7 @@ DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context = { DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_DOTNET_Context = { MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_EVENTPIPE_Context, &MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_LTTNG_Context }; DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_DOTNET_Context = { MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_EVENTPIPE_Context, &MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_LTTNG_Context }; DOTNET_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_STRESS_PROVIDER_DOTNET_Context = { MICROSOFT_WINDOWS_DOTNETRUNTIME_STRESS_PROVIDER_EVENTPIPE_Context, &MICROSOFT_WINDOWS_DOTNETRUNTIME_STRESS_PROVIDER_LTTNG_Context }; -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_REDHAWK volatile LONGLONG ETW::GCLog::s_l64LastClientSequenceNumber = 0; @@ -228,7 +228,7 @@ BOOL IsRundownNgenKeywordEnabledAndNotSuppressed() /*******************************************************/ /* Fast assembly function to get the topmost EBP frame */ /*******************************************************/ -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) extern "C" { CallStackFrame* GetEbp() @@ -241,12 +241,12 @@ extern "C" return frame; } } -#endif //_TARGET_X86_ +#endif //TARGET_X86 /*************************************/ /* Function to append a frame to an existing stack */ /*************************************/ -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) void ETW::SamplingLog::Append(SIZE_T currentFrame) { LIMITED_METHOD_CONTRACT; @@ -319,14 +319,14 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skip return ETW::SamplingLog::UnInitialized; } #ifndef DACCESS_COMPILE -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (RtlVirtualUnwind_Unsafe == NULL) { // We haven't even set up the RtlVirtualUnwind function pointer yet, // so it's too early to try stack walking. 
return ETW::SamplingLog::UnInitialized; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 Thread *pThread = GetThread(); if (pThread == NULL) { @@ -343,7 +343,7 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skip pThread->MarkEtwStackWalkInProgress(); EX_TRY { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 CallStackFrame *currentEBP = GetEbp(); CallStackFrame *lastEBP = NULL; @@ -416,7 +416,7 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skip PrevSP = CurrentSP; } -#endif //_TARGET_X86_ +#endif //TARGET_X86 } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); pThread->MarkEtwStackWalkCompleted(); #endif //!DACCESS_COMPILE @@ -424,7 +424,7 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skip return ETW::SamplingLog::Completed; } -#endif // !defined(FEATURE_PAL) +#endif // !defined(TARGET_UNIX) #endif // !FEATURE_REDHAWK /****************************************************************************/ @@ -1137,7 +1137,7 @@ void BulkComLogger::FlushRcw() unsigned short instance = GetClrInstanceId(); -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) EVENT_DATA_DESCRIPTOR eventData[3]; EventDataDescCreate(&eventData[0], &m_currRcw, sizeof(const unsigned int)); EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short)); @@ -1146,7 +1146,7 @@ void BulkComLogger::FlushRcw() ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW, _countof(eventData), eventData); #else ULONG result = FireEtXplatGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData); -#endif // !defined(FEATURE_PAL) +#endif // !defined(TARGET_UNIX) result |= EventPipeWriteEventGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData); _ASSERTE(result == ERROR_SUCCESS); @@ -1227,7 +1227,7 @@ void BulkComLogger::FlushCcw() unsigned short instance = GetClrInstanceId(); -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) EVENT_DATA_DESCRIPTOR eventData[3]; EventDataDescCreate(&eventData[0], &m_currCcw, sizeof(const unsigned int)); EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short)); @@ -1236,7 +1236,7 @@ void BulkComLogger::FlushCcw() ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW, _countof(eventData), eventData); #else ULONG result = FireEtXplatGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData); -#endif //!defined(FEATURE_PAL) +#endif //!defined(TARGET_UNIX) result |= EventPipeWriteEventGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData); _ASSERTE(result == ERROR_SUCCESS); @@ -1431,7 +1431,7 @@ void BulkStaticsLogger::FireBulkStaticsEvent() unsigned short instance = GetClrInstanceId(); unsigned __int64 appDomain = (unsigned __int64)m_domain; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) EVENT_DATA_DESCRIPTOR eventData[4]; EventDataDescCreate(&eventData[0], &m_count, sizeof(const unsigned int) ); EventDataDescCreate(&eventData[1], &appDomain, sizeof(unsigned __int64) ); @@ -1441,7 +1441,7 @@ void BulkStaticsLogger::FireBulkStaticsEvent() ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootStaticVar, _countof(eventData), eventData); #else ULONG result = FireEtXplatGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer); -#endif //!defined(FEATURE_PAL) +#endif //!defined(TARGET_UNIX) result |= EventPipeWriteEventGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer); _ASSERTE(result == 
ERROR_SUCCESS); @@ -4176,7 +4176,7 @@ void InitializeEventTracing() if (FAILED(hr)) return; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) // Register CLR providers with the OS if (g_pEtwTracer == NULL) { @@ -4192,9 +4192,9 @@ void InitializeEventTracing() // providers can do so now ETW::TypeSystemLog::PostRegistrationInit(); -#if defined(FEATURE_PAL) && defined (FEATURE_PERFTRACING) +#if defined(TARGET_UNIX) && defined (FEATURE_PERFTRACING) XplatEventLogger::InitializeLogger(); -#endif // FEATURE_PAL && FEATURE_PERFTRACING +#endif // TARGET_UNIX && FEATURE_PERFTRACING } // Plumbing to funnel event pipe callbacks and ETW callbacks together into a single common @@ -4207,12 +4207,12 @@ void InitializeEventTracing() // a suitable token, this implementation has a different callback for every EventPipe provider // that ultimately funnels them all into a common handler. -#if defined(FEATURE_PAL) +#if defined(TARGET_UNIX) // CLR_GCHEAPCOLLECT_KEYWORD is defined by the generated ETW manifest on Windows. // On non-Windows, we need to make sure that this is defined. Given that we can't change // the value due to compatibility, we specify it here rather than generating defines based on the manifest. #define CLR_GCHEAPCOLLECT_KEYWORD 0x800000 -#endif // defined(FEATURE_PAL) +#endif // defined(TARGET_UNIX) // CallbackProviderIndex provides a quick identification of which provider triggered the // ETW callback. @@ -4238,13 +4238,13 @@ VOID EtwCallbackCommon( LIMITED_METHOD_CONTRACT; bool bIsPublicTraceHandle = ProviderIndex == DotNETRuntime; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) static_assert(GCEventLevel_Fatal == TRACE_LEVEL_FATAL, "GCEventLevel_Fatal value mismatch"); static_assert(GCEventLevel_Error == TRACE_LEVEL_ERROR, "GCEventLevel_Error value mismatch"); static_assert(GCEventLevel_Warning == TRACE_LEVEL_WARNING, "GCEventLevel_Warning mismatch"); static_assert(GCEventLevel_Information == TRACE_LEVEL_INFORMATION, "GCEventLevel_Information mismatch"); static_assert(GCEventLevel_Verbose == TRACE_LEVEL_VERBOSE, "GCEventLevel_Verbose mismatch"); -#endif // !defined(FEATURE_PAL) +#endif // !defined(TARGET_UNIX) GCEventKeyword keywords = static_cast(MatchAnyKeyword); GCEventLevel level = static_cast(Level); GCHeapUtilities::RecordEventStateChange(bIsPublicTraceHandle, keywords, level); @@ -4286,7 +4286,7 @@ VOID EtwCallbackCommon( // Profilers may (optionally) specify extra data in the filter parameter // to log with the GCStart event. 
LONGLONG l64ClientSequenceNumber = 0; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) PEVENT_FILTER_DESCRIPTOR FilterData = (PEVENT_FILTER_DESCRIPTOR)pFilterData; if ((FilterData != NULL) && (FilterData->Type == 1) && @@ -4294,7 +4294,7 @@ VOID EtwCallbackCommon( { l64ClientSequenceNumber = *(LONGLONG *) (FilterData->Ptr); } -#endif // !defined(FEATURE_PAL) +#endif // !defined(TARGET_UNIX) ETW::GCLog::ForceGC(l64ClientSequenceNumber); } // TypeSystemLog needs a notification when certain keywords are modified, so @@ -4364,7 +4364,7 @@ VOID EventPipeEtwCallbackDotNETRuntimePrivate( } -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) HRESULT ETW::CEtwTracer::Register() { WRAPPER_NO_CONTRACT; @@ -4540,7 +4540,7 @@ extern "C" } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // We only do this on amd64 (NOT ARM, because ARM uses frame based stack crawling) // If we have turned on the JIT keyword to the INFORMATION setting (needed to get JIT names) then // we assume that we also want good stack traces so we need to publish unwind information so @@ -4580,7 +4580,7 @@ extern "C" } #endif // FEATURE_REDHAWK -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifndef FEATURE_REDHAWK /****************************************************************************/ @@ -4654,11 +4654,11 @@ VOID ETW::ExceptionLog::ExceptionThrown(CrawlFrame *pCf, BOOL bIsReThrownExcept if (pCf->IsFrameless()) { -#ifndef BIT64 +#ifndef HOST_64BIT exceptionEIP = (PVOID)pCf->GetRegisterSet()->ControlPC; #else exceptionEIP = (PVOID)GetIP(pCf->GetRegisterSet()->pContext); -#endif //!BIT64 +#endif //!HOST_64BIT } else { @@ -5041,7 +5041,7 @@ VOID ETW::InfoLog::RuntimeInformation(INT32 type) VOID ETW::CodeSymbolLog::EmitCodeSymbols(Module* pModule) { -#if !defined(FEATURE_PAL) //UNIXTODO: Enable EmitCodeSymbols +#if !defined(TARGET_UNIX) //UNIXTODO: Enable EmitCodeSymbols CONTRACTL { NOTHROW; GC_NOTRIGGER; @@ -5098,7 +5098,7 @@ VOID ETW::CodeSymbolLog::EmitCodeSymbols(Module* pModule) } } } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions); -#endif// !defined(FEATURE_PAL) +#endif// !defined(TARGET_UNIX) } /* Returns the length of an in-memory symbol stream @@ -7641,10 +7641,10 @@ bool EventPipeHelper::IsEnabled(DOTNET_TRACE_CONTEXT Context, UCHAR Level, ULONG } #endif // FEATURE_PERFTRACING -#if defined(FEATURE_PAL) && defined(FEATURE_PERFTRACING) +#if defined(TARGET_UNIX) && defined(FEATURE_PERFTRACING) // This is a wrapper method for LTTng. See https://github.com/dotnet/coreclr/pull/27273 for details. extern "C" bool XplatEventLoggerIsEnabled() { return XplatEventLogger::IsEventLoggingEnabled(); } -#endif // FEATURE_PAL && FEATURE_PERFTRACING +#endif // TARGET_UNIX && FEATURE_PERFTRACING diff --git a/src/coreclr/src/vm/excep.cpp b/src/coreclr/src/vm/excep.cpp index 16fe93e235b38..3394a3fc01c4f 100644 --- a/src/coreclr/src/vm/excep.cpp +++ b/src/coreclr/src/vm/excep.cpp @@ -29,9 +29,9 @@ #include "virtualcallstub.h" #include "typestring.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "dwreport.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #include "eventreporter.h" @@ -43,10 +43,10 @@ #endif #include -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Include definition of GenericModeBlock #include -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Support for extracting MethodDesc of a delegate. 
@@ -56,12 +56,12 @@ #include "gccover.h" #endif // HAVE_GCCOVER -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Windows uses 64kB as the null-reference area #define NULL_AREA_SIZE (64 * 1024) -#else // !FEATURE_PAL +#else // !TARGET_UNIX #define NULL_AREA_SIZE GetOsPageSize() -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifndef CROSSGEN_COMPILE @@ -2597,14 +2597,14 @@ ReplaceExceptionContextRecord(CONTEXT *pTarget, CONTEXT *pSource) _ASSERTE(pTarget); _ASSERTE(pSource); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // // @TODO IA64: CONTEXT_DEBUG_REGISTERS not defined on IA64, may need updated SDK // // Want CONTROL, INTEGER, SEGMENTS. If we have Floating Point, fine. _ASSERTE((pSource->ContextFlags & CONTEXT_FULL) == CONTEXT_FULL); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifdef CONTEXT_EXTENDED_REGISTERS @@ -2872,7 +2872,7 @@ VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL r if (param.throwable->GetMethodTable() == g_pThreadAbortExceptionClass) { _ASSERTE(GetThread()->IsAbortRequested() -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 || GetFirstCOMPlusSEHRecord(this) == EXCEPTION_CHAIN_END #endif @@ -3277,7 +3277,7 @@ DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord) // NullReferenceException instead of doing the new AV translation logic. if ((g_pConfig != NULL) && !g_pConfig->LegacyNullReferenceExceptionPolicy()) { -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // If we got the exception on a redirect function it means the original exception happened in managed code: if (Thread::IsAddrOfRedirectFunc(pExceptionRecord->ExceptionAddress)) return (DWORD) kNullReferenceException; @@ -3286,7 +3286,7 @@ DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord) { return (DWORD) kNullReferenceException; } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX // If the IP of the AV is not in managed code, then it's an AccessViolationException.
if (!ExecutionManager::IsManagedCode((PCODE)pExceptionRecord->ExceptionAddress)) @@ -3570,7 +3570,7 @@ BOOL StackTraceInfo::AppendElement(BOOL bAllowAllocMem, UINT_PTR currentIP, UINT bRetVal = TRUE; } -#ifndef FEATURE_PAL // Watson is supported on Windows only +#ifndef TARGET_UNIX // Watson is supported on Windows only Thread *pThread = GetThread(); _ASSERTE(pThread); @@ -3593,7 +3593,7 @@ BOOL StackTraceInfo::AppendElement(BOOL bAllowAllocMem, UINT_PTR currentIP, UINT SetupInitialThrowBucketDetails(adjustedIp); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return bRetVal; } @@ -3833,7 +3833,7 @@ LONG NotifyDebuggerLastChance(Thread *pThread, return retval; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //---------------------------------------------------------------------------- // // DoReportFault - wrapper for ReportFault in FaultRep.dll, which also handles @@ -3905,7 +3905,7 @@ void DisableOSWatson(void) LOG((LF_EH, LL_INFO100, "DisableOSWatson: SetErrorMode = 0x%x\n", lastErrorMode | SEM_NOGPFAULTERRORBOX)); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX //------------------------------------------------------------------------------ // This function is called on an unhandled exception, via the runtime's @@ -3934,7 +3934,7 @@ LONG WatsonLastChance( // EXCEPTION_CONTINUE_SEARCH, _CONTINUE_ CONTRACT_VIOLATION(AllViolation); LOG((LF_EH, LL_INFO10, "D::WLC: Enter WatsonLastChance\n")); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static DWORD fDisableWatson = -1; if (fDisableWatson == -1) { @@ -3947,7 +3947,7 @@ LONG WatsonLastChance( // EXCEPTION_CONTINUE_SEARCH, _CONTINUE_ LOG((LF_EH, LL_INFO10, "D::WLC: OS Watson is disabled for a managed unhandled exception\n")); return EXCEPTION_CONTINUE_SEARCH; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // We don't want to launch Watson if a debugger is already attached to // the process. @@ -3974,7 +3974,7 @@ LONG WatsonLastChance( // EXCEPTION_CONTINUE_SEARCH, _CONTINUE_ { LOG((LF_EH, LL_INFO10, "WatsonLastChance: Debugger not attached at sp %p ...\n", GetCurrentSP())); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX FaultReportResult result = FaultReportResultQuit; BOOL fSOException = FALSE; @@ -4129,7 +4129,7 @@ LONG WatsonLastChance( // EXCEPTION_CONTINUE_SEARCH, _CONTINUE_ #else } else if (CORDebuggerAttached()) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX { // Already debugging with a managed debugger. Should let that debugger know. LOG((LF_EH, LL_INFO100, "WatsonLastChance: Managed debugger already attached at sp %p ...\n", GetCurrentSP())); @@ -4143,9 +4143,9 @@ LONG WatsonLastChance( // EXCEPTION_CONTINUE_SEARCH, _CONTINUE_ } } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DisableOSWatson(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (!shouldNotifyDebugger) { @@ -4315,7 +4315,7 @@ LONG UserBreakpointFilter(EXCEPTION_POINTERS* pEP) // @todo: The InternalUnhandledExceptionFilter can trigger. CONTRACT_VIOLATION(GCViolation | ThrowsViolation | ModeViolation | FaultViolation | FaultNotFatal); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX int result = COMUnhandledExceptionFilter(pEP); #else int result = UnhandledExceptionFilter(pEP); @@ -4486,7 +4486,7 @@ BOOL InstallUnhandledExceptionFilter() { STATIC_CONTRACT_MODE_ANY; STATIC_CONTRACT_FORBID_FAULT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // We will be here only for CoreCLR on WLC since we don't // register UEF for SL.
if (g_pOriginalUnhandledExceptionFilter == FILTER_NOT_INSTALLED) { @@ -4500,7 +4500,7 @@ BOOL InstallUnhandledExceptionFilter() { LOG((LF_EH, LL_INFO10, "InstallUnhandledExceptionFilter registered UEF with OS for CoreCLR!\n")); } _ASSERTE(g_pOriginalUnhandledExceptionFilter != FILTER_NOT_INSTALLED); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // All done - successfully! return TRUE; @@ -4512,7 +4512,7 @@ void UninstallUnhandledExceptionFilter() { STATIC_CONTRACT_MODE_ANY; STATIC_CONTRACT_FORBID_FAULT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // We will be here only for CoreCLR on WLC or on Mac SL. if (g_pOriginalUnhandledExceptionFilter != FILTER_NOT_INSTALLED) { @@ -4524,7 +4524,7 @@ void UninstallUnhandledExceptionFilter() { g_pOriginalUnhandledExceptionFilter = FILTER_NOT_INSTALLED; LOG((LF_EH, LL_INFO10, "UninstallUnhandledExceptionFilter unregistered UEF from OS for CoreCLR!\n")); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // @@ -4818,12 +4818,12 @@ LONG InternalUnhandledExceptionFilter_Worker( else fIsProcessTerminating = !(pParam->pThread->HasThreadStateNC(Thread::TSNC_IgnoreUnhandledExceptions)); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Setup the watson bucketing details for UE processing. // do this before notifying appdomains of the UE so if an AD attempts to // retrieve the bucket params in the UE event handler it gets the correct data. SetupWatsonBucketsForUEF(useLastThrownObject); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Send notifications to the AppDomains. NotifyAppDomainsOfUnhandledException(pParam->pExceptionInfo, NULL, useLastThrownObject, fIsProcessTerminating /*isTerminating*/); @@ -4883,7 +4883,7 @@ LONG InternalUnhandledExceptionFilter_Worker( if (tore.GetType() == TypeOfReportedError::NativeThreadUnhandledException) { pParam->retval = EXCEPTION_CONTINUE_SEARCH; -#if defined(FEATURE_EVENT_TRACE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EVENT_TRACE) && !defined(TARGET_UNIX) DoReportForUnhandledNativeException(pParam->pExceptionInfo); #endif goto lDone; @@ -4893,7 +4893,7 @@ LONG InternalUnhandledExceptionFilter_Worker( { LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker, ignoring the exception\n")); pParam->retval = EXCEPTION_CONTINUE_SEARCH; -#if defined(FEATURE_EVENT_TRACE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EVENT_TRACE) && !defined(TARGET_UNIX) DoReportForUnhandledNativeException(pParam->pExceptionInfo); #endif goto lDone; @@ -5045,7 +5045,7 @@ void ParseUseEntryPointFilter(LPCWSTR value) bool GetUseEntryPointFilter() { -#ifdef PLATFORM_WINDOWS // This feature has only been tested on Windows, keep it disabled on other platforms +#ifdef TARGET_WINDOWS // This feature has only been tested on Windows, keep it disabled on other platforms static bool s_useEntryPointFilterEnv = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_UseEntryPointFilter) != 0; return s_useEntryPointFilterCorhostProperty || s_useEntryPointFilterEnv; @@ -5118,9 +5118,9 @@ LONG EntryPointFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID _pData) // Returns // the result of calling InternalUnhandledExceptionFilter //------------------------------------------------------------------------------ -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) #pragma code_seg(push, uef, CLR_UEF_SECTION_NAME) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX LONG __stdcall COMUnhandledExceptionFilter( // EXCEPTION_CONTINUE_SEARCH or EXCEPTION_CONTINUE_EXECUTION EXCEPTION_POINTERS *pExceptionInfo) // Information about the exception. 
{ @@ -5157,9 +5157,9 @@ LONG __stdcall COMUnhandledExceptionFilter( // EXCEPTION_CONTINUE_SEARCH or return retVal; } // LONG __stdcall COMUnhandledExceptionFilter() -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) #pragma code_seg(pop, uef) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void PrintStackTraceToStdout(); @@ -5205,7 +5205,7 @@ DefaultCatchHandlerExceptionMessageWorker(Thread* pThread, PrintToStdErrA("\n"); -#if defined(FEATURE_EVENT_TRACE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EVENT_TRACE) && !defined(TARGET_UNIX) // Send the log to Windows Event Log if (sendWindowsEventLog && ShouldLogInEventLog()) { @@ -6194,7 +6194,7 @@ LPVOID COMPlusCheckForAbort(UINT_PTR uTryCatchResumeAddress) exit: -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Only proceed if Watson is enabled - CoreCLR may have it disabled. if (IsWatsonEnabled()) @@ -6245,7 +6245,7 @@ LPVOID COMPlusCheckForAbort(UINT_PTR uTryCatchResumeAddress) } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return pRetAddress; } @@ -6319,7 +6319,7 @@ void AdjustContextForThreadStop(Thread* pThread, // doesn't trap. We're not going to use these objects after the exception. // // Only callee saved registers are going to be reported by the faulting exception frame. -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Ebx,esi,edi are important. Eax,ecx,edx are not. pContext->Ebx = 0; pContext->Edi = 0; @@ -6330,11 +6330,11 @@ void AdjustContextForThreadStop(Thread* pThread, #else // !FEATURE_EH_FUNCLETS CopyOSContext(pContext, pThread->m_OSContext); -#if defined(_TARGET_ARM_) && defined(_DEBUG) +#if defined(TARGET_ARM) && defined(_DEBUG) // Make sure that the thumb bit is set on the IP of the original abort context we just restored. PCODE controlPC = GetIP(pContext); _ASSERTE(controlPC & THUMB_CODE); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #endif // !FEATURE_EH_FUNCLETS pThread->ResetThrowControlForThread(); @@ -6564,7 +6564,7 @@ bool IsGcMarker(CONTEXT* pContext, EXCEPTION_RECORD *pExceptionRecord) return false; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Return true if the access violation is well formed (has two info parameters // at the end) @@ -6636,20 +6636,20 @@ IsDebuggerFault(EXCEPTION_RECORD *pExceptionRecord, return false; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_EH_FUNCLETS -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 EXTERN_C void JIT_MemSet_End(); EXTERN_C void JIT_MemCpy_End(); EXTERN_C void JIT_WriteBarrier_End(); EXTERN_C void JIT_CheckedWriteBarrier_End(); EXTERN_C void JIT_ByRefWriteBarrier_End(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_AMD64_) && defined(_DEBUG) +#if defined(TARGET_AMD64) && defined(_DEBUG) EXTERN_C void JIT_WriteBarrier_Debug(); EXTERN_C void JIT_WriteBarrier_Debug_End(); #endif @@ -6696,7 +6696,7 @@ bool IsIPInMarkedJitHelper(UINT_PTR uControlPc) #define CHECK_RANGE(name) \ if (GetEEFuncEntryPoint(name) <= uControlPc && uControlPc < GetEEFuncEntryPoint(name##_End)) return true; -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 CHECK_RANGE(JIT_MemSet) CHECK_RANGE(JIT_MemCpy) @@ -6704,13 +6704,13 @@ bool IsIPInMarkedJitHelper(UINT_PTR uControlPc) CHECK_RANGE(JIT_WriteBarrier) CHECK_RANGE(JIT_CheckedWriteBarrier) CHECK_RANGE(JIT_ByRefWriteBarrier) #else -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX CHECK_RANGE(JIT_WriteBarrierGroup) CHECK_RANGE(JIT_PatchedWriteBarrierGroup) -#endif // FEATURE_PAL -#endif // _TARGET_X86_ +#endif // TARGET_UNIX +#endif // TARGET_X86 -#if defined(_TARGET_AMD64_) && defined(_DEBUG) +#if defined(TARGET_AMD64) && defined(_DEBUG)
CHECK_RANGE(JIT_WriteBarrier_Debug) #endif @@ -6747,7 +6747,7 @@ AdjustContextForWriteBarrier( if (pExceptionRecord == nullptr) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) bool withinWriteBarrierGroup = ((ip >= (PCODE) JIT_WriteBarrierGroup) && (ip <= (PCODE) JIT_WriteBarrierGroup_End)); bool withinPatchedWriteBarrierGroup = ((ip >= (PCODE) JIT_PatchedWriteBarrierGroup) && (ip <= (PCODE) JIT_PatchedWriteBarrierGroup_End)); if (withinWriteBarrierGroup || withinPatchedWriteBarrierGroup) @@ -6764,7 +6764,7 @@ AdjustContextForWriteBarrier( pContext->Esp = (DWORD)esp; return TRUE; } -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) if (IsIPInMarkedJitHelper((UINT_PTR)ip)) { Thread::VirtualUnwindToFirstManagedCallFrame(pContext); @@ -6778,7 +6778,7 @@ AdjustContextForWriteBarrier( #endif // FEATURE_DATABREAKPOINT -#if defined(_TARGET_X86_) && !defined(PLATFORM_UNIX) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) void* f_IP = (void *)GetIP(pContext); if (((f_IP >= (void *) JIT_WriteBarrierGroup) && (f_IP <= (void *) JIT_WriteBarrierGroup_End)) || @@ -6793,7 +6793,7 @@ AdjustContextForWriteBarrier( SetSP(pContext, PCODE((BYTE*)GetSP(pContext) + sizeof(void*))); } return FALSE; -#elif defined(FEATURE_EH_FUNCLETS) // _TARGET_X86_ && !PLATFORM_UNIX +#elif defined(FEATURE_EH_FUNCLETS) // TARGET_X86 && !TARGET_UNIX void* f_IP = dac_cast(GetIP(pContext)); CONTEXT tempContext; @@ -6813,7 +6813,7 @@ AdjustContextForWriteBarrier( Thread::VirtualUnwindToFirstManagedCallFrame(pContext); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) // We had an AV in the writebarrier that needs to be treated // as originating in managed code. At this point, the stack (growing // from left->right) looks like this: @@ -6837,7 +6837,7 @@ AdjustContextForWriteBarrier( // Now we save the address back into the context so that it gets used // as the faulting address. SetIP(pContext, ControlPCPostAdjustment); -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 // Unwind the frame chain - On Win64, this is required since we may handle the managed fault and to do so, // we will replace the exception context with the managed context and "continue execution" there. 
Thus, we do not @@ -6865,7 +6865,7 @@ AdjustContextForWriteBarrier( #endif // ELSE } -#if defined(USE_FEF) && !defined(FEATURE_PAL) +#if defined(USE_FEF) && !defined(TARGET_UNIX) struct SavedExceptionInfo { @@ -6969,9 +6969,9 @@ LinkFrameAndThrow(FaultingExceptionFrame* pFrame) void SetNakedThrowHelperArgRegistersInContext(CONTEXT* pContext) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) pContext->Rcx = (UINT_PTR)GetIP(pContext); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) // Save the original IP in LR pContext->Lr = (DWORD)GetIP(pContext); #else @@ -6998,13 +6998,13 @@ void HandleManagedFault(EXCEPTION_RECORD* pExceptionRecord, SetIP(pContext, GetEEFuncEntryPoint(NakedThrowHelper)); } -#else // USE_FEF && !FEATURE_PAL +#else // USE_FEF && !TARGET_UNIX void InitSavedExceptionInfo() { } -#endif // USE_FEF && !FEATURE_PAL +#endif // USE_FEF && !TARGET_UNIX // // Init a new frame @@ -7013,14 +7013,14 @@ void FaultingExceptionFrame::Init(CONTEXT *pContext) { WRAPPER_NO_CONTRACT; #ifndef FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 CalleeSavedRegisters *pRegs = GetCalleeSavedRegisters(); #define CALLEE_SAVED_REGISTER(regname) pRegs->regname = pContext->regname; ENUM_CALLEE_SAVED_REGISTERS(); #undef CALLEE_SAVED_REGISTER m_ReturnAddress = ::GetIP(pContext); m_Esp = (DWORD)GetSP(pContext); -#else // _TARGET_X86_ +#else // TARGET_X86 PORTABILITY_ASSERT("FaultingExceptionFrame::Init"); #endif // _TARGET_???_ (ELSE) #else // !FEATURE_EH_FUNCLETS @@ -7117,7 +7117,7 @@ bool ShouldHandleManagedFault( return true; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX LONG WINAPI CLRVectoredExceptionHandlerPhase2(PEXCEPTION_POINTERS pExceptionInfo); @@ -7446,7 +7446,7 @@ VEH_ACTION WINAPI CLRVectoredExceptionHandlerPhase3(PEXCEPTION_POINTERS pExcepti GCX_NOTRIGGER(); #ifdef USE_REDIRECT_FOR_GCSTRESS - // NOTE: this is effectively ifdef (_TARGET_AMD64_ || _TARGET_ARM_), and does not actually trigger + // NOTE: this is effectively ifdef (TARGET_AMD64 || TARGET_ARM), and does not actually trigger // a GC. This will redirect the exception context to a stub which will // push a frame and cause GC. if (IsGcMarker(pContext, pExceptionRecord)) @@ -7455,11 +7455,11 @@ VEH_ACTION WINAPI CLRVectoredExceptionHandlerPhase3(PEXCEPTION_POINTERS pExcepti } #endif // USE_REDIRECT_FOR_GCSTRESS -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) -#ifdef _TARGET_X86_ +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) +#ifdef TARGET_X86 CPFH_AdjustContextForThreadSuspensionRace(pContext, GetThread()); -#endif // _TARGET_X86_ -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // TARGET_X86 +#endif // FEATURE_HIJACK && !TARGET_UNIX // Some other parts of the EE use exceptions in their own nefarious ways. We do some up-front processing // here to fix up the exception if needed. 
@@ -7563,26 +7563,26 @@ VEH_ACTION WINAPI CLRVectoredExceptionHandlerPhase3(PEXCEPTION_POINTERS pExcepti return VEH_NO_ACTION; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX BOOL IsIPInEE(void *ip) { WRAPPER_NO_CONTRACT; -#if defined(FEATURE_PREJIT) && !defined(FEATURE_PAL) +#if defined(FEATURE_PREJIT) && !defined(TARGET_UNIX) if ((TADDR)ip > g_runtimeLoadedBaseAddress && (TADDR)ip < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize) { return TRUE; } else -#endif // FEATURE_PREJIT && !FEATURE_PAL +#endif // FEATURE_PREJIT && !TARGET_UNIX { return FALSE; } } -#if defined(FEATURE_HIJACK) && (!defined(_TARGET_X86_) || defined(FEATURE_PAL)) +#if defined(FEATURE_HIJACK) && (!defined(TARGET_X86) || defined(TARGET_UNIX)) // This function is used to check if the specified IP is in the prolog or not. bool IsIPInProlog(EECodeInfo *pCodeInfo) @@ -7599,7 +7599,7 @@ bool IsIPInProlog(EECodeInfo *pCodeInfo) _ASSERTE(pCodeInfo->IsValid()); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Optimized version for AMD64 that doesn't need to go through the GC info decoding PTR_RUNTIME_FUNCTION funcEntry = pCodeInfo->GetFunctionEntry(); @@ -7613,7 +7613,7 @@ bool IsIPInProlog(EECodeInfo *pCodeInfo) // Check if the specified IP is beyond the prolog or not. DWORD prologLen = pUnwindInfo->SizeOfProlog; -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken(); @@ -7633,7 +7633,7 @@ bool IsIPInProlog(EECodeInfo *pCodeInfo) #endif // USE_GC_INFO_DECODER -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (pCodeInfo->GetRelOffset() >= prologLen) { @@ -7724,7 +7724,7 @@ bool IsIPInEpilog(PTR_CONTEXT pContextToCheck, EECodeInfo *pCodeInfo, BOOL *pSaf // We are in epilog. fIsInEpilog = true; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Check if context pointers has returned the address of the stack location in the hijacked function // from where RBP was restored. If the address is NULL, then it implies that RBP has been popped off. // Since JIT64 ensures that pop of RBP is the last instruction before ret/jmp, it implies its not safe @@ -7741,11 +7741,11 @@ bool IsIPInEpilog(PTR_CONTEXT pContextToCheck, EECodeInfo *pCodeInfo, BOOL *pSaf return fIsInEpilog; } -#endif // FEATURE_HIJACK && (!_TARGET_X86_ || FEATURE_PAL) +#endif // FEATURE_HIJACK && (!TARGET_X86 || TARGET_UNIX) #define EXCEPTION_VISUALCPP_DEBUGGER ((DWORD) (1<<30 | 0x6D<<16 | 5000)) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // This holder is used to capture the FPU state, reset it to what the CLR expects // and then restore the original state that was captured. @@ -7797,9 +7797,9 @@ class FPUStateHolder } }; -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo) { @@ -7853,10 +7853,10 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo) // WARNING DWORD dwLastError = GetLastError(); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Capture the FPU state before we do anything involving floating point instructions FPUStateHolder captureFPUState; -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) #ifdef FEATURE_INTEROP_DEBUGGING // For interop debugging we have a fancy exception queueing stunt. 
When the debugger @@ -7886,7 +7886,7 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo) return EXCEPTION_CONTINUE_SEARCH; } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (dwCode == EXCEPTION_BREAKPOINT || dwCode == EXCEPTION_SINGLE_STEP) { // For interop debugging, debugger bashes our managed exception handler. @@ -7906,7 +7906,7 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo) #ifdef USE_REDIRECT_FOR_GCSTRESS // This is AMD64 & ARM specific as the macro above is defined for AMD64 & ARM only bIsGCMarker = IsGcMarker(pExceptionInfo->ContextRecord, pExceptionInfo->ExceptionRecord); -#elif defined(_TARGET_X86_) && defined(HAVE_GCCOVER) +#elif defined(TARGET_X86) && defined(HAVE_GCCOVER) // This is the equivalent of the check done in COMPlusFrameHandler, incase the exception is // seen by VEH first on x86. bIsGCMarker = IsGcMarker(pExceptionInfo->ContextRecord, pExceptionInfo->ExceptionRecord); @@ -8052,14 +8052,14 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo) return result; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Contains the handle to the registered VEH static PVOID g_hVectoredExceptionHandler = NULL; void CLRAddVectoredHandlers(void) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // We now install a vectored exception handler on all supporting Windows architectures. g_hVectoredExceptionHandler = AddVectoredExceptionHandler(TRUE, (PVECTORED_EXCEPTION_HANDLER)CLRVectoredExceptionHandlerShim); @@ -8070,7 +8070,7 @@ void CLRAddVectoredHandlers(void) } LOG((LF_EH, LL_INFO100, "CLRAddVectoredHandlers: AddVectoredExceptionHandler() succeeded\n")); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // This function removes the vectored exception and continue handler registration @@ -8084,7 +8084,7 @@ void CLRRemoveVectoredHandlers(void) MODE_ANY; } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Unregister the vectored exception handler if one is registered (and we can). if (g_hVectoredExceptionHandler != NULL) @@ -8099,7 +8099,7 @@ void CLRRemoveVectoredHandlers(void) LOG((LF_EH, LL_INFO100, "CLRRemoveVectoredHandlers: RemoveVectoredExceptionHandler() succeeded.\n")); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // @@ -8313,7 +8313,7 @@ LONG AppDomainTransitionExceptionFilter( // First, call into NotifyOfCHFFilterWrapper ret = NotifyOfCHFFilterWrapper(pExceptionInfo, pParam); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Setup the watson bucketing details if the escaping // exception is preallocated. if (SetupWatsonBucketsForEscapingPreallocatedExceptions()) @@ -8331,7 +8331,7 @@ LONG AppDomainTransitionExceptionFilter( SetupWatsonBucketsForNonPreallocatedExceptions(); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return ret; } // LONG AppDomainTransitionExceptionFilter() @@ -8361,7 +8361,7 @@ LONG ReflectionInvocationExceptionFilter( // First, call into NotifyOfCHFFilterWrapper ret = NotifyOfCHFFilterWrapper(pExceptionInfo, pParam); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Setup the watson bucketing details if the escaping // exception is preallocated. if (SetupWatsonBucketsForEscapingPreallocatedExceptions()) @@ -8379,7 +8379,7 @@ LONG ReflectionInvocationExceptionFilter( SetupWatsonBucketsForNonPreallocatedExceptions(); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // If the application has opted into triggering a failfast when a CorruptedStateException enters the Reflection system, // then do the needful. 
@@ -8396,11 +8396,11 @@ LONG ReflectionInvocationExceptionFilter( // Get the exception tracker for the current exception #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker pEHTracker = pCurTES->GetCurrentExceptionTracker(); -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = pCurTES->GetCurrentExceptionTracker(); -#else // !(BIT64 || _TARGET_X86_) +#else // !(HOST_64BIT || TARGET_X86) #error Unsupported platform -#endif // BIT64 +#endif // HOST_64BIT #ifdef FEATURE_CORRUPTING_EXCEPTIONS if (pEHTracker->GetCorruptionSeverity() == ProcessCorrupting) @@ -8525,7 +8525,7 @@ bool DebugIsEECxxException(EXCEPTION_RECORD* pExceptionRecord) // [1] pExceptionObject : void* // [2] pThrowInfo : ThrowInfo* -#ifdef BIT64 +#ifdef HOST_64BIT #define NUM_CXX_EXCEPTION_PARAMS 4 #else #define NUM_CXX_EXCEPTION_PARAMS 3 @@ -8684,7 +8684,7 @@ void StripFileInfoFromStackTrace(SString &ssStackTrace) // EXCEPTION_CONTINUE_SEARCH. //============================================================================== void SetReversePInvokeEscapingUnhandledExceptionStatus(BOOL fIsUnwinding, -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) EXCEPTION_REGISTRATION_RECORD * pEstablisherFrame #elif defined(FEATURE_EH_FUNCLETS) ULONG64 pEstablisherFrame @@ -8744,7 +8744,7 @@ void SetReversePInvokeEscapingUnhandledExceptionStatus(BOOL fIsUnwinding, #endif // _DEBUG -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This function will capture the watson buckets for the current exception object that is: // @@ -9231,7 +9231,7 @@ BOOL IsThrowableThreadAbortException(OBJECTREF oThrowable) #if defined(FEATURE_EH_FUNCLETS) PTR_ExceptionTracker GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExceptionTracker pStartingEHTracker) -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExInfo pStartingEHTracker) #else @@ -9253,11 +9253,11 @@ PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, // Get the reference to the current exception tracker #if defined(FEATURE_EH_FUNCLETS) PTR_ExceptionTracker pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker(); -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker(); -#else // !(BIT64 || _TARGET_X86_) +#else // !(HOST_64BIT || TARGET_X86) #error Unsupported platform -#endif // BIT64 +#endif // HOST_64BIT BOOL fFoundTracker = FALSE; @@ -9337,12 +9337,12 @@ PTR_EHWatsonBucketTracker GetWatsonBucketTrackerForPreallocatedException(OBJECTR PTR_ExceptionTracker pEHTracker = NULL; PTR_ExceptionTracker pPreviousEHTracker = NULL; -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = NULL; PTR_ExInfo pPreviousEHTracker = NULL; -#else // !(BIT64 || _TARGET_X86_) +#else // !(HOST_64BIT || TARGET_X86) #error Unsupported platform -#endif // BIT64 +#endif // HOST_64BIT if (fStartSearchFromPreviousTracker) { @@ -10872,13 +10872,13 @@ void EHWatsonBucketTracker::CaptureUnhandledInfoForWatson(TypeOfReportedError to } #endif // !DACCESS_COMPILE } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Given a throwable, this function will attempt to find an active EH tracker corresponding to it. 
// If none found, it will return NULL #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker GetEHTrackerForException(OBJECTREF oThrowable, PTR_ExceptionTracker pStartingEHTracker) -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo GetEHTrackerForException(OBJECTREF oThrowable, PTR_ExInfo pStartingEHTracker) #else #error Unsupported platform @@ -10898,7 +10898,7 @@ PTR_ExInfo GetEHTrackerForException(OBJECTREF oThrowable, PTR_ExInfo pStartingEH // then use it. Otherwise, start from the current one. #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker(); -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker(); #else #error Unsupported platform @@ -11208,7 +11208,7 @@ BOOL CEHelper::IsProcessCorruptedStateException(OBJECTREF oThrowable) // Get the exception tracker for the current exception #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker pEHTracker = GetEHTrackerForException(oThrowable, NULL); -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = GetEHTrackerForException(oThrowable, NULL); #else #error Unsupported platform @@ -11328,11 +11328,11 @@ void CEHelper::SetupCorruptionSeverityForActiveException(BOOL fIsRethrownExcepti // Get the exception tracker for the current exception #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker pEHTracker = pCurTES->GetCurrentExceptionTracker(); -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = pCurTES->GetCurrentExceptionTracker(); -#else // !(BIT64 || _TARGET_X86_) +#else // !(HOST_64BIT || TARGET_X86) #error Unsupported platform -#endif // BIT64 +#endif // HOST_64BIT _ASSERTE(pEHTracker != NULL); @@ -11417,7 +11417,7 @@ void CEHelper::SetupCorruptionSeverityForActiveException(BOOL fIsRethrownExcepti #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker pOrigEHTracker = NULL; -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pOrigEHTracker = NULL; #else #error Unsupported platform @@ -11659,7 +11659,7 @@ void CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler(Thread *pThread // set it to "NotSet". #ifdef FEATURE_EH_FUNCLETS PTR_ExceptionTracker pEHTracker = pCurTES->GetCurrentExceptionTracker(); -#elif _TARGET_X86_ +#elif TARGET_X86 PTR_ExInfo pEHTracker = pCurTES->GetCurrentExceptionTracker(); #else #error Unsupported platform diff --git a/src/coreclr/src/vm/excep.h b/src/coreclr/src/vm/excep.h index 71cc944e5b308..7f9aba1b30995 100644 --- a/src/coreclr/src/vm/excep.h +++ b/src/coreclr/src/vm/excep.h @@ -22,9 +22,9 @@ class Thread; #include #include "interoputil.h" -#if defined(_TARGET_ARM_) || defined(_TARGET_X86_) +#if defined(TARGET_ARM) || defined(TARGET_X86) #define VSD_STUB_CAN_THROW_AV -#endif // _TARGET_ARM_ || _TARGET_X86_ +#endif // TARGET_ARM || TARGET_X86 BOOL IsExceptionFromManagedCode(const EXCEPTION_RECORD * pExceptionRecord); #ifdef VSD_STUB_CAN_THROW_AV @@ -32,13 +32,13 @@ BOOL IsIPinVirtualStub(PCODE f_IP); #endif // VSD_STUB_CAN_THROW_AV bool IsIPInMarkedJitHelper(UINT_PTR uControlPc); -#if defined(FEATURE_HIJACK) && (!defined(_TARGET_X86_) || defined(FEATURE_PAL)) +#if defined(FEATURE_HIJACK) && (!defined(TARGET_X86) || defined(TARGET_UNIX)) // General purpose functions for use on an IP in jitted code. 
bool IsIPInProlog(EECodeInfo *pCodeInfo); bool IsIPInEpilog(PTR_CONTEXT pContextToCheck, EECodeInfo *pCodeInfo, BOOL *pSafeToInjectThreadAbort); -#endif // FEATURE_HIJACK && (!_TARGET_X86_ || FEATURE_PAL) +#endif // FEATURE_HIJACK && (!TARGET_X86 || TARGET_UNIX) //****************************************************************************** // @@ -175,7 +175,7 @@ BOOL IsCOMPlusExceptionHandlerInstalled(); BOOL InstallUnhandledExceptionFilter(); void UninstallUnhandledExceptionFilter(); -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) // Section naming is a strategy by itself. Ideally, we could have named the UEF section // ".text$zzz" (lowercase after $ is important). What the linker does is look for the sections // that has the same name before '$' sign. It combines them together but sorted in an alphabetical @@ -194,7 +194,7 @@ void UninstallUnhandledExceptionFilter(); // section that comes after UEF section, it can affect the UEF section and we will // assert about it in "CExecutionEngine::ClrVirtualProtect". #define CLR_UEF_SECTION_NAME ".CLR_UEF" -#endif //!defined(FEATURE_PAL) +#endif //!defined(TARGET_UNIX) LONG __stdcall COMUnhandledExceptionFilter(EXCEPTION_POINTERS *pExceptionInfo); @@ -538,9 +538,9 @@ EXCEPTION_HANDLER_DECL(COMPlusFrameHandlerRevCom); // Pop off any SEH handlers we have registered below pTargetSP VOID __cdecl PopSEHRecords(LPVOID pTargetSP); -#if defined(_TARGET_X86_) && defined(DEBUGGING_SUPPORTED) +#if defined(TARGET_X86) && defined(DEBUGGING_SUPPORTED) VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context); -#endif // _TARGET_X86_ && DEBUGGING_SUPPORTED +#endif // TARGET_X86 && DEBUGGING_SUPPORTED BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers = FALSE); VOID PopNestedExceptionRecords(LPVOID pTargetSP, T_CONTEXT *pCtx, void *pSEH); @@ -556,14 +556,14 @@ VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH); #define STACK_OVERWRITE_BARRIER_VALUE 0xabcdefab #ifdef _DEBUG -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) struct FrameHandlerExRecordWithBarrier { DWORD m_StackOverwriteBarrier[STACK_OVERWRITE_BARRIER_SIZE]; FrameHandlerExRecord m_ExRecord; }; void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF); -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) #endif // _DEBUG #endif // !defined(FEATURE_EH_FUNCLETS) @@ -738,10 +738,10 @@ bool IsInterceptableException(Thread *pThread); // perform simple checking to see if the current exception is intercepted bool CheckThreadExceptionStateForInterception(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Currently, only Windows supports ClrUnwindEx (used inside ClrDebuggerDoUnwindAndIntercept) #define DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED // Intercept the current exception and start an unwind. This function may never return. 
@@ -754,9 +754,9 @@ LONG NotifyDebuggerLastChance(Thread *pThread, BOOL jitAttachRequested); #endif // DEBUGGING_SUPPORTED -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) void CPFH_AdjustContextForThreadSuspensionRace(T_CONTEXT *pContext, Thread *pThread); -#endif // _TARGET_X86_ +#endif // TARGET_X86 DWORD GetGcMarkerExceptionCode(LPVOID ip); bool IsGcMarker(T_CONTEXT *pContext, EXCEPTION_RECORD *pExceptionRecord); @@ -785,9 +785,9 @@ bool DebugIsEECxxException(EXCEPTION_RECORD* pExceptionRecord); inline void CopyOSContext(T_CONTEXT* pDest, T_CONTEXT* pSrc) { SIZE_T cbReadOnlyPost = 0; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 cbReadOnlyPost = sizeof(CONTEXT) - FIELD_OFFSET(CONTEXT, FltSave); // older OSes don't have the vector reg fields -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 memcpyNoGCRefs(pDest, pSrc, sizeof(T_CONTEXT) - cbReadOnlyPost); } @@ -796,7 +796,7 @@ void SaveCurrentExceptionInfo(PEXCEPTION_RECORD pRecord, PT_CONTEXT pContext); #ifdef _DEBUG void SetReversePInvokeEscapingUnhandledExceptionStatus(BOOL fIsUnwinding, -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 EXCEPTION_REGISTRATION_RECORD * pEstablisherFrame #elif defined(FEATURE_EH_FUNCLETS) ULONG64 pEstablisherFrame diff --git a/src/coreclr/src/vm/exceptionhandling.cpp b/src/coreclr/src/vm/exceptionhandling.cpp index f7fccb89b882f..827637b53a30a 100644 --- a/src/coreclr/src/vm/exceptionhandling.cpp +++ b/src/coreclr/src/vm/exceptionhandling.cpp @@ -17,16 +17,16 @@ #include "virtualcallstub.h" #include "utilcode.h" -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define USE_CURRENT_CONTEXT_IN_FILTER -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) // ARM/ARM64 uses Caller-SP to locate PSPSym in the funclet frame. #define USE_CALLER_SP_IN_FUNCLET -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) || defined(_TARGET_X86_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_X86) #define ADJUST_PC_UNWOUND_TO_CALL #define STACK_RANGE_BOUNDS_ARE_CALLER_SP #define USE_FUNCLET_CALL_HELPER @@ -35,15 +35,15 @@ // // For x86/Linux, RtlVirtualUnwind sets EstablisherFrame as Caller-SP. 
#define ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP -#endif // _TARGET_ARM_ || _TARGET_ARM64_ || _TARGET_X86_ +#endif // TARGET_ARM || TARGET_ARM64 || TARGET_X86 -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX void NOINLINE ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord, UINT_PTR ReturnValue, UINT_PTR TargetIP, UINT_PTR TargetFrameSp); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef USE_CURRENT_CONTEXT_IN_FILTER inline void CaptureNonvolatileRegisters(PKNONVOLATILE_CONTEXT pNonvolatileContext, PCONTEXT pContext) @@ -142,20 +142,20 @@ void FixContext(PCONTEXT pContextRecord) pContextRecord->reg = (value); \ } while (0) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 size_t resumeSp = EECodeManager::GetResumeSp(pContextRecord); FIXUPREG(Esp, resumeSp); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #undef FIXUPREG } MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX BOOL HandleHardwareException(PAL_SEHException* ex); BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord); -#endif // FEATURE_PAL +#endif // TARGET_UNIX static ExceptionTracker* GetTrackerMemory() { @@ -200,7 +200,7 @@ static inline void UpdatePerformanceMetrics(CrawlFrame *pcfThisFrame, BOOL bIsRe ETW::ExceptionLog::ExceptionThrown(pcfThisFrame, bIsRethrownException, bIsNewException); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX static LONG volatile g_termination_triggered = 0; void HandleTerminationRequest(int terminationExitCode) @@ -231,7 +231,7 @@ void InitializeExceptionHandling() // Initialize the lock used for synchronizing access to the stacktrace in the exception object g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Register handler of hardware exceptions like null reference in PAL PAL_SetHardwareExceptionHandler(HandleHardwareException, IsSafeToHandleHardwareException); @@ -240,7 +240,7 @@ void InitializeExceptionHandling() // Register handler for termination requests (e.g. 
SIGTERM) PAL_SetTerminationRequestHandler(HandleTerminationRequest); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } struct UpdateObjectRefInResumeContextCallbackState @@ -455,11 +455,11 @@ void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDI pAbortContext = GetThread()->GetAbortContext(); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define HANDLE_NULL_CONTEXT_POINTER _ASSERTE(false) -#else // FEATURE_PAL +#else // TARGET_UNIX #define HANDLE_NULL_CONTEXT_POINTER -#endif // FEATURE_PAL +#endif // TARGET_UNIX #define UPDATEREG(reg) \ do { \ @@ -482,14 +482,14 @@ void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDI } while (0) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) UPDATEREG(Ebx); UPDATEREG(Esi); UPDATEREG(Edi); UPDATEREG(Ebp); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) UPDATEREG(Rbx); UPDATEREG(Rbp); @@ -502,7 +502,7 @@ void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDI UPDATEREG(R14); UPDATEREG(R15); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) UPDATEREG(R4); UPDATEREG(R5); @@ -513,7 +513,7 @@ void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDI UPDATEREG(R10); UPDATEREG(R11); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) UPDATEREG(X19); UPDATEREG(X20); @@ -801,9 +801,9 @@ UINT_PTR ExceptionTracker::FinishSecondPass( STRESS_LOG1(LF_EH, LL_INFO10, "resume under control: ip: %p\n", uResumePC); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 pContextRecord->Rcx = uResumePC; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) // On ARM & ARM64, we save off the original PC in Lr. This is the same as done // in HandleManagedFault for H/W generated exceptions. pContextRecord->Lr = uResumePC; @@ -1088,7 +1088,7 @@ ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord } } -#ifndef FEATURE_PAL // Watson is on Windows only +#ifndef TARGET_UNIX // Watson is on Windows only // Setup bucketing details for nested exceptions (rethrow and non-rethrow) only if we are in the first pass if (!(dwExceptionFlags & EXCEPTION_UNWINDING)) { @@ -1098,7 +1098,7 @@ ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord SetStateForWatsonBucketing((STState == ExceptionTracker::STS_FirstRethrowFrame), pPrevEHTracker->GetThrowableAsHandle()); } } -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX CLRUnwindStatus status; @@ -1127,7 +1127,7 @@ ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord SetLastError(dwLastError); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // // At this point (the end of the 1st pass) we don't know where // we are going to resume to. So, we pass in an address, which @@ -1160,7 +1160,7 @@ ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord // via the custom unwinder. return ExceptionStackUnwind; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } else if (SecondPassComplete == status) { @@ -1499,10 +1499,10 @@ void ExceptionTracker::InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pT } else { -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) // See the comment above the call to InitRegDisplay for this assertion. _ASSERTE(pDispatcherContext->ControlPc == GetIP(pDispatcherContext->ContextRecord)); -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 #ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP // Simply setup the callerSP during the second pass in the caller context. 
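Stepping back from the individual hunks for a moment: every macro substitution in this section follows the same mechanical mapping, summarized below purely from the replacements visible in these files.

    //  Old macro                                  New macro
    //  _TARGET_X86_  / _TARGET_AMD64_         ->  TARGET_X86  / TARGET_AMD64
    //  _TARGET_ARM_  / _TARGET_ARM64_         ->  TARGET_ARM  / TARGET_ARM64
    //  FEATURE_PAL     (!FEATURE_PAL)         ->  TARGET_UNIX   (!TARGET_UNIX)
    //  PLATFORM_UNIX   (target-side checks)   ->  TARGET_UNIX
    //  BIT64                                  ->  HOST_64BIT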
@@ -1534,7 +1534,7 @@ void ExceptionTracker::InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pT } #endif // ADJUST_PC_UNWOUND_TO_CALL -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Remove the Thumb bit ControlPCForEHSearch = ThumbCodeToDataPointer(ControlPCForEHSearch); #endif @@ -3317,7 +3317,7 @@ EXTERN_C DWORD_PTR STDCALL CallEHFilterFunclet(Object *pThrowable, TADDR CallerS static inline UINT_PTR CastHandlerFn(HandlerFn *pfnHandler) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return DataPointerToThumbCode(pfnHandler); #else return (UINT_PTR)pfnHandler; @@ -3326,11 +3326,11 @@ static inline UINT_PTR CastHandlerFn(HandlerFn *pfnHandler) static inline UINT_PTR *GetFirstNonVolatileRegisterAddress(PCONTEXT pContextRecord) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) return (UINT_PTR*)&(pContextRecord->R4); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return (UINT_PTR*)&(pContextRecord->X19); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) return (UINT_PTR*)&(pContextRecord->Edi); #else PORTABILITY_ASSERT("GetFirstNonVolatileRegisterAddress"); @@ -3340,9 +3340,9 @@ static inline UINT_PTR *GetFirstNonVolatileRegisterAddress(PCONTEXT pContextReco static inline TADDR GetFrameRestoreBase(PCONTEXT pContextRecord) { -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) return GetSP(pContextRecord); -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) return pContextRecord->Ebp; #else PORTABILITY_ASSERT("GetFrameRestoreBase"); @@ -3612,13 +3612,13 @@ void ExceptionTracker::PopTrackers( while (pTracker) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // When we are about to pop off a tracker, it should // have a stack range setup. // It is not true on PAL where the scanned stack range needs to // be reset after unwinding a sequence of native frames. _ASSERTE(!pTracker->m_ScannedStackRange.IsEmpty()); -#endif // FEATURE_PAL +#endif // TARGET_UNIX ExceptionTracker* pPrev = pTracker->m_pPrevNestedInfo; @@ -3709,11 +3709,11 @@ ExceptionTracker* ExceptionTracker::GetOrCreateTracker( { fTransitionFromSecondToFirstPass = fIsFirstPass && !pTracker->IsInFirstPass(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // We don't check this on PAL where the scanned stack range needs to // be reset after unwinding a sequence of native frames. CONSISTENCY_CHECK(!pTracker->m_ScannedStackRange.IsEmpty()); -#endif // FEATURE_PAL +#endif // TARGET_UNIX if (pTracker->m_ExceptionFlags.IsRethrown()) { @@ -4402,7 +4402,7 @@ static void DoEHLog( } #endif // _DEBUG -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX //--------------------------------------------------------------------------------------- // @@ -4824,7 +4824,7 @@ VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHar throw std::move(ex); } -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#if defined(TARGET_AMD64) || defined(TARGET_X86) /*++ Function : @@ -4977,11 +4977,11 @@ DWORD64 GetModRMOperandValue(BYTE rex, BYTE* ip, PCONTEXT pContext, bool is8Bit, // Check for Displacement only addressing mode for x86 if ((mod == 0) && (rm == 5)) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) result = (DWORD64)ip + sizeof(INT32) + *(INT32*)ip; #else result = (DWORD64)(*(DWORD*)ip); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } else { @@ -5150,7 +5150,7 @@ bool IsDivByZeroAnIntegerOverflow(PCONTEXT pContext) // must have been an overflow. 
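The GetModRMOperandValue hunk above covers the displacement-only encoding (mod == 0, rm == 5), which is RIP-relative in 64-bit mode but an absolute 32-bit address on x86. A stripped-down sketch of just that case, assuming ip points at the 4-byte displacement (the real decoder also has to account for any immediate operand that follows; the function name is illustrative):

    #include <cstdint>

    uint64_t Disp32OnlyOperandAddress(const uint8_t* ip)
    {
        int32_t disp = *reinterpret_cast<const int32_t*>(ip);
    #if defined(TARGET_AMD64)
        // RIP-relative: measured from the end of the displacement field
        return reinterpret_cast<uint64_t>(ip) + sizeof(int32_t) + disp;
    #else
        // 32-bit x86: the displacement is the absolute address itself
        return static_cast<uint32_t>(disp);
    #endif
    }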
return divisor != 0; } -#endif // _TARGET_AMD64_ || _TARGET_X86_ +#endif // TARGET_AMD64 || TARGET_X86 BOOL IsSafeToCallExecutionManager() { @@ -5229,7 +5229,7 @@ BOOL HandleHardwareException(PAL_SEHException* ex) return TRUE; } -#if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#if defined(TARGET_AMD64) || defined(TARGET_X86) // It is possible that an overflow was mapped to a divide-by-zero exception. // This happens when we try to divide the maximum negative value of a // signed integer with -1. @@ -5242,7 +5242,7 @@ BOOL HandleHardwareException(PAL_SEHException* ex) // The exception was an integer overflow, so augment the exception code. ex->GetExceptionRecord()->ExceptionCode = EXCEPTION_INT_OVERFLOW; } -#endif // _TARGET_AMD64_ || _TARGET_X86_ +#endif // TARGET_AMD64 || TARGET_X86 // Create frame necessary for the exception handling FrameWithCookie fef; @@ -5290,7 +5290,7 @@ BOOL HandleHardwareException(PAL_SEHException* ex) // See https://static.docs.arm.com/ddi0487/db/DDI0487D_b_armv8_arm.pdf#page=6916&zoom=100,0,152 // at aarch64/exceptions/debug/AArch64.SoftwareBreakpoint // However, the rest of the code expects that it points to an instruction after the break. -#if defined(__linux__) && (defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) +#if defined(__linux__) && (defined(TARGET_ARM) || defined(TARGET_ARM64)) if (ex->GetExceptionRecord()->ExceptionCode == STATUS_BREAKPOINT) { SetIP(ex->GetContextRecord(), GetIP(ex->GetContextRecord()) + CORDbg_BREAK_INSTRUCTION_SIZE); @@ -5324,9 +5324,9 @@ BOOL HandleHardwareException(PAL_SEHException* ex) return FALSE; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX void ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord, UINT_PTR ReturnValue, UINT_PTR TargetIP, UINT_PTR TargetFrameSp) { PVOID TargetFrame = (PVOID)TargetFrameSp; @@ -5342,7 +5342,7 @@ void ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord, UINT_PTR ReturnValue, UINT_ // doesn't return UNREACHABLE(); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void TrackerAllocator::Init() { @@ -5474,7 +5474,7 @@ void TrackerAllocator::FreeTrackerMemory(ExceptionTracker* pTracker) FastInterlockExchangePointer(&(pTracker->m_pThread), NULL); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This is Windows specific implementation as it is based upon the notion of collided unwind that is specific // to Windows 64bit. // @@ -5507,7 +5507,7 @@ void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pCo pDispatcherContext->ControlPc = (UINT_PTR) GetIP(pDispatcherContext->ContextRecord); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) // Since this routine is used to fixup contexts for async exceptions, // clear the CONTEXT_UNWOUND_TO_CALL flag since, semantically, frames // where such exceptions have happened do not have callsites. On a similar @@ -5527,15 +5527,15 @@ void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pCo // be fixing it at their end, in their implementation of collided unwind. 
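For readers unfamiliar with the quirk IsDivByZeroAnIntegerOverflow handles: on x86/x64, IDIV raises the same #DE fault for a genuine division by zero and for INT_MIN / -1, whose mathematically correct quotient does not fit in a 32-bit signed integer, which is why the handler above re-tags the exception as EXCEPTION_INT_OVERFLOW when the decoded divisor is non-zero. A one-line check of the underlying arithmetic:

    #include <climits>

    // The magnitude of INT_MIN is one past INT_MAX, so INT_MIN / -1 overflows a 32-bit int.
    static_assert(-static_cast<long long>(INT_MIN) == static_cast<long long>(INT_MAX) + 1,
                  "INT_MIN / -1 cannot be represented as a 32-bit int");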
pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_DEBUG_REGISTERS; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // But keep the architecture flag set (its part of CONTEXT_DEBUG_REGISTERS) pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM; -#else // _TARGET_ARM64_ +#else // TARGET_ARM64 // But keep the architecture flag set (its part of CONTEXT_DEBUG_REGISTERS) pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM64; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 INDEBUG(pDispatcherContext->FunctionEntry = (PT_RUNTIME_FUNCTION)INVALID_POINTER_CD); INDEBUG(pDispatcherContext->ImageBase = INVALID_POINTER_CD); @@ -5753,7 +5753,7 @@ FixContextHandler(IN PEXCEPTION_RECORD pExceptionRecord // (which was broken when we whacked the IP to get control over the thread) return ExceptionCollidedUnwind; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef _DEBUG // IsSafeToUnwindFrameChain: @@ -5847,7 +5847,7 @@ void CleanUpForSecondPass(Thread* pThread, bool fIsSO, LPVOID MemoryStackFpForFr } } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX typedef enum { @@ -5880,7 +5880,7 @@ UnhandledExceptionHandlerUnix( return _URC_FATAL_PHASE1_ERROR; } -#else // FEATURE_PAL +#else // TARGET_UNIX EXTERN_C EXCEPTION_DISPOSITION UMThunkUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord @@ -6030,7 +6030,7 @@ CallDescrWorkerUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionReco return retVal; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifdef FEATURE_COMINTEROP EXTERN_C EXCEPTION_DISPOSITION @@ -6049,7 +6049,7 @@ ReverseComUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord } #endif // FEATURE_COMINTEROP -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX EXTERN_C EXCEPTION_DISPOSITION FixRedirectContextHandler( IN PEXCEPTION_RECORD pExceptionRecord @@ -6083,7 +6083,7 @@ FixRedirectContextHandler( // (which was broken when we whacked the IP to get control over the thread) return ExceptionCollidedUnwind; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // DACCESS_COMPILE void ExceptionTracker::StackRange::Reset() @@ -6139,7 +6139,7 @@ void ExceptionTracker::StackRange::CombineWith(StackFrame sfCurrent, StackRange* } else { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // When the current range is empty, copy the low bound too. Otherwise a degenerate range would get // created and tests for stack frame in the stack range would always fail. // TODO: Check if we could enable it for non-PAL as well. @@ -6147,7 +6147,7 @@ void ExceptionTracker::StackRange::CombineWith(StackFrame sfCurrent, StackRange* { m_sfLowBound = pPreviousRange->m_sfLowBound; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX m_sfHighBound = pPreviousRange->m_sfHighBound; } } @@ -6815,7 +6815,7 @@ StackFrame ExceptionTracker::FindParentStackFrameHelper(CrawlFrame* pCF, // Check if the caller IP is in mscorwks. If it is not, then it is an out-of-line finally. // Normally, the caller of a finally is ExceptionTracker::CallHandler(). 
-#ifdef FEATURE_PAL +#ifdef TARGET_UNIX fIsCallerInVM = !ExecutionManager::IsManagedCode(callerIP); #else #if defined(DACCESS_COMPILE) @@ -6824,7 +6824,7 @@ StackFrame ExceptionTracker::FindParentStackFrameHelper(CrawlFrame* pCF, HMODULE_TGT hEE = g_pMSCorEE; #endif // !DACCESS_COMPILE fIsCallerInVM = IsIPInModule(hEE, callerIP); -#endif // FEATURE_PAL +#endif // TARGET_UNIX if (!fIsCallerInVM) { @@ -7108,16 +7108,16 @@ void ExceptionTracker::ReleaseResources() } m_StackTraceInfo.FreeStackTrace(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Clear any held Watson Bucketing details GetWatsonBucketTracker()->ClearWatsonBucketDetails(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX if (m_fOwnsExceptionPointers) { PAL_FreeExceptionRecords(m_ptrs.ExceptionRecord, m_ptrs.ContextRecord); m_fOwnsExceptionPointers = FALSE; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // DACCESS_COMPILE } diff --git a/src/coreclr/src/vm/exceptionhandling.h b/src/coreclr/src/vm/exceptionhandling.h index bc58000b7c854..8c245daaa6e18 100644 --- a/src/coreclr/src/vm/exceptionhandling.h +++ b/src/coreclr/src/vm/exceptionhandling.h @@ -13,9 +13,9 @@ #include "eexcp.h" #include "exstatecommon.h" -#if defined(_TARGET_ARM_) || defined(_TARGET_X86_) +#if defined(TARGET_ARM) || defined(TARGET_X86) #define USE_PER_FRAME_PINVOKE_INIT -#endif // _TARGET_ARM_ || _TARGET_X86_ +#endif // TARGET_ARM || TARGET_X86 // This address lies in the NULL pointer partition of the process memory. // Accessing it will result in AV. @@ -69,10 +69,10 @@ class ExceptionTracker m_StackTraceInfo.Init(); #endif // DACCESS_COMPILE -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Init the WatsonBucketTracker m_WatsonBucketTracker.Init(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_CORRUPTING_EXCEPTIONS // Initialize the default exception severity to NotCorrupting @@ -99,7 +99,7 @@ class ExceptionTracker m_pLimitFrame = NULL; m_csfEHClauseOfCollapsedTracker.Clear(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_fOwnsExceptionPointers = FALSE; #endif } @@ -129,10 +129,10 @@ class ExceptionTracker m_StackTraceInfo.Init(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Init the WatsonBucketTracker m_WatsonBucketTracker.Init(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_CORRUPTING_EXCEPTIONS // Initialize the default exception severity to NotCorrupting @@ -158,7 +158,7 @@ class ExceptionTracker m_pInitialExplicitFrame = NULL; m_csfEHClauseOfCollapsedTracker.Clear(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_fOwnsExceptionPointers = FALSE; #endif } @@ -299,7 +299,7 @@ class ExceptionTracker return m_pInitialExplicitFrame; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Reset the range of explicit frames, the limit frame and the scanned // stack range before unwinding a sequence of native frames. These frames // will be in the unwound part of the stack. @@ -309,7 +309,7 @@ class ExceptionTracker m_pLimitFrame = NULL; m_ScannedStackRange.Reset(); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Determines if we have unwound to the specified parent method frame. // Currently this is only used for funclet skipping. 
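One of the few non-mechanical spots in these hunks is the fIsCallerInVM computation a little earlier in exceptionhandling.cpp: on Unix there is no single runtime module whose address range can be tested, so "the caller is inside the VM" is approximated as "the caller is not managed code". The split, reduced to a predicate using the same types and callees as the surrounding code (IsCallerInRuntime itself is a made-up wrapper name):

    bool IsCallerInRuntime(PCODE callerIP)
    {
    #ifdef TARGET_UNIX
        return !ExecutionManager::IsManagedCode(callerIP);   // no runtime module range to test
    #else
        return IsIPInModule(g_pMSCorEE, callerIP);            // address inside the runtime image
    #endif
    }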
@@ -383,7 +383,7 @@ class ExceptionTracker bool IsStackOverflowException(); -#if defined(FEATURE_PAL) && !defined(CROSS_COMPILE) +#if defined(TARGET_UNIX) && !defined(CROSS_COMPILE) void TakeExceptionPointersOwnership(PAL_SEHException* ex) { _ASSERTE(ex->GetExceptionRecord() == m_ptrs.ExceptionRecord); @@ -391,7 +391,7 @@ class ExceptionTracker ex->Clear(); m_fOwnsExceptionPointers = TRUE; } -#endif // FEATURE_PAL && !CROSS_COMPILE +#endif // TARGET_UNIX && !CROSS_COMPILE private: DWORD_PTR @@ -581,7 +581,7 @@ class ExceptionTracker return m_EnclosingClauseInfoOfCollapsedTracker.GetEnclosingClauseCallerSP(); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX private: EHWatsonBucketTracker m_WatsonBucketTracker; public: @@ -590,7 +590,7 @@ class ExceptionTracker LIMITED_METHOD_CONTRACT; return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ExceptionTracker, this, m_WatsonBucketTracker)); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_CORRUPTING_EXCEPTIONS private: @@ -712,7 +712,7 @@ private: ; StackRange m_ScannedStackRange; DAC_EXCEPTION_POINTERS m_ptrs; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX BOOL m_fOwnsExceptionPointers; #endif OBJECTHANDLE m_hThrowable; diff --git a/src/coreclr/src/vm/exceptmacros.h b/src/coreclr/src/vm/exceptmacros.h index c7f81d9358886..38c20de0457f9 100644 --- a/src/coreclr/src/vm/exceptmacros.h +++ b/src/coreclr/src/vm/exceptmacros.h @@ -292,7 +292,7 @@ VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL r void UnwindAndContinueRethrowHelperInsideCatch(Frame* pEntryFrame, Exception* pException); VOID DECLSPEC_NORETURN UnwindAndContinueRethrowHelperAfterCatch(Frame* pEntryFrame, Exception* pException); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHardwareException); #define INSTALL_MANAGED_EXCEPTION_DISPATCHER \ @@ -330,14 +330,14 @@ VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHar UNREACHABLE(); \ } -#else // FEATURE_PAL +#else // TARGET_UNIX #define INSTALL_MANAGED_EXCEPTION_DISPATCHER #define UNINSTALL_MANAGED_EXCEPTION_DISPATCHER #define INSTALL_UNHANDLED_MANAGED_EXCEPTION_TRAP #define UNINSTALL_UNHANDLED_MANAGED_EXCEPTION_TRAP -#endif // FEATURE_PAL +#endif // TARGET_UNIX #define INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \ { \ diff --git a/src/coreclr/src/vm/exinfo.cpp b/src/coreclr/src/vm/exinfo.cpp index 879f047f21da8..8a9ed80d257f5 100644 --- a/src/coreclr/src/vm/exinfo.cpp +++ b/src/coreclr/src/vm/exinfo.cpp @@ -49,10 +49,10 @@ void ExInfo::CopyAndClearSource(ExInfo *from) } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 LOG((LF_EH, LL_INFO100, "In ExInfo::CopyAndClearSource: m_dEsp=%08x, %08x <- [%08x], stackAddress = 0x%p <- 0x%p\n", from->m_dEsp, &(this->m_dEsp), &from->m_dEsp, this->m_StackAddress, from->m_StackAddress)); -#endif // _TARGET_X86_ +#endif // TARGET_X86 // If we have a handle to an exception object in this ExInfo already, then go ahead and destroy it before we // loose it. @@ -75,11 +75,11 @@ void ExInfo::CopyAndClearSource(ExInfo *from) // Finally, initialize the source ExInfo. from->Init(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Clear the Watson Bucketing information as well since they // have been transferred over by the "memcpy" above. 
from->GetWatsonBucketTracker()->Init(); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } void ExInfo::Init() @@ -121,10 +121,10 @@ void ExInfo::Init() m_pTopMostHandlerDuringSO = NULL; -#if defined(_TARGET_X86_) && defined(DEBUGGING_SUPPORTED) +#if defined(TARGET_X86) && defined(DEBUGGING_SUPPORTED) m_InterceptionContext.Init(); m_ValidInterceptionContext = FALSE; -#endif //_TARGET_X86_ && DEBUGGING_SUPPORTED +#endif //TARGET_X86 && DEBUGGING_SUPPORTED } ExInfo::ExInfo() @@ -134,10 +134,10 @@ ExInfo::ExInfo() m_hThrowable = NULL; Init(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Init the WatsonBucketTracker m_WatsonBucketTracker.Init(); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } //******************************************************************************* @@ -205,11 +205,11 @@ void ExInfo::UnwindExInfo(VOID* limit) pPrevNestedInfo->DestroyExceptionHandle(); } - #ifndef FEATURE_PAL + #ifndef TARGET_UNIX // Free the Watson bucket details when ExInfo // is being released pPrevNestedInfo->GetWatsonBucketTracker()->ClearWatsonBucketDetails(); - #endif // FEATURE_PAL + #endif // TARGET_UNIX pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace(); @@ -257,10 +257,10 @@ void ExInfo::UnwindExInfo(VOID* limit) // We just do a basic Init of the current top ExInfo here. Init(); - #ifndef FEATURE_PAL + #ifndef TARGET_UNIX // Init the Watson buckets as well GetWatsonBucketTracker()->ClearWatsonBucketDetails(); - #endif // FEATURE_PAL + #endif // TARGET_UNIX } } #endif // DACCESS_COMPILE diff --git a/src/coreclr/src/vm/exinfo.h b/src/coreclr/src/vm/exinfo.h index ca7e29815a0d2..5e57e0ce77ae8 100644 --- a/src/coreclr/src/vm/exinfo.h +++ b/src/coreclr/src/vm/exinfo.h @@ -79,7 +79,7 @@ class ExInfo // void* m_StackAddress; // A pseudo or real stack location for this record. 
-#ifndef FEATURE_PAL +#ifndef TARGET_UNIX private: EHWatsonBucketTracker m_WatsonBucketTracker; public: @@ -156,7 +156,7 @@ class ExInfo EHClauseInfo m_EHClauseInfo; ExceptionFlags m_ExceptionFlags; -#if defined(_TARGET_X86_) && defined(DEBUGGING_SUPPORTED) +#if defined(TARGET_X86) && defined(DEBUGGING_SUPPORTED) EHContext m_InterceptionContext; BOOL m_ValidInterceptionContext; #endif @@ -175,9 +175,9 @@ class ExInfo ExInfo& operator=(const ExInfo &from); }; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExInfo pStartingEHTracker); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // !FEATURE_EH_FUNCLETS #endif // __ExInfo_h__ diff --git a/src/coreclr/src/vm/exstate.cpp b/src/coreclr/src/vm/exstate.cpp index 3b037d6fa3734..c2c2c0adb63f4 100644 --- a/src/coreclr/src/vm/exstate.cpp +++ b/src/coreclr/src/vm/exstate.cpp @@ -39,10 +39,10 @@ ThreadExceptionState::ThreadExceptionState() m_flag = TEF_None; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Init the UE Watson BucketTracker m_UEWatsonBucketTracker.Init(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_CORRUPTING_EXCEPTIONS // Initialize the default exception severity to NotCorrupting @@ -54,10 +54,10 @@ ThreadExceptionState::ThreadExceptionState() ThreadExceptionState::~ThreadExceptionState() { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Init the UE Watson BucketTracker m_UEWatsonBucketTracker.ClearWatsonBucketDetails(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #if defined(_DEBUG) @@ -384,9 +384,9 @@ BOOL ThreadExceptionState::IsDebuggerInterceptable() !GetFlags()->DebuggerInterceptNotPossible()); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PEXCEPTION_REGISTRATION_RECORD GetClrSEHRecordServicingStackPointer(Thread *pThread, void *pStackPointer); -#endif // _TARGET_X86_ +#endif // TARGET_X86 //--------------------------------------------------------------------------------------- // diff --git a/src/coreclr/src/vm/exstate.h b/src/coreclr/src/vm/exstate.h index ba89ab3994f82..1ea8790a11dc1 100644 --- a/src/coreclr/src/vm/exstate.h +++ b/src/coreclr/src/vm/exstate.h @@ -215,7 +215,7 @@ class ThreadExceptionState private: ThreadExceptionFlag m_flag; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX private: EHWatsonBucketTracker m_UEWatsonBucketTracker; public: @@ -224,7 +224,7 @@ class ThreadExceptionState LIMITED_METHOD_CONTRACT; return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ThreadExceptionState, this, m_UEWatsonBucketTracker)); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX private: @@ -265,7 +265,7 @@ class ThreadExceptionState EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame, DWORD exceptionCode); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 friend LPVOID COMPlusEndCatchWorker(Thread * pThread); #endif @@ -273,10 +273,10 @@ class ThreadExceptionState friend StackWalkAction COMPlusUnwindCallback(CrawlFrame *pCf, ThrowCallbackType *pData); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) friend void ResumeAtJitEH(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, Thread *pThread, BOOL unwindStack); -#endif // _TARGET_X86_ +#endif // TARGET_X86 friend _EXCEPTION_HANDLER_DECL(COMPlusNestedExceptionHandler); @@ -315,7 +315,7 @@ class ThreadExceptionFlagHolder extern BOOL IsWatsonEnabled(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This preprocessor definition is used to capture watson buckets // at AppDomain transition boundary in END_DOMAIN_TRANSITION macro. 
// @@ -355,8 +355,8 @@ extern BOOL IsWatsonEnabled(); SetupWatsonBucketsForNonPreallocatedExceptions(throwable); \ } \ } -#else // !FEATURE_PAL +#else // !TARGET_UNIX #define CAPTURE_BUCKETS_AT_TRANSITION(pThread, oThrowable) -#endif // FEATURE_PAL +#endif // TARGET_UNIX #endif // __ExState_h__ diff --git a/src/coreclr/src/vm/exstatecommon.h b/src/coreclr/src/vm/exstatecommon.h index 40728a930f796..e1bf685f64dc5 100644 --- a/src/coreclr/src/vm/exstatecommon.h +++ b/src/coreclr/src/vm/exstatecommon.h @@ -452,7 +452,7 @@ class TypeOfReportedError }; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This class is used to track Watson bucketing information for an exception. typedef DPTR(class EHWatsonBucketTracker) PTR_EHWatsonBucketTracker; class EHWatsonBucketTracker @@ -519,6 +519,6 @@ BOOL SetupWatsonBucketsForNonPreallocatedExceptions(OBJECTREF oThrowable = NULL) PTR_EHWatsonBucketTracker GetWatsonBucketTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, BOOL fCaptureBucketsIfNotPresent, BOOL fStartSearchFromPreviousTracker = FALSE); BOOL IsThrowableThreadAbortException(OBJECTREF oThrowable); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // __ExStateCommon_h__ diff --git a/src/coreclr/src/vm/fcall.cpp b/src/coreclr/src/vm/fcall.cpp index 344b04a33c232..51c1f0147ad66 100644 --- a/src/coreclr/src/vm/fcall.cpp +++ b/src/coreclr/src/vm/fcall.cpp @@ -151,7 +151,7 @@ NOINLINE Object* FC_GCPoll(void* __me, Object* objToProtect) #ifdef ENABLE_CONTRACTS /**************************************************************************************/ -#if defined(_TARGET_X86_) && defined(ENABLE_PERF_COUNTERS) +#if defined(TARGET_X86) && defined(ENABLE_PERF_COUNTERS) static __int64 getCycleCount() { LIMITED_METHOD_CONTRACT; @@ -250,7 +250,7 @@ DEBUG_NOINLINE FCallCheck::~FCallCheck() } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) FCallTransitionState::FCallTransitionState () @@ -398,6 +398,6 @@ CompletedFCallTransitionState::~CompletedFCallTransitionState () } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // ENABLE_CONTRACTS diff --git a/src/coreclr/src/vm/fcall.h b/src/coreclr/src/vm/fcall.h index 42bbe8ec74b03..533efb1c2e1b0 100644 --- a/src/coreclr/src/vm/fcall.h +++ b/src/coreclr/src/vm/fcall.h @@ -232,7 +232,7 @@ // whether it is an issue on x86. //============================================================================================== -#if defined(_TARGET_AMD64_) && !defined(FEATURE_PAL) +#if defined(TARGET_AMD64) && !defined(TARGET_UNIX) // // On AMD64 this is accomplished by including a setjmp anywhere in a function. 
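The CAPTURE_BUCKETS_AT_TRANSITION hunk above illustrates a pattern several of these files rely on: call sites compile unchanged on every platform, and the macro expands to nothing where the feature (Watson bucketing) does not exist. In miniature, with CaptureBuckets standing in as a placeholder body rather than a real runtime function:

    #ifndef TARGET_UNIX
    #define CAPTURE_BUCKETS_AT_TRANSITION(pThread, oThrowable) \
            CaptureBuckets((pThread), (oThrowable))
    #else  // TARGET_UNIX
    #define CAPTURE_BUCKETS_AT_TRANSITION(pThread, oThrowable)   // expands to nothing
    #endif // TARGET_UNIX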
@@ -372,7 +372,7 @@ LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR ar #define F_CALL_VA_CONV __cdecl -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Choose the appropriate calling convention for FCALL helpers on the basis of the JIT calling convention #ifdef __GNUC__ @@ -387,13 +387,13 @@ LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR ar #endif // !__GNUC__ #define SWIZZLE_STKARG_ORDER -#else // _TARGET_X86_ +#else // TARGET_X86 // // non-x86 platforms don't have messed-up calling convention swizzling // #define F_CALL_CONV -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #ifdef SWIZZLE_STKARG_ORDER #ifdef SWIZZLE_REGARG_ORDER @@ -1304,7 +1304,7 @@ typedef LPVOID FC_BOOL_RET; #else -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // The return value is artifically widened on x86 and amd64 typedef INT32 FC_BOOL_RET; #else @@ -1316,7 +1316,7 @@ typedef CLR_BOOL FC_BOOL_RET; #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // The return value is artifically widened on x86 and amd64 typedef UINT32 FC_CHAR_RET; typedef INT32 FC_INT8_RET; diff --git a/src/coreclr/src/vm/fieldmarshaler.cpp b/src/coreclr/src/vm/fieldmarshaler.cpp index e331729e4d4bd..438dc1fb48458 100644 --- a/src/coreclr/src/vm/fieldmarshaler.cpp +++ b/src/coreclr/src/vm/fieldmarshaler.cpp @@ -107,7 +107,7 @@ VOID ParseNativeType(Module* pModule, *pNFD = NativeFieldDescriptor(NATIVE_FIELD_CATEGORY_BLITTABLE_INTEGER, sizeof(INT32), alignof(INT32)); break; case MarshalInfo::MARSHAL_TYPE_GENERIC_8: -#if defined(_TARGET_X86_) && defined(UNIX_X86_ABI) +#if defined(TARGET_X86) && defined(UNIX_X86_ABI) *pNFD = NativeFieldDescriptor(NATIVE_FIELD_CATEGORY_BLITTABLE_INTEGER, sizeof(INT64), 4); #else *pNFD = NativeFieldDescriptor(NATIVE_FIELD_CATEGORY_BLITTABLE_INTEGER, sizeof(INT64), sizeof(INT64)); @@ -131,7 +131,7 @@ VOID ParseNativeType(Module* pModule, *pNFD = NativeFieldDescriptor(NATIVE_FIELD_CATEGORY_R4, sizeof(float), sizeof(float)); break; case MarshalInfo::MARSHAL_TYPE_DOUBLE: -#if defined(_TARGET_X86_) && defined(UNIX_X86_ABI) +#if defined(TARGET_X86) && defined(UNIX_X86_ABI) *pNFD = NativeFieldDescriptor(NATIVE_FIELD_CATEGORY_R8, sizeof(double), 4); #else *pNFD = NativeFieldDescriptor(NATIVE_FIELD_CATEGORY_R8, sizeof(double), sizeof(double)); diff --git a/src/coreclr/src/vm/finalizerthread.cpp b/src/coreclr/src/vm/finalizerthread.cpp index 13953ef56b1f1..e313c2ec4eba4 100644 --- a/src/coreclr/src/vm/finalizerthread.cpp +++ b/src/coreclr/src/vm/finalizerthread.cpp @@ -475,10 +475,10 @@ void FinalizerThread::FinalizerThreadCreate() MODE_ANY; } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX MHandles[kLowMemoryNotification] = CreateMemoryResourceNotification(LowMemoryResourceNotification); -#endif // FEATURE_PAL +#endif // TARGET_UNIX hEventFinalizerDone = new CLREvent(); hEventFinalizerDone->CreateManualEvent(FALSE); diff --git a/src/coreclr/src/vm/frames.cpp b/src/coreclr/src/vm/frames.cpp index 46a691c035d8a..246ad2259642f 100644 --- a/src/coreclr/src/vm/frames.cpp +++ b/src/coreclr/src/vm/frames.cpp @@ -64,7 +64,7 @@ void Frame::Log() { MethodDesc* method = GetFunction(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (GetVTablePtr() == UMThkCallFrame::GetMethodFrameVPtr()) method = ((UMThkCallFrame*) this)->GetUMEntryThunk()->GetMethod(); #endif @@ -75,7 +75,7 @@ void Frame::Log() { const char* frameType; if (GetVTablePtr() == 
PrestubMethodFrame::GetMethodFrameVPtr()) frameType = "PreStub"; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (GetVTablePtr() == UMThkCallFrame::GetMethodFrameVPtr()) frameType = "UMThkCallFrame"; #endif @@ -127,7 +127,7 @@ void __stdcall Frame::LogTransition(Frame* frame) BEGIN_ENTRYPOINT_VOIDRET; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On x86, StubLinkerCPU::EmitMethodStubProlog calls Frame::LogTransition // but the caller of EmitMethodStubProlog sets the GSCookie later on. // So the cookie is not initialized by the point we get here. @@ -157,7 +157,7 @@ void __stdcall Frame::LogTransition(Frame* frame) bool isLegalManagedCodeCaller(PCODE retAddr) { WRAPPER_NO_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // we expect to be called from JITTED code or from special code sites inside // mscorwks like callDescr which we have put a NOP (0x90) so we know that they @@ -197,9 +197,9 @@ bool isLegalManagedCodeCaller(PCODE retAddr) { _ASSERTE(!"Bad return address on stack"); return false; -#else // _TARGET_X86_ +#else // TARGET_X86 return true; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #endif //0 @@ -456,7 +456,7 @@ VOID Frame::Pop(Thread *pThread) m_Next = NULL; } -#if defined(FEATURE_PAL) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_UNIX) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) void Frame::PopIfChained() { CONTRACTL @@ -475,7 +475,7 @@ void Frame::PopIfChained() Pop(); } } -#endif // FEATURE_PAL && !DACCESS_COMPILE && !CROSSGEN_COMPILE +#endif // TARGET_UNIX && !DACCESS_COMPILE && !CROSSGEN_COMPILE //----------------------------------------------------------------------- #endif // #ifndef DACCESS_COMPILE @@ -522,7 +522,7 @@ TADDR TransitionFrame::GetAddrOfThis() VASigCookie * TransitionFrame::GetVASigCookie() { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) LIMITED_METHOD_CONTRACT; return dac_cast( *dac_cast(GetTransitionBlock() + @@ -855,7 +855,7 @@ void DynamicHelperFrame::GcScanRoots(promote_func *fn, ScanContext* sc) if (m_dynamicHelperFrameFlags & DynamicHelperFrameFlags_ObjectArg) { TADDR pArgument = GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // x86 is special as always pArgument += offsetof(ArgumentRegisters, ECX); #endif @@ -865,7 +865,7 @@ void DynamicHelperFrame::GcScanRoots(promote_func *fn, ScanContext* sc) if (m_dynamicHelperFrameFlags & DynamicHelperFrameFlags_ObjectArg2) { TADDR pArgument = GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // x86 is special as always pArgument += offsetof(ArgumentRegisters, EDX); #else @@ -1138,7 +1138,7 @@ void HijackFrame::GcScanRoots(promote_func *fn, ScanContext* sc) switch (r) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 case RT_Float: // Fall through #endif case RT_Scalar: @@ -1336,7 +1336,7 @@ void TransitionFrame::PromoteCallerStackHelper(promote_func* fn, ScanContext* sc } } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 UINT TransitionFrame::CbStackPopUsingGCRefMap(PTR_BYTE pGCRefMap) { LIMITED_METHOD_CONTRACT; @@ -1352,7 +1352,7 @@ void TransitionFrame::PromoteCallerStackUsingGCRefMap(promote_func* fn, ScanCont GCRefMapDecoder decoder(pGCRefMap); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Skip StackPop decoder.ReadStackPop(); #endif @@ -1366,7 +1366,7 @@ void TransitionFrame::PromoteCallerStackUsingGCRefMap(promote_func* fn, ScanCont int ofs; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 ofs = (pos < NUM_ARGUMENT_REGISTERS) ? 
(TransitionBlock::GetOffsetOfArgumentRegisters() + ARGUMENTREGISTERS_SIZE - (pos + 1) * sizeof(TADDR)) : (TransitionBlock::GetOffsetOfArgs() + (pos - NUM_ARGUMENT_REGISTERS) * sizeof(TADDR)); @@ -1546,7 +1546,7 @@ BOOL TransitionFrame::Protects(OBJECTREF * ppORef) #ifdef FEATURE_COMINTEROP -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Return the # of stack bytes pushed by the unmanaged caller. UINT ComMethodFrame::GetNumCallerStackBytes() { @@ -1559,7 +1559,7 @@ UINT ComMethodFrame::GetNumCallerStackBytes() // compute the callee pop stack bytes return pCMD->GetNumStackBytes(); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifndef DACCESS_COMPILE void ComMethodFrame::DoSecondPassHandlerCleanup(Frame * pCurFrame) @@ -1600,7 +1600,7 @@ void ComMethodFrame::DoSecondPassHandlerCleanup(Frame * pCurFrame) #endif // FEATURE_COMINTEROP -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PTR_UMEntryThunk UMThkCallFrame::GetUMEntryThunk() { @@ -1623,11 +1623,11 @@ void UMThkCallFrame::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) } #endif -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifndef DACCESS_COMPILE -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif @@ -1684,7 +1684,7 @@ void HelperMethodFrame::Pop() PopSlowHelper(); } -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations #endif @@ -1887,15 +1887,15 @@ BOOL MulticastFrame::TraceFrame(Thread *thread, BOOL fromPatch, BYTE *pbDel = NULL; int delegateCount = 0; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // At this point the counter hasn't been incremented yet. delegateCount = *regs->GetEdiLocation() + 1; pbDel = *(BYTE **)( (size_t)*regs->GetEsiLocation() + GetOffsetOfTransitionBlock() + ArgIterator::GetThisOffset()); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // At this point the counter hasn't been incremented yet. delegateCount = (int)regs->pCurrentContext->Rdi + 1; pbDel = *(BYTE **)( (size_t)(regs->pCurrentContext->Rsi) + GetOffsetOfTransitionBlock() + ArgIterator::GetThisOffset()); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // At this point the counter has not yet been incremented. Counter is in R7, frame pointer in R4. 
delegateCount = regs->pCurrentContext->R7 + 1; pbDel = *(BYTE **)( (size_t)(regs->pCurrentContext->R4) + GetOffsetOfTransitionBlock() + ArgIterator::GetThisOffset()); diff --git a/src/coreclr/src/vm/frames.h b/src/coreclr/src/vm/frames.h index e4c5372b015ff..490547b1a7d45 100644 --- a/src/coreclr/src/vm/frames.h +++ b/src/coreclr/src/vm/frames.h @@ -104,7 +104,7 @@ // | +-ComPrestubMethodFrame - prestub frame for calls from COM to CLR // | #endif //FEATURE_COMINTEROP -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // | +-UMThkCallFrame - this frame represents an unmanaged->managed // | transition through N/Direct #endif @@ -238,7 +238,7 @@ FRAME_TYPE_NAME(DebuggerClassInitMarkFrame) FRAME_TYPE_NAME(DebuggerSecurityCodeMarkFrame) FRAME_TYPE_NAME(DebuggerExitFrame) FRAME_TYPE_NAME(DebuggerU2MCatchHandlerFrame) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 FRAME_TYPE_NAME(UMThkCallFrame) #endif FRAME_TYPE_NAME(InlinedCallFrame) @@ -255,7 +255,7 @@ FRAME_TYPE_NAME(AssumeByrefFromJITStack) #ifndef __frames_h__ #define __frames_h__ -#if defined(_MSC_VER) && defined(_TARGET_X86_) && !defined(FPO_ON) +#if defined(_MSC_VER) && defined(TARGET_X86) && !defined(FPO_ON) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #define FPO_ON 1 #define FRAMES_TURNED_FPO_ON 1 @@ -302,7 +302,7 @@ class ComCallMethodDesc; #ifndef DACCESS_COMPILE -#if defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_UNIX) && !defined(CROSSGEN_COMPILE) #define DEFINE_DTOR(klass) \ public: \ @@ -312,7 +312,7 @@ class ComCallMethodDesc; #define DEFINE_DTOR(klass) -#endif // FEATURE_PAL && !CROSSGEN_COMPILE +#endif // TARGET_UNIX && !CROSSGEN_COMPILE #define DEFINE_VTABLE_GETTER(klass) \ public: \ @@ -754,7 +754,7 @@ class Frame : public FrameBase #ifdef _DEBUG friend LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo); #endif -#ifdef BIT64 +#ifdef HOST_64BIT friend Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubSecretArg); #endif #ifdef FEATURE_EH_FUNCLETS @@ -779,11 +779,11 @@ class Frame : public FrameBase LIMITED_METHOD_CONTRACT; } -#if defined(FEATURE_PAL) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_UNIX) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) virtual ~Frame() { LIMITED_METHOD_CONTRACT; } void PopIfChained(); -#endif // FEATURE_PAL && !DACCESS_COMPILE && !CROSSGEN_COMPILE +#endif // TARGET_UNIX && !DACCESS_COMPILE && !CROSSGEN_COMPILE }; @@ -990,7 +990,7 @@ class TransitionFrame : public Frame } virtual void UpdateRegDisplay(const PREGDISPLAY); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void UpdateRegDisplayHelper(const PREGDISPLAY, UINT cbStackPop); #endif @@ -1006,7 +1006,7 @@ class TransitionFrame : public Frame void PromoteCallerStackUsingGCRefMap(promote_func* fn, ScanContext* sc, PTR_BYTE pGCRefMap); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 UINT CbStackPopUsingGCRefMap(PTR_BYTE pGCRefMap); #endif @@ -1029,13 +1029,13 @@ class FaultingExceptionFrame : public Frame friend class CheckAsmOffsets; #ifndef FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 DWORD m_Esp; CalleeSavedRegisters m_regs; TADDR m_ReturnAddress; -#else // _TARGET_X86_ +#else // TARGET_X86 #error "Unsupported architecture" -#endif // _TARGET_X86_ +#endif // TARGET_X86 #else // FEATURE_EH_FUNCLETS BOOL m_fFilterExecuted; // Flag for FirstCallToHandler TADDR m_ReturnAddress; @@ -1075,12 +1075,12 @@ class FaultingExceptionFrame : public Frame #ifndef FEATURE_EH_FUNCLETS CalleeSavedRegisters 
*GetCalleeSavedRegisters() { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 LIMITED_METHOD_DAC_CONTRACT; return &m_regs; #else PORTABILITY_ASSERT("GetCalleeSavedRegisters"); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #endif // FEATURE_EH_FUNCLETS @@ -1684,7 +1684,7 @@ class FramedMethodFrame : public TransitionFrame #ifdef COM_STUBS_SEPARATE_FP_LOCATIONS static int GetFPArgOffset(int iArg) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Floating point spill area is between return value and transition block for frames that need it // (code:TPMethodFrame and code:ComPlusMethodFrame) return -(4 * 0x10 /* floating point args */ + 0x8 /* alignment pad */ + TransitionBlock::GetNegSpaceSize()) + (iArg * 0x10); @@ -1828,7 +1828,7 @@ class UnmanagedToManagedFrame : public Frame static BYTE GetOffsetOfArgs() { LIMITED_METHOD_DAC_CONTRACT; -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) size_t ofs = offsetof(UnmanagedToManagedFrame, m_argumentRegisters); #else size_t ofs = sizeof(UnmanagedToManagedFrame); @@ -1850,7 +1850,7 @@ class UnmanagedToManagedFrame : public Frame return offsetof(UnmanagedToManagedFrame, m_pvDatum); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static int GetOffsetOfCalleeSavedRegisters() { LIMITED_METHOD_CONTRACT; @@ -1883,14 +1883,14 @@ class UnmanagedToManagedFrame : public Frame protected: TADDR m_pvDatum; // type depends on the sub class -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) CalleeSavedRegisters m_calleeSavedRegisters; TADDR m_ReturnAddress; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) TADDR m_R11; // R11 chain TADDR m_ReturnAddress; ArgumentRegisters m_argumentRegisters; -#elif defined (_TARGET_ARM64_) +#elif defined (TARGET_ARM64) TADDR m_fp; TADDR m_ReturnAddress; TADDR m_x8; // ret buff arg @@ -1913,7 +1913,7 @@ class ComMethodFrame : public UnmanagedToManagedFrame public: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Return the # of stack bytes pushed by the unmanaged caller. 
UINT GetNumCallerStackBytes(); #endif @@ -2021,9 +2021,9 @@ class PInvokeCalliFrame : public FramedMethodFrame return m_pVASigCookie; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 virtual void UpdateRegDisplay(const PREGDISPLAY); -#endif // _TARGET_X86_ +#endif // TARGET_X86 BOOL TraceFrame(Thread *thread, BOOL fromPatch, TraceDestination *trace, REGDISPLAY *regs) @@ -2160,10 +2160,10 @@ class StubDispatchFrame : public FramedMethodFrame // Returns this frame GC ref map if it has one PTR_BYTE GetGCRefMap(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 virtual void UpdateRegDisplay(const PREGDISPLAY pRD); virtual PCODE GetReturnAddress(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 PCODE GetUnadjustedReturnAddress() { @@ -2303,7 +2303,7 @@ class ExternalMethodFrame : public FramedMethodFrame Interception GetInterception(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 virtual void UpdateRegDisplay(const PREGDISPLAY pRD); #endif @@ -2325,7 +2325,7 @@ class DynamicHelperFrame : public FramedMethodFrame virtual void GcScanRoots(promote_func *fn, ScanContext* sc); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 virtual void UpdateRegDisplay(const PREGDISPLAY pRD); #endif @@ -2761,7 +2761,7 @@ typedef DPTR(class UMThunkMarshInfo) PTR_UMThunkMarshInfo; class UMEntryThunk; typedef DPTR(class UMEntryThunk) PTR_UMEntryThunk; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) //------------------------------------------------------------------------ // This frame guards an unmanaged->managed transition thru a UMThk //------------------------------------------------------------------------ @@ -2789,9 +2789,9 @@ class UMThkCallFrame : public UnmanagedToManagedFrame // Keep as last entry in class DEFINE_VTABLE_GETTER_AND_CTOR_AND_DTOR(UMThkCallFrame) }; -#endif // _TARGET_X86_ && !FEATURE_PAL +#endif // TARGET_X86 && !TARGET_UNIX -#if defined(_TARGET_X86_) && defined(FEATURE_COMINTEROP) +#if defined(TARGET_X86) && defined(FEATURE_COMINTEROP) //------------------------------------------------------------------------- // Exception handler for COM to managed frame // and the layout of the exception registration record structure in the stack @@ -2811,7 +2811,7 @@ struct ComToManagedExRecord return &m_frame; } }; -#endif // _TARGET_X86_ && FEATURE_COMINTEROP +#endif // TARGET_X86 && FEATURE_COMINTEROP //------------------------------------------------------------------------ @@ -2840,12 +2840,12 @@ class InlinedCallFrame : public Frame { WRAPPER_NO_CONTRACT; -#ifdef BIT64 +#ifdef HOST_64BIT // See code:GenericPInvokeCalliHelper return ((m_Datum != NULL) && !(dac_cast(m_Datum) & 0x1)); -#else // BIT64 +#else // HOST_64BIT return ((dac_cast(m_Datum) & ~0xffff) != 0); -#endif // BIT64 +#endif // HOST_64BIT } // Retrieves the return address into the code that called out @@ -2872,7 +2872,7 @@ class InlinedCallFrame : public Frame // method if the current InlinedCallFrame is inactive. PTR_MethodDesc GetActualInteropMethodDesc() { -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) // Important: This code relies on the way JIT lays out frames. Keep it in sync // with code:Compiler.lvaAssignFrameOffsets. // @@ -2889,14 +2889,14 @@ class InlinedCallFrame : public Frame // Extract the actual MethodDesc to report from the InlinedCallFrame. 
TADDR addr = dac_cast(this) + sizeof(InlinedCallFrame); return PTR_MethodDesc(*PTR_TADDR(addr)); -#elif defined(BIT64) +#elif defined(HOST_64BIT) // On 64bit, the actual interop MethodDesc is saved off in a field off the InlinedCrawlFrame // which is populated by the JIT. Refer to JIT_InitPInvokeFrame for details. return PTR_MethodDesc(m_StubSecretArg); #else _ASSERTE(!"NYI - Interop method reporting for this architecture!"); return NULL; -#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#endif // defined(TARGET_X86) || defined(TARGET_ARM) } virtual void UpdateRegDisplay(const PREGDISPLAY); @@ -2907,12 +2907,12 @@ class InlinedCallFrame : public Frame // See code:HasFunction. PTR_NDirectMethodDesc m_Datum; -#ifdef BIT64 +#ifdef HOST_64BIT // IL stubs fill this field with the incoming secret argument when they erect // InlinedCallFrame so we know which interop method was invoked even if the frame // is not active at the moment. PTR_VOID m_StubSecretArg; -#endif // BIT64 +#endif // HOST_64BIT // X86: ESP after pushing the outgoing arguments, and just before calling // out to unmanaged code. @@ -2937,12 +2937,12 @@ class InlinedCallFrame : public Frame // stubs, since there is no easy way to inline an implementation of GetThread. PTR_VOID m_pThread; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Store the value of SP after prolog to ensure we can unwind functions that use // stackalloc. In these functions, the m_pCallSiteSP can already be augmented by // the stackalloc size, which is variable. TADDR m_pSPAfterProlog; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM public: //--------------------------------------------------------------- @@ -3007,7 +3007,7 @@ class InlinedCallFrame : public Frame bool isRetAddr(TADDR retAddr, TADDR* whereCalled); //------------------------------------------------------------------------ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // This frame is used as padding for virtual stub dispatch tailcalls. // When A calls B via virtual stub dispatch, the stub dispatch stub resolves // the target code for B and jumps to it. 
If A wants to do a tail call, @@ -3042,16 +3042,16 @@ class TailCallFrame : public Frame { VPTR_VTABLE_CLASS(TailCallFrame, Frame) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) TADDR m_CallerAddress; // the address the tailcall was initiated from CalleeSavedRegisters m_regs; // callee saved registers - the stack walk assumes that all non-JIT frames have them TADDR m_ReturnAddress; // the return address of the tailcall -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) TADDR m_pGCLayout; TADDR m_padding; // code:StubLinkerCPU::CreateTailCallCopyArgsThunk expects the size of TailCallFrame to be 16-byte aligned CalleeSavedRegisters m_calleeSavedRegisters; TADDR m_ReturnAddress; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) union { CalleeSavedRegisters m_calleeSavedRegisters; // alias saved link register as m_ReturnAddress @@ -3067,7 +3067,7 @@ class TailCallFrame : public Frame public: #ifndef CROSSGEN_COMPILE -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) #ifndef DACCESS_COMPILE TailCallFrame(T_CONTEXT * pContext, Thread * pThread) @@ -3084,9 +3084,9 @@ class TailCallFrame : public Frame #endif static TailCallFrame * GetFrameFromContext(CONTEXT * pContext); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) static TailCallFrame* FindTailCallFrame(Frame* pFrame) { LIMITED_METHOD_CONTRACT; @@ -3101,7 +3101,7 @@ class TailCallFrame : public Frame LIMITED_METHOD_CONTRACT; return m_CallerAddress; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 virtual TADDR GetReturnAddressPtr() { @@ -3117,7 +3117,7 @@ class TailCallFrame : public Frame virtual void UpdateRegDisplay(const PREGDISPLAY pRD); #endif // !CROSSGEN_COMPILE -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 void SetGCLayout(TADDR pGCLayout) { LIMITED_METHOD_CONTRACT; diff --git a/src/coreclr/src/vm/gccover.cpp b/src/coreclr/src/vm/gccover.cpp index cd0581aefb622..c5801e259fec7 100644 --- a/src/coreclr/src/vm/gccover.cpp +++ b/src/coreclr/src/vm/gccover.cpp @@ -27,7 +27,7 @@ #include "virtualcallstub.h" #include "threadsuspend.h" -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) #include "gcinfodecoder.h" #endif @@ -38,7 +38,7 @@ MethodDesc* AsMethodDesc(size_t addr); static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE*nextInstr); bool isCallToStopForGCJitHelper(PBYTE instrPtr); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart); static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart); #endif @@ -76,9 +76,9 @@ bool IsGcCoverageInterruptInstruction(PBYTE instrPtr) { UINT32 instrVal; -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) instrVal = *reinterpret_cast(instrPtr); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { @@ -97,11 +97,11 @@ bool IsGcCoverageInterruptInstruction(PBYTE instrPtr) bool IsOriginalInstruction(PBYTE instrPtr, GCCoverageInfo* gcCover, DWORD offset) { -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) UINT32 instrVal = *reinterpret_cast(instrPtr); UINT32 origInstrVal = *reinterpret_cast(gcCover->savedCode + offset); return (instrVal == origInstrVal); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) { @@ 
-153,7 +153,7 @@ void SetupAndSprinkleBreakpoints( fZapped); // This is not required for ARM* as the above call does the work for both hot & cold regions -#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_) +#if !defined(TARGET_ARM) && !defined(TARGET_ARM64) if (gcCover->methodRegion.coldSize != 0) { gcCover->SprinkleBreakpoints(gcCover->savedCode + gcCover->methodRegion.hotSize, @@ -310,7 +310,7 @@ void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) ReturnKind returnKind = callMD->GetReturnKind(true); if (!IsValidReturnKind(returnKind)) { -#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX) +#if defined(TARGET_AMD64) && defined(TARGET_UNIX) _ASSERTE(!"Unexpected return kind for x64 Unix."); #else // SKip GC coverage after the call. @@ -320,7 +320,7 @@ void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) _ASSERTE(IsValidReturnKind(returnKind)); bool ispointerKind = IsPointerReturnKind(returnKind); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM size_t instrLen = GetARMInstructionLength(instrToReplace); bool protectReturn = ispointerKind; if (protectReturn) @@ -333,13 +333,13 @@ void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) *(WORD*)instrToReplace = INTERRUPT_INSTR; else *(DWORD*)instrToReplace = INTERRUPT_INSTR_32; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) bool protectReturn = ispointerKind; if (protectReturn) *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET; else *(DWORD*)instrToReplace = INTERRUPT_INSTR; -#elif defined(_TARGET_AMD64_) || defined(_TARGET_X86_) +#elif defined(TARGET_AMD64) || defined(TARGET_X86) if (ispointerKind) @@ -369,9 +369,9 @@ void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) } else { -#if !defined(_TARGET_AMD64_) || !defined(PLATFORM_UNIX) +#if !defined(TARGET_AMD64) || !defined(TARGET_UNIX) _ASSERTE(!"Not expected multi reg return with pointers."); -#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX +#endif // !TARGET_AMD64 || !TARGET_UNIX if (!protectRegister[0] && protectRegister[1]) { *instrToReplace = INTERRUPT_INSTR_PROTECT_SECOND_RET; @@ -392,7 +392,7 @@ void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD) #endif } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 class GCCoverageRangeEnumerator { @@ -475,7 +475,7 @@ class GCCoverageRangeEnumerator } }; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // When sprinkling break points, we must make sure that certain calls to // Thread-suspension routines inlined into the managed method are not @@ -545,7 +545,7 @@ void GCCoverageInfo::SprinkleBreakpoints( size_t regionOffsetAdj, BOOL fZapped) { -#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && USE_DISASSEMBLER +#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && USE_DISASSEMBLER BYTE * codeStart = (BYTE *)pCode; @@ -566,7 +566,7 @@ void GCCoverageInfo::SprinkleBreakpoints( static ConfigDWORD fGcStressOnDirectCalls; // ConfigDWORD must be a static variable -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 GCCoverageRangeEnumerator rangeEnum(codeMan, gcInfoToken, codeStart, codeSize); GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0); @@ -608,7 +608,7 @@ void GCCoverageInfo::SprinkleBreakpoints( InstructionType instructionType; size_t len = disassembler.DisassembleInstruction(cur, codeEnd - cur, &instructionType); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // REVISIT_TODO apparently the jit does not use the entire RUNTIME_FUNCTION range // for code. It uses some for switch tables. 
Because the first few offsets // may be decodable as instructions, we can't reason about where we should @@ -634,7 +634,7 @@ void GCCoverageInfo::SprinkleBreakpoints( switch(instructionType) { case InstructionType::Call_IndirectUnconditional: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj))) #endif { @@ -645,7 +645,7 @@ void GCCoverageInfo::SprinkleBreakpoints( case InstructionType::Call_DirectUnconditional: if(fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls)) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj))) #endif { @@ -665,7 +665,7 @@ void GCCoverageInfo::SprinkleBreakpoints( } break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case InstructionType::Branch_IndirectUnconditional: fSawPossibleSwitch = true; break; @@ -692,7 +692,7 @@ void GCCoverageInfo::SprinkleBreakpoints( if (codeMan->IsGcSafe(&codeInfo, static_cast(dwRelOffset))) *cur = INTERRUPT_INSTR; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // we will whack every instruction in the prolog and epilog to make certain // our unwinding logic works there. if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL)) @@ -707,7 +707,7 @@ void GCCoverageInfo::SprinkleBreakpoints( cur += len; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 PBYTE newCur = rangeEnum.EnsureInRange(cur); if(newCur != cur) { @@ -725,7 +725,7 @@ void GCCoverageInfo::SprinkleBreakpoints( if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR)) doingEpilogChecks = false; -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) //Save the method code from hotRegion memcpy(saveAddr, (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize); @@ -766,10 +766,10 @@ void GCCoverageInfo::SprinkleBreakpoints( #else _ASSERTE(!"not implemented for platform"); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED @@ -809,7 +809,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID //Determine if instruction before the safe point is call using immediate (BLX Imm) or call by register (BLX Rm) BOOL instructionIsACallThroughRegister = FALSE; BOOL instructionIsACallThroughImmediate = FALSE; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // call by register instruction is two bytes (BL Reg T1 encoding) WORD instr = *((WORD*)savedInstrPtr - 1); @@ -826,7 +826,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID if((*(((WORD*)savedInstrPtr)-1) & 0xd000) == 0xd000) // It is call by immediate instructionIsACallThroughImmediate = TRUE; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DWORD instr = *((DWORD*)savedInstrPtr - 1); // Is the call through a register or an immediate offset @@ -855,9 +855,9 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID { // If it is call by register then cannot know MethodDesc so replace the call instruction with illegal instruction // safe point will be replaced with appropriate illegal instruction at execution time when reg value is known -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) *((WORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) *((DWORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL; 
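replaceSafePointInstructionWithGcStressInstr (above) has to know whether the instruction before a safe point is a call through an immediate or through a register before it patches anything. On ARM64 that test is a single mask compare; the same mask/value pair reappears in isCallToStopForGCJitHelper below. A standalone sketch:

    #include <cstdint>

    // BL <imm26> encodes as 100101 in the top six bits, i.e. 0x94000000 under mask 0xFC000000.
    static bool IsArm64BlImmediate(uint32_t instr)
    {
        return (instr & 0xFC000000) == 0x94000000;
    }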
#endif // _TARGET_XXXX_ } @@ -965,7 +965,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto { // The instruction about to be replaced cannot already be a gcstress instruction _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr)); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) size_t instrLen = GetARMInstructionLength(instrPtr); if (instrLen == 2) @@ -978,7 +978,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto } instrPtr += instrLen; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Do not replace with gcstress interrupt instruction at call to JIT_RareDisableHelper if(!isCallToStopForGCJitHelper(instrPtr)) *((DWORD*)instrPtr) = INTERRUPT_INSTR; @@ -998,7 +998,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto } return FALSE; } -#endif // defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM) || defined(TARGET_ARM64) // Is this a call instruction to JIT_RareDisableHelper() // We cannot insert GCStress instruction at this call @@ -1012,7 +1012,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto // get the actual jithelper target. bool isCallToStopForGCJitHelper(PBYTE instrPtr) { -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) if (((*reinterpret_cast(instrPtr)) & 0xFC000000) == 0x94000000) // Do we have a BL instruction? { // call through immediate @@ -1031,7 +1031,7 @@ bool isCallToStopForGCJitHelper(PBYTE instrPtr) } } } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if((instrPtr[1] & 0xf8) == 0xf0 && (instrPtr[3] & 0xc0) == 0xc0) // call using imm { int imm32 = GetThumb2BlRel24((UINT16 *)instrPtr); @@ -1075,7 +1075,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { PBYTE PC = (regs) ? (PBYTE)GetIP(regs) : instrPtr; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if((instrPtr[1] & 0xf0) == 0xf0) // direct call { int imm32 = GetThumb2BlRel24((UINT16 *)instrPtr); @@ -1092,7 +1092,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { { return 0; // Not a call. } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (((*reinterpret_cast(instrPtr)) & 0xFC000000) == 0x94000000) { // call through immediate @@ -1115,7 +1115,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { } #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ((instrPtr[0] & 0xf0) == REX_PREFIX_BASE) { @@ -1129,7 +1129,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { instrPtr++; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 if (instrPtr[0] == 0xE8) { // Direct Relative Near *nextInstr = instrPtr + 5; @@ -1217,7 +1217,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { // if ((mod == 0) && (rm == 5)) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // at this point instrPtr should be pointing at the beginning // of the byte sequence for the call instruction. 
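getTargetOfCall resolves an ARM64 BL destination from the 26-bit immediate: sign-extend it, scale it by the 4-byte instruction size, and add the PC. A self-contained sketch of that arithmetic (not the runtime's exact helper):

    #include <cstdint>

    static uint8_t* Arm64BlTarget(uint8_t* pc, uint32_t instr)
    {
        int64_t imm26 = instr & 0x03FFFFFF;   // low 26 bits of the encoding
        if (imm26 & 0x02000000)               // bit 25 is the sign bit
            imm26 -= 0x04000000;              // sign-extend
        return pc + (imm26 * 4);              // the offset is counted in 4-byte words
    }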
the operand // is a RIP-relative address from the next instruction, so to @@ -1227,7 +1227,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { result = PC + 6; #else result = 0; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } else { result = (PBYTE)getRegVal(baseadj + rm, regs); } @@ -1281,7 +1281,7 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) { /****************************************************************************/ -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) { if (origVal == curVal) @@ -1296,7 +1296,7 @@ void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) { origVal = curVal; // this is now the best estimate of what should be returned. } -#endif // _TARGET_X86_ +#endif // TARGET_X86 int GCcoverCount = 0; @@ -1344,12 +1344,12 @@ bool IsGcCoverageInterrupt(LPVOID ip) void RemoveGcCoverageInterrupt(TADDR instrPtr, BYTE * savedInstrPtr) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (GetARMInstructionLength(savedInstrPtr) == 2) *(WORD *)instrPtr = *(WORD *)savedInstrPtr; else *(DWORD *)instrPtr = *(DWORD *)savedInstrPtr; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) *(DWORD *)instrPtr = *(DWORD *)savedInstrPtr; #else *(BYTE *)instrPtr = *savedInstrPtr; @@ -1402,7 +1402,7 @@ BOOL OnGcCoverageInterrupt(PCONTEXT regs) Thread* pThread = GetThread(); _ASSERTE(pThread); -#if defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(PLATFORM_UNIX) +#if defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // If we're unable to redirect, then we simply won't test GC at this // location. if (!pThread->CheckForAndDoRedirectForGCStress(regs)) @@ -1467,7 +1467,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) bool atCall; bool afterCallProtect[2] = { false, false }; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) BYTE instrVal = *instrPtr; forceStack[6] = &instrVal; // This is so I can see it fastchecked @@ -1487,7 +1487,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) afterCallProtect[1] = true; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) forceStack[6] = (WORD*)instrPtr; // This is so I can see it fastchecked size_t instrLen = GetARMInstructionLength(instrPtr); @@ -1507,7 +1507,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) atCall = (instrVal32 == INTERRUPT_INSTR_CALL_32); afterCallProtect[0] = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) DWORD instrVal = *(DWORD *)instrPtr; forceStack[6] = &instrVal; // This is so I can see it fastchecked @@ -1516,7 +1516,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) #endif // _TARGET_* -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* are we at the very first instruction? If so, capture the register state */ bool bShouldUpdateProlog = true; if (gcCover->doingEpilogChecks) { @@ -1546,7 +1546,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) } instrVal = gcCover->savedCode[offset]; -#endif // _TARGET_X86_ +#endif // TARGET_X86 // @@ -1560,7 +1560,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) // thread may take the exception due to the HLT, but by the time the OS // inspects the code stream, the HLT may be replaced with the original // code and it will just raise a STATUS_ACCESS_VIOLATION. 
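The AMD64 branch quoted above explains that for mod == 0, rm == 5 the operand is RIP-relative from the next instruction, which is why it advances 6 bytes (opcode, ModRM, 4-byte displacement). A minimal sketch of locating the indirection cell of a call qword ptr [rip+disp32] (bytes FF 15 followed by the displacement); the helper name is invented:

    #include <cstdint>
    #include <cstring>

    // Returns the address of the 8-byte cell holding the call target.
    static uint8_t* RipRelativeCallCell(uint8_t* instrStart)
    {
        int32_t disp;
        std::memcpy(&disp, instrStart + 2, sizeof(disp)); // skip opcode FF and ModRM 15
        return instrStart + 6 + disp;                     // next-instruction address + disp32
    }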
-#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // only restore the original instruction if: // this is not the first instruction in the method's prolog, or // if it is, only if this is the second time we run in this method @@ -1569,7 +1569,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) #endif pThread->PostGCStressInstructionUpdate((BYTE*)instrPtr, &gcCover->savedCode[offset]); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 /* are we in a prolog or epilog? If so just test the unwind logic but don't actually do a GC since the prolog and epilog are not GC safe points */ @@ -1621,9 +1621,9 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) } return; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) /* In non-fully interrruptable code, if the EIP is just after a call instr means something different because it expects that we are IN the @@ -1668,13 +1668,13 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) { // We are in preemptive mode in JITTed code. This implies that we are into IL stub // close to PINVOKE method. This call will never return objectrefs. -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM size_t instrLen = GetARMInstructionLength(nextInstr); if (instrLen == 2) *(WORD*)nextInstr = INTERRUPT_INSTR; else *(DWORD*)nextInstr = INTERRUPT_INSTR_32; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) *(DWORD*)nextInstr = INTERRUPT_INSTR; #else *nextInstr = INTERRUPT_INSTR; @@ -1749,24 +1749,24 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) if (afterCallProtect[0]) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) retValRegs[numberOfRegs++] = regs->Rax; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) retValRegs[numberOfRegs++] = regs->Eax; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) retValRegs[numberOfRegs++] = regs->R0; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) retValRegs[numberOfRegs++] = regs->X0; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 } if (afterCallProtect[1]) { -#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX) +#if defined(TARGET_AMD64) && defined(TARGET_UNIX) retValRegs[numberOfRegs++] = regs->Rdx; -#else // !_TARGET_AMD64_ || !PLATFORM_UNIX +#else // !TARGET_AMD64 || !TARGET_UNIX _ASSERTE(!"Not expected multi reg return with pointers."); -#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX +#endif // !TARGET_AMD64 || !TARGET_UNIX } _ASSERTE(sizeof(OBJECTREF) == sizeof(DWORD_PTR)); @@ -1800,13 +1800,13 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) { if (afterCallProtect[0]) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) regs->Rax = retValRegs[0]; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) regs->Eax = retValRegs[0]; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) regs->R0 = retValRegs[0]; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) regs->X[0] = retValRegs[0]; #else PORTABILITY_ASSERT("DoGCStress - return register"); @@ -1815,11 +1815,11 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) if (afterCallProtect[1]) { -#if defined(_TARGET_AMD64_) && defined(PLATFORM_UNIX) +#if defined(TARGET_AMD64) && defined(TARGET_UNIX) regs->Rdx = retValRegs[numberOfRegs - 1]; -#else // !_TARGET_AMD64_ || !PLATFORM_UNIX +#else // !TARGET_AMD64 || !TARGET_UNIX _ASSERTE(!"Not 
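When a stress GC runs right after a call, DoGcStress captures the register(s) that may hold a returned object reference, protects them across the collection, and writes them back; the hunk shows exactly one primary return register per target, plus Rdx as a second register only for Unix AMD64 multi-register returns. A sketch of the per-target read (helper name invented; assumes the platform CONTEXT definition the surrounding code already uses):

    // Where the hunk reads the primary return value from the interrupted thread's context.
    static size_t ReadPrimaryReturnReg(const CONTEXT* regs)
    {
    #if defined(TARGET_AMD64)
        return (size_t)regs->Rax;
    #elif defined(TARGET_X86)
        return (size_t)regs->Eax;
    #elif defined(TARGET_ARM)
        return (size_t)regs->R0;
    #elif defined(TARGET_ARM64)
        return (size_t)regs->X0;
    #else
        return 0; // other targets hit PORTABILITY_ASSERT in the hunk
    #endif
    }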
expected multi reg return with pointers."); -#endif // !_TARGET_AMD64_ || !PLATFORM_UNIX +#endif // !TARGET_AMD64 || !TARGET_UNIX } } diff --git a/src/coreclr/src/vm/gccover.h b/src/coreclr/src/vm/gccover.h index 4c24f2c554276..6432f005b653e 100644 --- a/src/coreclr/src/vm/gccover.h +++ b/src/coreclr/src/vm/gccover.h @@ -66,7 +66,7 @@ typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::sav #pragma warning(pop) #endif // _MSC_VER -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) #define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do) #define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction @@ -74,7 +74,7 @@ typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::sav #define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register #define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // 16-bit illegal instructions which will cause exception and cause // control to go to GcStress codepath @@ -97,7 +97,7 @@ typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::sav #define INTERRUPT_INSTR_CALL_32 0xa002f7f0 // 0xf7f0a002 #define INTERRUPT_INSTR_PROTECT_RET_32 0xa003f7f0 // 0xf7f0a003 -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // The following encodings are undefined. They fall into section C4.5.8 - Data processing (2 source) of // "Arm Architecture Reference Manual ARMv8" @@ -113,7 +113,7 @@ typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::sav // inline bool IsGcCoverageInterruptInstructionVal(UINT32 instrVal) { -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) switch (instrVal) { @@ -125,7 +125,7 @@ inline bool IsGcCoverageInterruptInstructionVal(UINT32 instrVal) return false; } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) UINT16 instrVal16 = static_cast(instrVal); size_t instrLen = GetARMInstructionLength(instrVal16); diff --git a/src/coreclr/src/vm/gcenv.ee.common.cpp b/src/coreclr/src/vm/gcenv.ee.common.cpp index 88986eb9bc2c2..f9e7794a839bc 100644 --- a/src/coreclr/src/vm/gcenv.ee.common.cpp +++ b/src/coreclr/src/vm/gcenv.ee.common.cpp @@ -232,7 +232,7 @@ StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData) unsigned flags = pCF->GetCodeManagerFlags(); - #ifdef _TARGET_X86_ + #ifdef TARGET_X86 STRESS_LOG3(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM EIP = %p &EIP = %p\n", pMD, GetControlPC(pCF->GetRegisterSet()), pCF->GetRegisterSet()->PCTAddr); #else diff --git a/src/coreclr/src/vm/gcenv.ee.cpp b/src/coreclr/src/vm/gcenv.ee.cpp index f78b5a0e8c60a..72cac07c9f6ef 100644 --- a/src/coreclr/src/vm/gcenv.ee.cpp +++ b/src/coreclr/src/vm/gcenv.ee.cpp @@ -850,7 +850,7 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) // On architectures with strong ordering, we only need to prevent compiler reordering. // Otherwise we put a process-wide fence here (so that we could use an ordinary read in the barrier) -#if defined(_ARM64_) || defined(_ARM_) +#if defined(HOST_ARM64) || defined(HOST_ARM) if (!is_runtime_suspended) { // If runtime is not suspended, force all threads to see the changed table before seeing updated heap boundaries. 
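On x86 and AMD64, gccover.h picks single-byte privileged or illegal encodings, so recognizing a GC-stress breakpoint is a plain byte compare. A hypothetical recognizer built from just the values visible in the hunk (the header also defines a single-register PROTECT_RET encoding that the hunk elides):

    #include <cstdint>

    static bool IsX86GcStressByte(uint8_t b)
    {
        return b == 0xF4     // INTERRUPT_INSTR                    (hlt)
            || b == 0xFA     // INTERRUPT_INSTR_CALL               (cli)
            || b == 0xEC     // INTERRUPT_INSTR_PROTECT_SECOND_RET (in)
            || b == 0xED;    // INTERRUPT_INSTR_PROTECT_BOTH_RET   (in)
    }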
@@ -862,11 +862,11 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) g_lowest_address = args->lowest_address; g_highest_address = args->highest_address; -#if defined(_ARM64_) || defined(_ARM_) +#if defined(HOST_ARM64) || defined(HOST_ARM) // Need to reupdate for changes to g_highest_address g_lowest_address stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check); -#ifdef _ARM_ +#ifdef HOST_ARM if (stompWBCompleteActions & SWB_ICACHE_FLUSH) { // flushing/invalidating the write barrier's body for the current process @@ -902,7 +902,7 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) // (we care only about managed threads and suspend/resume will do full fences - good enough for us). // -#if defined(_ARM64_) || defined(_ARM_) +#if defined(HOST_ARM64) || defined(HOST_ARM) is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || is_runtime_suspended; if (!is_runtime_suspended) { diff --git a/src/coreclr/src/vm/gcenv.h b/src/coreclr/src/vm/gcenv.h index 767adb892caea..8837e8bbdc34a 100644 --- a/src/coreclr/src/vm/gcenv.h +++ b/src/coreclr/src/vm/gcenv.h @@ -44,7 +44,7 @@ #include "gcenv.interlocked.h" #include "gcenv.interlocked.inl" -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #include "gcenv.unix.inl" #else #include "gcenv.windows.inl" @@ -62,7 +62,7 @@ namespace ETW } GC_ROOT_KIND; }; -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #define _tcslen wcslen #define _tcscpy wcscpy #define _tfopen _wfopen diff --git a/src/coreclr/src/vm/gcenv.os.cpp b/src/coreclr/src/vm/gcenv.os.cpp index 8f4c31cc28c37..cfe0009dbdd6e 100644 --- a/src/coreclr/src/vm/gcenv.os.cpp +++ b/src/coreclr/src/vm/gcenv.os.cpp @@ -14,7 +14,7 @@ #include "common.h" #include "gcenv.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include #endif @@ -26,7 +26,7 @@ #define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0)) -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX uint32_t g_pageSizeUnixInl = 0; #endif @@ -56,7 +56,7 @@ class GroupProcNo uint16_t GetCombinedValue() { return m_groupProc; } }; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) static bool g_SeLockMemoryPrivilegeAcquired = false; @@ -96,7 +96,7 @@ bool InitLargePagesPrivilege() return true; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX static void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX) { @@ -114,7 +114,7 @@ bool GCToOSInterface::Initialize() { LIMITED_METHOD_CONTRACT; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX g_pageSizeUnixInl = GetOsPageSize(); uint32_t currentProcessCpuCount = PAL_GetLogicalCpuCountFromOS(); @@ -130,7 +130,7 @@ bool GCToOSInterface::Initialize() g_processAffinitySet.Add(i); } } -#else // FEATURE_PAL +#else // TARGET_UNIX if (CPUGroupInfo::CanEnableGCCPUGroups()) { // When CPU groups are enabled, then the process is not bound by the process affinity set at process launch. @@ -158,7 +158,7 @@ bool GCToOSInterface::Initialize() } } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return true; } @@ -199,7 +199,7 @@ bool GCToOSInterface::SetCurrentThreadIdealAffinity(uint16_t srcProcNo, uint16_t LIMITED_METHOD_CONTRACT; bool success = true; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX GroupProcNo srcGroupProcNo(srcProcNo); GroupProcNo dstGroupProcNo(dstProcNo); @@ -231,19 +231,19 @@ bool GCToOSInterface::SetCurrentThreadIdealAffinity(uint16_t srcProcNo, uint16_t return success; -#else // !FEATURE_PAL +#else // !TARGET_UNIX // There is no way to set a thread ideal processor on Unix, so do nothing. 
return true; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } bool GCToOSInterface::GetCurrentThreadIdealProc(uint16_t* procNo) { LIMITED_METHOD_CONTRACT; bool success = false; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PROCESSOR_NUMBER proc; success = !!GetThreadIdealProcessorEx(GetCurrentThread(), &proc); if (success) @@ -251,7 +251,7 @@ bool GCToOSInterface::GetCurrentThreadIdealProc(uint16_t* procNo) GroupProcNo groupProcNo(proc.Group, proc.Number); *procNo = groupProcNo.GetCombinedValue(); } -#endif //FEATURE_PAL +#endif //TARGET_UNIX return success; } @@ -262,7 +262,7 @@ uint32_t GCToOSInterface::GetCurrentProcessorNumber() _ASSERTE(CanGetCurrentProcessorNumber()); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PROCESSOR_NUMBER proc_no_cpu_group; GetCurrentProcessorNumberEx(&proc_no_cpu_group); @@ -270,7 +270,7 @@ uint32_t GCToOSInterface::GetCurrentProcessorNumber() return groupProcNo.GetCombinedValue(); #else return ::GetCurrentProcessorNumber(); -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX } // Check if the OS supports getting current processor number @@ -278,7 +278,7 @@ bool GCToOSInterface::CanGetCurrentProcessorNumber() { LIMITED_METHOD_CONTRACT; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX return PAL_HasGetCurrentProcessorNumber(); #else // on all Windows platforms we support this API exists @@ -377,7 +377,7 @@ void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size) { LIMITED_METHOD_CONTRACT; -#if !defined(FEATURE_PAL) +#if !defined(TARGET_UNIX) if (!g_SeLockMemoryPrivilegeAcquired) { if (!InitLargePagesPrivilege()) @@ -441,13 +441,13 @@ bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock) LIMITED_METHOD_CONTRACT; bool success = ::ClrVirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (success && unlock) { // Remove the page range from the working set ::VirtualUnlock(address, size); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return success; } @@ -457,7 +457,7 @@ bool GCToOSInterface::SupportsWriteWatch() { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX bool writeWatchSupported = false; // check if the OS supports write-watch. @@ -471,9 +471,9 @@ bool GCToOSInterface::SupportsWriteWatch() } return writeWatchSupported; -#else // FEATURE_PAL +#else // TARGET_UNIX return false; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Reset the write tracking state for the specified virtual memory range. @@ -484,9 +484,9 @@ void GCToOSInterface::ResetWriteWatch(void* address, size_t size) { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX ::ResetWriteWatch(address, size); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Retrieve addresses of the pages that are written to in a region of virtual memory @@ -503,7 +503,7 @@ bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX uint32_t flags = resetState ? 
1 : 0; ULONG granularity; @@ -511,12 +511,12 @@ bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, _ASSERTE (granularity == GetOsPageSize()); return success; -#else // FEATURE_PAL +#else // TARGET_UNIX *pageAddresses = NULL; *pageAddressesCount = 0; return true; -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Get size of the largest cache on the processor die @@ -540,7 +540,7 @@ size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize) bool GCToOSInterface::SetThreadAffinity(uint16_t procNo) { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX GroupProcNo groupProcNo(procNo); if (CPUGroupInfo::CanEnableGCCPUGroups()) @@ -557,9 +557,9 @@ bool GCToOSInterface::SetThreadAffinity(uint16_t procNo) { return !!SetThreadAffinityMask(GetCurrentThread(), (DWORD_PTR)1 << groupProcNo.GetProcIndex()); } -#else // FEATURE_PAL +#else // TARGET_UNIX return PAL_SetCurrentThreadAffinity(procNo); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } // Boosts the calling thread's thread priority to a level higher than the default @@ -581,9 +581,9 @@ bool GCToOSInterface::BoostThreadPriority() // set of enabled processors const AffinitySet* GCToOSInterface::SetGCThreadsAffinitySet(uintptr_t configAffinityMask, const AffinitySet* configAffinitySet) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (CPUGroupInfo::CanEnableGCCPUGroups()) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX { if (!configAffinitySet->IsEmpty()) { @@ -597,7 +597,7 @@ const AffinitySet* GCToOSInterface::SetGCThreadsAffinitySet(uintptr_t configAffi } } } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX else { if (configAffinityMask != 0) @@ -612,7 +612,7 @@ const AffinitySet* GCToOSInterface::SetGCThreadsAffinitySet(uintptr_t configAffi } } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return &g_processAffinitySet; } @@ -624,14 +624,14 @@ uint32_t GCToOSInterface::GetCurrentProcessCpuCount() { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // GetCurrentProcessCpuCount only returns up to 64 procs. return CPUGroupInfo::CanEnableGCCPUGroups() ? GCToOSInterface::GetTotalProcessorCount(): ::GetCurrentProcessCpuCount(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX return ::GetCurrentProcessCpuCount(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } // Return the size of the user-mode portion of the virtual address space of this process. @@ -649,7 +649,7 @@ size_t GCToOSInterface::GetVirtualMemoryLimit() static size_t g_RestrictedPhysicalMemoryLimit = (size_t)MAX_PTR; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // For 32-bit processes the virtual address range could be smaller than the amount of physical // memory on the machine/in the container, we need to restrict by the VM. @@ -803,7 +803,7 @@ static size_t GetRestrictedPhysicalMemoryLimit() VolatileStore(&g_RestrictedPhysicalMemoryLimit, memory_limit); return g_RestrictedPhysicalMemoryLimit; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX // Get the physical memory that this process can use. 
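GCToOSInterface::GetWriteWatch (above) forwards to the Win32 API of the same name on Windows and reports no dirty pages on TARGET_UNIX; the "resetState ? 1 : 0" flag is WRITE_WATCH_FLAG_RESET. A minimal usage sketch of the underlying Windows API, assuming the region was reserved with MEM_WRITE_WATCH (illustrative, not the runtime wrapper):

    #include <windows.h>
    #include <cstdio>

    void DumpDirtyPages(void* base, size_t size)
    {
        void*     pages[256];
        ULONG_PTR count = 256;
        DWORD     granularity = 0;

        // Returns 0 on success; WRITE_WATCH_FLAG_RESET clears the watch state as it is read.
        if (::GetWriteWatch(WRITE_WATCH_FLAG_RESET, base, size, pages, &count, &granularity) == 0)
        {
            for (ULONG_PTR i = 0; i < count; i++)
                std::printf("dirty page at %p (granularity %lu)\n", pages[i], (unsigned long)granularity);
        }
    }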
// Return: @@ -822,7 +822,7 @@ uint64_t GCToOSInterface::GetPhysicalMemoryLimit(bool* is_restricted) if (restricted_limit != 0) { if (is_restricted -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX && !g_UseRestrictedVirtualMemory #endif ) @@ -854,7 +854,7 @@ void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available { size_t workingSetSize; BOOL status = FALSE; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!g_UseRestrictedVirtualMemory) { PROCESS_MEMORY_COUNTERS pmc; @@ -888,7 +888,7 @@ void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available MEMORYSTATUSEX ms; GetProcessMemoryLoad(&ms); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (g_UseRestrictedVirtualMemory) { _ASSERTE (ms.ullTotalVirtual == restricted_limit); @@ -904,7 +904,7 @@ void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available *available_page_file = 0; } else -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX { if (memory_load != NULL) *memory_load = ms.dwMemoryLoad; @@ -965,7 +965,7 @@ uint32_t GCToOSInterface::GetTotalProcessorCount() { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (CPUGroupInfo::CanEnableGCCPUGroups()) { return CPUGroupInfo::GetNumActiveProcessors(); @@ -974,9 +974,9 @@ uint32_t GCToOSInterface::GetTotalProcessorCount() { return g_SystemInfo.dwNumberOfProcessors; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX return PAL_GetTotalCpuCount(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } bool GCToOSInterface::CanEnableGCNumaAware() @@ -988,30 +988,30 @@ bool GCToOSInterface::CanEnableGCNumaAware() bool GCToOSInterface::GetNumaInfo(uint16_t* total_nodes, uint32_t* max_procs_per_node) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX return NumaNodeInfo::GetNumaInfo(total_nodes, (DWORD*)max_procs_per_node); #else return false; -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX } bool GCToOSInterface::GetCPUGroupInfo(uint16_t* total_groups, uint32_t* max_procs_per_group) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX return CPUGroupInfo::GetCPUGroupInfo(total_groups, (DWORD*)max_procs_per_group); #else return false; -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX } bool GCToOSInterface::CanEnableGCCPUGroups() { LIMITED_METHOD_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX return CPUGroupInfo::CanEnableGCCPUGroups() != FALSE; #else return false; -#endif //!FEATURE_PAL +#endif //!TARGET_UNIX } // Get processor number and optionally its NUMA node number for the specified heap number @@ -1045,7 +1045,7 @@ bool GCToOSInterface::GetProcessorForHeap(uint16_t heap_number, uint16_t* proc_n if (success) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX WORD gn, gpn; if (CPUGroupInfo::CanEnableGCCPUGroups()) @@ -1087,13 +1087,13 @@ bool GCToOSInterface::GetProcessorForHeap(uint16_t heap_number, uint16_t* proc_n { // no numa setting, each cpu group is treated as a node *node_no = procNumber.Group; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX *proc_no = procIndex; if (!GCToOSInterface::CanEnableGCNumaAware() || !NumaNodeInfo::GetNumaProcessorNodeEx(procIndex, (WORD*)node_no)) { *node_no = NUMA_NODE_UNDEFINED; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } return success; @@ -1111,7 +1111,7 @@ bool GCToOSInterface::ParseGCHeapAffinitizeRangesEntry(const char** config_strin size_t index_offset = 0; char* number_end; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX size_t group_number = strtoul(*config_string, &number_end, 10); if ((number_end == *config_string) || (*number_end != ':')) @@ -1130,7 +1130,7 @@ bool 
GCToOSInterface::ParseGCHeapAffinitizeRangesEntry(const char** config_strin index_offset = group_begin; *config_string = number_end + 1; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX size_t start, end; if (!ParseIndexOrRange(config_string, &start, &end)) @@ -1138,13 +1138,13 @@ bool GCToOSInterface::ParseGCHeapAffinitizeRangesEntry(const char** config_strin return false; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if ((start >= group_size) || (end >= group_size)) { // Invalid CPU index values or range return false; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX *start_index = index_offset + start; *end_index = index_offset + end; diff --git a/src/coreclr/src/vm/gcheaputilities.h b/src/coreclr/src/vm/gcheaputilities.h index d0de17300be88..24cd66bb9f321 100644 --- a/src/coreclr/src/vm/gcheaputilities.h +++ b/src/coreclr/src/vm/gcheaputilities.h @@ -124,7 +124,7 @@ class GCHeapUtilities { { // When running on a single-proc Intel system, it's more efficient to use a single global // allocation context for SOH allocations than to use one for every thread. -#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(FEATURE_PAL) +#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && !defined(TARGET_UNIX) return IsServerHeap() || ::g_SystemInfo.dwNumberOfProcessors != 1 || CPUGroupInfo::CanEnableGCCPUGroups(); #else return true; diff --git a/src/coreclr/src/vm/gchelpers.cpp b/src/coreclr/src/vm/gchelpers.cpp index cc2077c61ff55..744125c8d42c5 100644 --- a/src/coreclr/src/vm/gchelpers.cpp +++ b/src/coreclr/src/vm/gchelpers.cpp @@ -171,13 +171,13 @@ inline void CheckObjectSize(size_t alloc_size) } CONTRACTL_END; size_t max_object_size; -#ifdef BIT64 +#ifdef HOST_64BIT if (g_pConfig->GetGCAllowVeryLargeObjects()) { max_object_size = (INT64_MAX - 7 - min_obj_size); } else -#endif // BIT64 +#endif // HOST_64BIT { max_object_size = (INT32_MAX - 7 - min_obj_size); } @@ -450,7 +450,7 @@ OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS ThrowOutOfMemoryDimensionsExceeded(); // Allocate the space from the GC heap -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // POSITIVE_INT32 * UINT16 + SMALL_CONST // this cannot overflow on 64bit size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize(); @@ -583,7 +583,7 @@ void ThrowOutOfMemoryDimensionsExceeded() THROWS; } CONTRACTL_END; -#ifdef BIT64 +#ifdef HOST_64BIT EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED)); #else ThrowOutOfMemory(); @@ -708,7 +708,7 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, ThrowOutOfMemoryDimensionsExceeded(); // Allocate the space from the GC heap -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // POSITIVE_INT32 * UINT16 + SMALL_CONST // this cannot overflow on 64bit size_t totalSize = cElements * componentSize + pArrayMT->GetBaseSize(); @@ -1588,9 +1588,9 @@ void ErectWriteBarrierForMT(MethodTable **dst, MethodTable *ref) // // We could use the pointer maps and do this more accurately if necessary -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame -#endif //_MSC_VER && _TARGET_X86_ +#endif //_MSC_VER && TARGET_X86 void SetCardsAfterBulkCopy(Object **start, size_t len) @@ -1602,6 +1602,6 @@ SetCardsAfterBulkCopy(Object **start, size_t len) } } -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default 
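The TARGET_64BIT allocation paths above lean on the comment that POSITIVE_INT32 * UINT16 + SMALL_CONST cannot overflow on 64-bit, which is why they compute totalSize with an unchecked multiply. The bound is easy to verify at compile time (the 0xFFFF stands in for the "small constant" base size, an assumption for illustration):

    #include <cstdint>

    // (2^31 - 1) * (2^16 - 1) + 0xFFFF < 2^47, comfortably inside a 64-bit size_t.
    static_assert((uint64_t)INT32_MAX * UINT16_MAX + 0xFFFF < ((uint64_t)1 << 47),
                  "max element count times max component size plus a small base size fits in 64 bits");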
optimizations -#endif //_MSC_VER && _TARGET_X86_ +#endif //_MSC_VER && TARGET_X86 diff --git a/src/coreclr/src/vm/gchelpers.inl b/src/coreclr/src/vm/gchelpers.inl index 53c00ac49ccdf..864bed35b24a9 100644 --- a/src/coreclr/src/vm/gchelpers.inl +++ b/src/coreclr/src/vm/gchelpers.inl @@ -19,7 +19,7 @@ // //======================================================================== -#if defined(BIT64) +#if defined(HOST_64BIT) static const int card_byte_shift = 11; static const int card_bundle_byte_shift = 21; #else diff --git a/src/coreclr/src/vm/gcinfodecoder.cpp b/src/coreclr/src/vm/gcinfodecoder.cpp index 5669c6101d391..012b00da210ec 100644 --- a/src/coreclr/src/vm/gcinfodecoder.cpp +++ b/src/coreclr/src/vm/gcinfodecoder.cpp @@ -132,11 +132,11 @@ GcInfoDecoder::GcInfoDecoder( m_GenericSecretParamIsMD = (headerFlags & GC_INFO_HAS_GENERICS_INST_CONTEXT_MASK) == GC_INFO_HAS_GENERICS_INST_CONTEXT_MD; m_GenericSecretParamIsMT = (headerFlags & GC_INFO_HAS_GENERICS_INST_CONTEXT_MASK) == GC_INFO_HAS_GENERICS_INST_CONTEXT_MT; int hasStackBaseRegister = headerFlags & GC_INFO_HAS_STACK_BASE_REGISTER; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_WantsReportOnlyLeaf = ((headerFlags & GC_INFO_WANTS_REPORT_ONLY_LEAF) != 0); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) m_HasTailCalls = ((headerFlags & GC_INFO_HAS_TAILCALLS) != 0); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 int hasSizeOfEditAndContinuePreservedArea = headerFlags & GC_INFO_HAS_EDIT_AND_CONTINUE_PRESERVED_SLOTS; int hasReversePInvokeFrame = false; @@ -153,9 +153,9 @@ GcInfoDecoder::GcInfoDecoder( } else { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 m_ReturnKind = RT_Unset; -#endif // ! _TARGET_X86_ +#endif // ! TARGET_X86 } if (flags == DECODE_RETURN_KIND) { @@ -379,7 +379,7 @@ bool GcInfoDecoder::IsSafePoint(UINT32 codeOffset) if(m_NumSafePoints == 0) return false; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) // Safepoints are encoded with a -1 adjustment codeOffset--; #endif @@ -399,7 +399,7 @@ UINT32 GcInfoDecoder::FindSafePoint(UINT32 breakOffset) const UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength)); UINT32 result = m_NumSafePoints; -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) // Safepoints are encoded with a -1 adjustment // but normalizing them masks off the low order bit // Thus only bother looking if the address is odd @@ -446,7 +446,7 @@ void GcInfoDecoder::EnumerateSafePoints(EnumerateSafePointsCallback *pCallback, UINT32 normOffset = (UINT32)m_Reader.Read(numBitsPerOffset); UINT32 offset = DENORMALIZE_CODE_OFFSET(normOffset) + 2; -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) // Safepoints are encoded with a -1 adjustment offset--; #endif @@ -538,18 +538,18 @@ bool GcInfoDecoder::GetIsVarArg() return m_IsVarArg; } -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) bool GcInfoDecoder::HasTailCalls() { _ASSERTE( m_Flags & DECODE_HAS_TAILCALLS ); return m_HasTailCalls; } -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 bool GcInfoDecoder::WantsReportOnlyLeaf() { // Only AMD64 with JIT64 can return false here. 
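gchelpers.inl keys the card constants off HOST_64BIT: with card_byte_shift = 11 each card byte covers 2 KB of heap, and card_bundle_byte_shift = 21 gives 2 MB per bundle byte. A sketch of the card lookup these constants imply (the table pointer name is invented, and the real runtime biases the table by the heap's lowest address):

    #include <cstdint>
    #include <cstddef>

    extern uint8_t* g_card_table_sketch;   // invented stand-in for the runtime's card table

    const int card_byte_shift_sketch = 11; // 64-bit value from the hunk: one byte per 2 KB

    inline void MarkCardFor(void* dstAddress)
    {
        size_t card = (size_t)dstAddress >> card_byte_shift_sketch;
        g_card_table_sketch[card] = 0xFF;  // mark the card dirty
    }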
-#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return m_WantsReportOnlyLeaf; #else return true; @@ -1356,7 +1356,7 @@ const GcSlotDesc* GcSlotDecoder::GetSlotDesc(UINT32 slotIndex) // Platform-specific methods //----------------------------------------------------------------------------- -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) OBJECTREF* GcInfoDecoder::GetRegisterSlot( @@ -1380,7 +1380,7 @@ OBJECTREF* GcInfoDecoder::GetRegisterSlot( return (OBJECTREF*)*(ppRax + regNum); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX OBJECTREF* GcInfoDecoder::GetCapturedRegister( int regNum, PREGDISPLAY pRD @@ -1396,7 +1396,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister( return (OBJECTREF*)(pRax + regNum); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD) { @@ -1450,7 +1450,7 @@ void GcInfoDecoder::ReportRegisterToGC( // AMD64 LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum )); OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD ); -#if defined(FEATURE_PAL) && !defined(SOS_TARGET_AMD64) +#if defined(TARGET_UNIX) && !defined(SOS_TARGET_AMD64) // On PAL, we don't always have the context pointers available due to // a limitation of an unwinding library. In such case, the context // pointers for some nonvolatile registers are NULL. @@ -1470,7 +1470,7 @@ void GcInfoDecoder::ReportRegisterToGC( // AMD64 gcFlags |= GC_CALL_PINNED; } -#endif // FEATURE_PAL && !SOS_TARGET_AMD64 +#endif // TARGET_UNIX && !SOS_TARGET_AMD64 #ifdef _DEBUG if(IsScratchRegister(regNum, pRD)) @@ -1492,7 +1492,7 @@ void GcInfoDecoder::ReportRegisterToGC( // AMD64 pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false))); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) OBJECTREF* GcInfoDecoder::GetRegisterSlot( int regNum, @@ -1524,7 +1524,7 @@ OBJECTREF* GcInfoDecoder::GetRegisterSlot( } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX OBJECTREF* GcInfoDecoder::GetCapturedRegister( int regNum, PREGDISPLAY pRD @@ -1540,7 +1540,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister( return (OBJECTREF*)(pR0 + regNum); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD) @@ -1604,7 +1604,7 @@ void GcInfoDecoder::ReportRegisterToGC( // ARM pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false))); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) OBJECTREF* GcInfoDecoder::GetRegisterSlot( int regNum, @@ -1695,7 +1695,7 @@ void GcInfoDecoder::ReportRegisterToGC( // ARM64 pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false))); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX OBJECTREF* GcInfoDecoder::GetCapturedRegister( int regNum, PREGDISPLAY pRD @@ -1710,7 +1710,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister( return (OBJECTREF*)(pX0 + regNum); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX #else // Unknown platform @@ -1776,7 +1776,7 @@ OBJECTREF* GcInfoDecoder::GetStackSlot( SIZE_T * pFrameReg = (SIZE_T*) GetRegisterSlot(m_StackBaseRegister, pRD); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // On PAL, we don't always have the context pointers available due to // a limitation of an unwinding library. In such case, the context // pointers for some nonvolatile registers are NULL. 
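The AMD64 GetRegisterSlot / GetCapturedRegister pair above differs by exactly one level of indirection: the register display normally holds pointers to where each nonvolatile register was saved, while the captured-context path (used on TARGET_UNIX when the unwinder could not supply context pointers) holds the register values themselves. A sketch of the distinction with invented field names:

    #include <cstddef>

    // Illustrative layout only; the real fields live in REGDISPLAY / CONTEXT.
    struct RegDisplaySketch
    {
        size_t** pCtxPtrs;      // pCtxPtrs[i] points at the saved location of register i
        size_t*  pCapturedCtx;  // pCapturedCtx[i] is the captured value of register i
    };

    inline void** SlotViaContextPointers(RegDisplaySketch* rd, int regNum)
    {
        return (void**)*(rd->pCtxPtrs + regNum);     // dereference, then report the saved slot
    }

    inline void** SlotViaCapturedContext(RegDisplaySketch* rd, int regNum)
    {
        return (void**)(rd->pCapturedCtx + regNum);  // the slot is inside the context copy itself
    }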
@@ -1784,7 +1784,7 @@ OBJECTREF* GcInfoDecoder::GetStackSlot( { pFrameReg = (SIZE_T*) GetCapturedRegister(m_StackBaseRegister, pRD); } -#endif // FEATURE_PAL +#endif // TARGET_UNIX pObjRef = (OBJECTREF*)(*pFrameReg + spOffset); } @@ -1796,11 +1796,11 @@ OBJECTREF* GcInfoDecoder::GetStackSlot( #ifdef DACCESS_COMPILE int GcInfoDecoder::GetStackReg(int spBase) { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) int esp = 4; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) int esp = 13; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) int esp = 31; #endif diff --git a/src/coreclr/src/vm/gdbjit.cpp b/src/coreclr/src/vm/gdbjit.cpp index becabba65cf19..58613869c448b 100644 --- a/src/coreclr/src/vm/gdbjit.cpp +++ b/src/coreclr/src/vm/gdbjit.cpp @@ -2296,7 +2296,7 @@ void Elf_Builder::Finalize() // Elf_Ehdr *elfHeader = new (m_Buffer.GetPtr()) Elf_Ehdr; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM elfHeader->e_flags = EF_ARM_EABI_VER5; #ifdef ARM_SOFTFP elfHeader->e_flags |= EF_ARM_SOFT_FLOAT; @@ -2350,7 +2350,7 @@ struct __attribute__((packed)) FDE static void BuildDebugFrame(Elf_Builder &elfBuilder, PCODE pCode, TADDR codeSize) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) const unsigned int code_alignment_factor = 2; const int data_alignment_factor = -4; @@ -2371,7 +2371,7 @@ static void BuildDebugFrame(Elf_Builder &elfBuilder, PCODE pCode, TADDR codeSize // DW_CFA_def_cfa_register 11(r11) 0x0d, 0x0b, }; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) const unsigned int code_alignment_factor = 1; const int data_alignment_factor = -4; @@ -2392,7 +2392,7 @@ static void BuildDebugFrame(Elf_Builder &elfBuilder, PCODE pCode, TADDR codeSize // DW_CFA_def_cfa_register 5(ebp) 0x0d, 0x05, }; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) const unsigned int code_alignment_factor = 1; const int data_alignment_factor = -8; @@ -2413,7 +2413,7 @@ static void BuildDebugFrame(Elf_Builder &elfBuilder, PCODE pCode, TADDR codeSize // DW_CFA_def_cfa_register(6) 0x0d, 0x06, }; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) const unsigned int code_alignment_factor = 1; const int data_alignment_factor = -4; @@ -3610,7 +3610,7 @@ bool NotifyGdb::BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize, sym[i].st_other = 0; sym[i].st_shndx = thunkIndexBase + (i - (1 + methodCount)); // .thunks section index sym[i].st_size = 8; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM sym[i].st_value = 1; // for THUMB code #else sym[i].st_value = 0; @@ -3649,9 +3649,9 @@ Elf32_Ehdr::Elf32_Ehdr() e_ident[i] = 0; e_type = ET_REL; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) e_machine = EM_386; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) e_machine = EM_ARM; #endif e_flags = 0; @@ -3679,9 +3679,9 @@ Elf64_Ehdr::Elf64_Ehdr() e_ident[i] = 0; e_type = ET_REL; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) e_machine = EM_X86_64; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) e_machine = EM_AARCH64; #endif e_flags = 0; diff --git a/src/coreclr/src/vm/gdbjit.h b/src/coreclr/src/vm/gdbjit.h index abb8480ba93db..8d9359c16ca5e 100644 --- a/src/coreclr/src/vm/gdbjit.h +++ b/src/coreclr/src/vm/gdbjit.h @@ -22,13 +22,13 @@ #include "../inc/llvm/ELF.h" #include "../inc/llvm/Dwarf.h" -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) typedef Elf32_Ehdr Elf_Ehdr; typedef Elf32_Shdr Elf_Shdr; typedef Elf32_Sym Elf_Sym; const uint16_t DW_FORM_size = DW_FORM_data4; #define ADDRESS_SIZE 4 -#elif 
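BuildDebugFrame emits a per-target CIE whose alignment factors and CFA register follow the ABI: Thumb code is 2-byte aligned, the others 1-byte, the data factor is the negative slot size, and the frame-pointer register differs per target. Collected from the hunk into one table (the ARM64 CFA operand is elided there; register 29 / x29 is assumed, consistent with the DW_OP_reg29 used in gdbjit.h below):

    // DWARF register numbers: r11 (ARM), ebp = 5, rbp = 6, x29 = 29.
    struct CfiParamsSketch { unsigned codeAlign; int dataAlign; unsigned cfaReg; };

    const CfiParamsSketch kCfiParams[] =
    {
        /* TARGET_ARM   */ { 2, -4, 11 },
        /* TARGET_X86   */ { 1, -4,  5 },
        /* TARGET_AMD64 */ { 1, -8,  6 },
        /* TARGET_ARM64 */ { 1, -4, 29 },   // register number assumed, see note above
    };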
defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) typedef Elf64_Ehdr Elf_Ehdr; typedef Elf64_Shdr Elf_Shdr; typedef Elf64_Sym Elf_Sym; @@ -472,13 +472,13 @@ class FunctionMember: public TypeMember dumped(false) { m_sub_loc[0] = 1; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) m_sub_loc[1] = DW_OP_reg6; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) m_sub_loc[1] = DW_OP_reg5; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) m_sub_loc[1] = DW_OP_reg29; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) m_sub_loc[1] = DW_OP_reg11; #else #error Unsupported platform! diff --git a/src/coreclr/src/vm/hosting.cpp b/src/coreclr/src/vm/hosting.cpp index d46b467481922..6abcc2c901f12 100644 --- a/src/coreclr/src/vm/hosting.cpp +++ b/src/coreclr/src/vm/hosting.cpp @@ -51,8 +51,8 @@ LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, D if (lpAddress == NULL && (flAllocationType & MEM_RESERVE) != 0 && PEDecoder::GetForceRelocs()) { -#ifdef BIT64 - // Try to allocate memory all over the place when we are stressing relocations on BIT64. +#ifdef HOST_64BIT + // Try to allocate memory all over the place when we are stressing relocations on HOST_64BIT. // This will make sure that we generate jump stubs correctly among other things. static BYTE* ptr = (BYTE*)0x234560000; ptr += 0x123450000; @@ -67,7 +67,7 @@ LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, D #else // Allocate memory top to bottom to stress ngen fixups with LARGEADDRESSAWARE support. p = ::VirtualAlloc(lpAddress, dwSize, flAllocationType | MEM_TOP_DOWN, flProtect); -#endif // BIT64 +#endif // HOST_64BIT } } #endif // _DEBUG @@ -154,14 +154,14 @@ HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize) } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX { return ::HeapCreate(flOptions, dwInitialSize, dwMaximumSize); } -#else // !FEATURE_PAL +#else // !TARGET_UNIX return NULL; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize) @@ -175,19 +175,19 @@ BOOL EEHeapDestroy(HANDLE hHeap) } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX { return ::HeapDestroy(hHeap); } -#else // !FEATURE_PAL +#else // !TARGET_UNIX UNREACHABLE(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #define HeapDestroy(hHeap) Dont_Use_HeapDestroy(hHeap) #ifdef _DEBUG -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #define OS_HEAP_ALIGN 8 #else #define OS_HEAP_ALIGN 16 @@ -305,14 +305,14 @@ BOOL EEHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX { return ::HeapValidate(hHeap, dwFlags, lpMem); } -#else // !FEATURE_PAL +#else // !TARGET_UNIX return TRUE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #define HeapValidate(hHeap, dwFlags, lpMem) Dont_Use_HeapValidate(hHeap, dwFlags, lpMem) @@ -322,7 +322,7 @@ HANDLE EEGetProcessExecutableHeap() { STATIC_CONTRACT_GC_NOTRIGGER; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // // Create the executable heap lazily @@ -351,9 +351,9 @@ HANDLE EEGetProcessExecutableHeap() { #define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize) #define HeapDestroy(hHeap) Dont_Use_HeapDestroy(hHeap) -#else // !FEATURE_PAL +#else // !TARGET_UNIX UNREACHABLE(); -#endif // !FEATURE_PAL +#endif 
// !TARGET_UNIX // TODO: implement hosted executable heap @@ -440,7 +440,7 @@ BOOL __DangerousSwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount, BOOL goT // The following two values appear to yield roughly equivalent spin times // on their respective platforms. // -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #define SLEEP_START_THRESHOLD (5 * 1024) #else #define SLEEP_START_THRESHOLD (32 * 1024) diff --git a/src/coreclr/src/vm/i386/asmconstants.h b/src/coreclr/src/vm/i386/asmconstants.h index 08d0c4254e957..554219356efb3 100644 --- a/src/coreclr/src/vm/i386/asmconstants.h +++ b/src/coreclr/src/vm/i386/asmconstants.h @@ -12,7 +12,7 @@ // a compile-time assert, check out USE_COMPILE_TIME_CONSTANT_FINDER. // TODO: put the constant finder in a common place so other platforms can use it. -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 #error this file should only be used on an X86 platform #endif diff --git a/src/coreclr/src/vm/i386/cgencpu.h b/src/coreclr/src/vm/i386/cgencpu.h index 7e49a7801830b..b9c924fcd4d9a 100644 --- a/src/coreclr/src/vm/i386/cgencpu.h +++ b/src/coreclr/src/vm/i386/cgencpu.h @@ -10,9 +10,9 @@ -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 #error Should only include "cgenx86.h" for X86 builds -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifndef __cgenx86_h__ #define __cgenx86_h__ @@ -545,10 +545,10 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode) // #define JIT_GetSharedGCStaticBaseNoCtor // #define JIT_GetSharedNonGCStaticBaseNoCtor -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define JIT_NewCrossContext JIT_NewCrossContext #define JIT_Stelem_Ref JIT_Stelem_Ref -#endif // FEATURE_PAL +#endif // TARGET_UNIX //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Call counting diff --git a/src/coreclr/src/vm/i386/cgenx86.cpp b/src/coreclr/src/vm/i386/cgenx86.cpp index 8f0d974c09f0a..fb890e814e8e3 100644 --- a/src/coreclr/src/vm/i386/cgenx86.cpp +++ b/src/coreclr/src/vm/i386/cgenx86.cpp @@ -893,7 +893,7 @@ void DynamicHelperFrame::UpdateRegDisplay(const PREGDISPLAY pRD) // header issues with cgencpu.h including dbginterface.h. WORD GetUnpatchedCodeData(LPCBYTE pAddr) { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 #error Make sure this works before porting to platforms other than x86. #endif CONTRACT(WORD) { @@ -930,7 +930,7 @@ WORD GetUnpatchedCodeData(LPCBYTE pAddr) #ifndef DACCESS_COMPILE -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) //------------------------------------------------------------------------- // One-time creation of special prestub to initialize UMEntryThunks. 
//------------------------------------------------------------------------- @@ -980,7 +980,7 @@ Stub *GenerateUMThunkPrestub() RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()); } -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL Stub *GenerateInitPInvokeFrameHelper() { @@ -1131,7 +1131,7 @@ void ResumeAtJit(PCONTEXT pContext, LPVOID oldESP) #endif // !EnC_SUPPORTED -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #pragma warning(push) #pragma warning(disable: 4035) extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]) @@ -1205,7 +1205,7 @@ extern "C" DWORD __stdcall xmmYmmStateSupport() #pragma warning(pop) -#else // !FEATURE_PAL +#else // !TARGET_UNIX extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]) { @@ -1251,7 +1251,7 @@ extern "C" DWORD __stdcall xmmYmmStateSupport() return ((eax & 0x06) == 0x06) ? 1 : 0; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam) { diff --git a/src/coreclr/src/vm/i386/gmsx86.cpp b/src/coreclr/src/vm/i386/gmsx86.cpp index 9337013445a81..ccb54a0150be7 100644 --- a/src/coreclr/src/vm/i386/gmsx86.cpp +++ b/src/coreclr/src/vm/i386/gmsx86.cpp @@ -9,7 +9,7 @@ #include "common.h" #include "gmscpu.h" -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #define USE_EXTERNAL_UNWINDER #endif diff --git a/src/coreclr/src/vm/i386/jitinterfacex86.cpp b/src/coreclr/src/vm/i386/jitinterfacex86.cpp index 3b97a6b05af88..5a585cbd9cd10 100644 --- a/src/coreclr/src/vm/i386/jitinterfacex86.cpp +++ b/src/coreclr/src/vm/i386/jitinterfacex86.cpp @@ -95,7 +95,7 @@ extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj) #endif // _DEBUG -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX /****************************************************************************/ /* assigns 'val to 'array[idx], after doing all the proper checks */ @@ -210,7 +210,7 @@ HCIMPL1_V(INT32, JIT_Dbl2IntOvf, double val) FCThrow(kOverflowException); } HCIMPLEND -#endif // FEATURE_PAL +#endif // TARGET_UNIX FCDECL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_); diff --git a/src/coreclr/src/vm/i386/stublinkerx86.cpp b/src/coreclr/src/vm/i386/stublinkerx86.cpp index b2acdbf952dc2..fe99762075355 100644 --- a/src/coreclr/src/vm/i386/stublinkerx86.cpp +++ b/src/coreclr/src/vm/i386/stublinkerx86.cpp @@ -27,9 +27,9 @@ #include "dbginterface.h" #include "eeprofinterfaces.h" #include "eeconfig.h" -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #include "asmconstants.h" -#endif // _TARGET_X86_ +#endif // TARGET_X86 #include "class.h" #include "stublink.inl" @@ -63,7 +63,7 @@ extern "C" VOID __cdecl ArrayOpStubNullException(void); extern "C" VOID __cdecl ArrayOpStubRangeException(void); extern "C" VOID __cdecl ArrayOpStubTypeMismatchException(void); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #define EXCEPTION_HELPERS(base) \ extern "C" VOID __cdecl base##_RSIRDI_ScratchArea(void); \ extern "C" VOID __cdecl base##_ScratchArea(void); \ @@ -73,14 +73,14 @@ EXCEPTION_HELPERS(ArrayOpStubNullException); EXCEPTION_HELPERS(ArrayOpStubRangeException); EXCEPTION_HELPERS(ArrayOpStubTypeMismatchException); #undef EXCEPTION_HELPERS -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 #endif // !FEATURE_ARRAYSTUB_AS_IL -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #if defined(_DEBUG) extern "C" VOID __cdecl DebugCheckStubUnwindInfo(); #endif // _DEBUG -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #ifdef FEATURE_COMINTEROP Thread* __stdcall 
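xmmYmmStateSupport answers whether the OS preserves both SSE and AVX register state: bits 1 and 2 of XCR0 must both be set, which is exactly the "(eax & 0x06) == 0x06" test above. A hedged sketch using the compiler intrinsic instead of raw asm (assumes a toolchain that exposes _xgetbv, e.g. MSVC or GCC/Clang built with xsave support):

    #include <cstdint>
    #include <immintrin.h>

    static bool OsPreservesXmmYmmState()
    {
        uint64_t xcr0 = _xgetbv(0);        // XCR0: bit 1 = SSE state, bit 2 = AVX state
        return (xcr0 & 0x06) == 0x06;      // same mask/value test as the hunk
    }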
CreateThreadBlockReturnHr(ComMethodFrame *pFrame); @@ -88,7 +88,7 @@ Thread* __stdcall CreateThreadBlockReturnHr(ComMethodFrame *pFrame); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BOOL IsPreservedReg (X86Reg reg) { @@ -104,9 +104,9 @@ BOOL IsPreservedReg (X86Reg reg) return PreservedRegMask & (1 << reg); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //----------------------------------------------------------------------- // InstructionFormat for near Jump and short Jump //----------------------------------------------------------------------- @@ -379,9 +379,9 @@ class X86NearJump : public InstructionFormat { public: X86NearJump() : InstructionFormat( InstructionFormat::k8|InstructionFormat::k32 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 | InstructionFormat::k64Small | InstructionFormat::k64 -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 ) { LIMITED_METHOD_CONTRACT; @@ -397,13 +397,13 @@ class X86NearJump : public InstructionFormat case k32: return 5; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case k64Small: return 5 + 2; case k64: return 12; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: _ASSERTE(!"unexpected refsize"); return 0; @@ -424,7 +424,7 @@ class X86NearJump : public InstructionFormat pOutBuffer[0] = 0xe9; *((__int32*)(pOutBuffer+1)) = (__int32)fixedUpReference; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else if (k64Small == refsize) { UINT64 TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode); @@ -449,7 +449,7 @@ class X86NearJump : public InstructionFormat pOutBuffer[10] = 0xFF; pOutBuffer[11] = 0xE0; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 else { _ASSERTE(!"unreached"); @@ -475,7 +475,7 @@ class X86NearJump : public InstructionFormat case InstructionFormat::k32: return sizeof(PVOID) <= sizeof(UINT32); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case InstructionFormat::k64Small: return FitsInI4(offset); @@ -498,13 +498,13 @@ class X86NearJump : public InstructionFormat return FitsInI1(offset); case InstructionFormat::k32: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 return FitsInI4(offset); #else return TRUE; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case InstructionFormat::k64Small: // EmitInstruction emits a non-relative jmp for // k64Small. We don't have enough info to predict the @@ -571,9 +571,9 @@ class X86Call : public InstructionFormat public: X86Call () : InstructionFormat( InstructionFormat::k32 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 | InstructionFormat::k64Small | InstructionFormat::k64 -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 ) { LIMITED_METHOD_CONTRACT; @@ -588,13 +588,13 @@ class X86Call : public InstructionFormat case k32: return 5; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case k64Small: return 5 + 2; case k64: return 10 + 2; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: _ASSERTE(!"unexpected refsize"); @@ -613,7 +613,7 @@ class X86Call : public InstructionFormat *((__int32*)(1+pOutBuffer)) = (__int32)fixedUpReference; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case k64Small: UINT64 TargetAddress; @@ -639,7 +639,7 @@ class X86Call : public InstructionFormat pOutBuffer[10] = 0xff; pOutBuffer[11] = 0xd0; break; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 default: _ASSERTE(!"unreached"); @@ -649,7 +649,7 @@ class X86Call : public InstructionFormat // For x86, the default CanReach implementation will suffice. It only needs // to handle k32. 
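X86NearJump sizes each emitted jump by how far it has to reach: 2 bytes for an 8-bit relative, 5 for a 32-bit relative, and on TARGET_AMD64 7 or 12 bytes for the absolute forms that load the target into rax and jmp rax (the 12-byte emit is visible above as a 10-byte mov rax, imm64 followed by FF E0). A sketch of the size decision; the helper name is invented and the 7-byte case assumes the k64Small form loads a zero-extended 32-bit absolute address:

    #include <cstdint>

    static unsigned NearJumpBytes(int64_t relOffset, uint64_t absTarget)
    {
        if (relOffset >= INT8_MIN  && relOffset <= INT8_MAX)  return 2;  // EB rel8
        if (relOffset >= INT32_MIN && relOffset <= INT32_MAX) return 5;  // E9 rel32
    #if defined(TARGET_AMD64)
        if (absTarget <= UINT32_MAX)                          return 7;  // mov eax, imm32; jmp rax
        return 12;                                                       // mov rax, imm64; jmp rax
    #else
        (void)absTarget;
        return 5;
    #endif
    }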
-#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset) { if (fExternal) @@ -699,7 +699,7 @@ class X86Call : public InstructionFormat } } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 }; @@ -732,7 +732,7 @@ class X86PushImm32 : public InstructionFormat } }; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) //----------------------------------------------------------------------- // InstructionFormat for lea reg, [RIP relative]. //----------------------------------------------------------------------- @@ -813,9 +813,9 @@ class X64LeaRIP : public InstructionFormat } }; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) static BYTE gX64NearJumpSetup[sizeof(X64NearJumpSetup)]; static BYTE gX64NearJumpExecute[sizeof(X64NearJumpExecute)]; static BYTE gX64LeaRIP[sizeof(X64LeaRIP)]; @@ -840,7 +840,7 @@ static BYTE gX86PushImm32[sizeof(X86PushImm32)]; new (gX86Call) X86Call(); new (gX86PushImm32) X86PushImm32(InstructionFormat::k32); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) new (gX64NearJumpSetup) X64NearJumpSetup(); new (gX64NearJumpExecute) X64NearJumpExecute(); new (gX64LeaRIP) X64LeaRIP(); @@ -855,7 +855,7 @@ VOID StubLinkerCPU::X86EmitMovRegReg(X86Reg destReg, X86Reg srcReg) { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; if (destReg >= kR8) @@ -904,7 +904,7 @@ VOID StubLinkerCPU::X86EmitPushReg(X86Reg reg) X86Reg origReg = reg; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (reg >= kR8) { Emit8(REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT); @@ -934,13 +934,13 @@ VOID StubLinkerCPU::X86EmitPopReg(X86Reg reg) { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (reg >= kR8) { Emit8(REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT); reg = X86RegFromAMD64Reg(reg); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 Emit8(static_cast(0x58 + reg)); Pop(sizeof(void*)); @@ -994,7 +994,7 @@ VOID StubLinkerCPU::X86EmitPushImmPtr(LPVOID value BIT64_ARG(X86Reg tmpReg /*=kR { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 X86EmitRegLoad(tmpReg, (UINT_PTR) value); X86EmitPushReg(tmpReg); #else @@ -1010,7 +1010,7 @@ VOID StubLinkerCPU::X86EmitZeroOutReg(X86Reg reg) { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // 32-bit results are zero-extended, so we only need the REX byte if // it's an extended register. 
if (reg >= kR8) @@ -1052,7 +1052,7 @@ VOID StubLinkerCPU::X86EmitCmpRegImm32(X86Reg reg, INT32 imm32) } CONTRACTL_END; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; if (reg >= kR8) @@ -1074,7 +1074,7 @@ VOID StubLinkerCPU::X86EmitCmpRegImm32(X86Reg reg, INT32 imm32) } } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //--------------------------------------------------------------- // Emits: // CMP [reg+offs], imm32 @@ -1097,9 +1097,9 @@ VOID StubLinkerCPU:: X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32 } VOID StubLinkerCPU:: X64EmitCmp32RegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32) -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 VOID StubLinkerCPU:: X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32) -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 { CONTRACTL { @@ -1142,7 +1142,7 @@ VOID StubLinkerCPU:: X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32 //--------------------------------------------------------------- // Emits: -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // mov rax, // add rsp, imm32 // jmp rax @@ -1155,7 +1155,7 @@ VOID StubLinkerCPU::X86EmitTailcallWithESPAdjust(CodeLabel *pTarget, INT32 imm32 { STANDARD_VM_CONTRACT; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) EmitLabelRef(pTarget, reinterpret_cast(gX64NearJumpSetup), 0); X86EmitAddEsp(imm32); EmitLabelRef(pTarget, reinterpret_cast(gX64NearJumpExecute), 0); @@ -1167,7 +1167,7 @@ VOID StubLinkerCPU::X86EmitTailcallWithESPAdjust(CodeLabel *pTarget, INT32 imm32 //--------------------------------------------------------------- // Emits: -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // mov rax, // pop reg // jmp rax @@ -1180,7 +1180,7 @@ VOID StubLinkerCPU::X86EmitTailcallWithSinglePop(CodeLabel *pTarget, X86Reg reg) { STANDARD_VM_CONTRACT; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) EmitLabelRef(pTarget, reinterpret_cast(gX64NearJumpSetup), 0); X86EmitPopReg(reg); EmitLabelRef(pTarget, reinterpret_cast(gX64NearJumpExecute), 0); @@ -1227,9 +1227,9 @@ VOID StubLinkerCPU::X86EmitCall(CodeLabel *target, int iArgBytes) INDEBUG(Emit8(0x90)); // Emit a nop after the call in debug so that // we know that this is a call that can directly call // managed code -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 Pop(iArgBytes); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } @@ -1242,7 +1242,7 @@ VOID StubLinkerCPU::X86EmitReturn(WORD wArgBytes) CONTRACTL { STANDARD_VM_CHECK; -#if defined(_TARGET_AMD64_) || defined(UNIX_X86_ABI) +#if defined(TARGET_AMD64) || defined(UNIX_X86_ABI) PRECONDITION(wArgBytes == 0); #endif @@ -1260,7 +1260,7 @@ VOID StubLinkerCPU::X86EmitReturn(WORD wArgBytes) Pop(wArgBytes); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //--------------------------------------------------------------- // Emits: // JMP or @@ -1271,7 +1271,7 @@ VOID StubLinkerCPU::X86EmitLeaRIP(CodeLabel *target, X86Reg reg) STANDARD_VM_CONTRACT; EmitLabelRef(target, reinterpret_cast(gX64LeaRIP), reg); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 @@ -1330,7 +1330,7 @@ VOID StubLinkerCPU::X86EmitIndexRegStore(X86Reg dstreg, X86EmitOp(0x89, srcreg, (X86Reg)kESP_Unsafe, ofs); } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) //--------------------------------------------------------------- // Emits: // mov [RSP + ], @@ -1360,7 +1360,7 @@ VOID StubLinkerCPU::X86EmitIndexRegStoreR12(__int32 ofs, X86EmitOp(0x89, srcreg, (X86Reg)kR12, ofs, (X86Reg)0, 0, k64BitOp); } -#endif // defined(_TARGET_AMD64_) 
+#endif // defined(TARGET_AMD64) //--------------------------------------------------------------- // Emits: @@ -1458,14 +1458,14 @@ VOID StubLinkerCPU::X86EmitIndexLea(X86Reg dstreg, X86Reg srcreg, __int32 ofs) X86EmitOffsetModRM(0x8d, dstreg, srcreg, ofs); } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) VOID StubLinkerCPU::X86EmitIndexLeaRSP(X86Reg dstreg, X86Reg srcreg, __int32 ofs) { STANDARD_VM_CONTRACT; X86EmitOp(0x8d, dstreg, (X86Reg)kESP_Unsafe, ofs, (X86Reg)0, 0, k64BitOp); } -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) //--------------------------------------------------------------- // Emits: @@ -1588,7 +1588,7 @@ VOID StubLinkerCPU::X86EmitAddReg(X86Reg reg, INT32 imm32) if (imm32 == 0) return; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; if (reg >= kR8) @@ -1633,7 +1633,7 @@ VOID StubLinkerCPU::X86EmitSubReg(X86Reg reg, INT32 imm32) } CONTRACTL_END; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; if (reg >= kR8) @@ -1666,7 +1666,7 @@ VOID StubLinkerCPU::X86EmitSubRegReg(X86Reg destReg, X86Reg srcReg) X86EmitR2ROp(0x29, srcReg, destReg); } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) //--------------------------------------------------------------- // movdqa destXmmreg, srcXmmReg @@ -1847,7 +1847,7 @@ VOID StubLinkerCPU::X64EmitMovXmmWorker(BYTE prefix, BYTE opcode, X86Reg Xmmreg, EmitBytes(codeBuffer, nBytes); } -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) //--------------------------------------------------------------- // Emits a MOD/RM for accessing a dword at [ + ofs32] @@ -1859,7 +1859,7 @@ VOID StubLinkerCPU::X86EmitOffsetModRM(BYTE opcode, X86Reg opcodereg, X86Reg ind BYTE codeBuffer[7]; BYTE* code = codeBuffer; int nBytes = 0; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 code++; // // code points to base X86 instruction, @@ -1923,7 +1923,7 @@ VOID StubLinkerCPU::X86EmitOffsetModRmSIB(BYTE opcode, X86Reg opcodeOrReg, X86Re BYTE* code = codeBuffer; int nBytes = 0; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 _ASSERTE(!"NYI"); #endif code[0] = opcode; @@ -1973,7 +1973,7 @@ VOID StubLinkerCPU::X86EmitRegLoad(X86Reg reg, UINT_PTR imm) UINT cbimm = sizeof(void*); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // amd64 zero-extends all 32-bit operations. If the immediate will fit in // 32 bits, use the smaller encoding. @@ -1993,7 +1993,7 @@ VOID StubLinkerCPU::X86EmitRegLoad(X86Reg reg, UINT_PTR imm) // the low 4 bytes. 
cbimm = sizeof(UINT32); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 Emit8(0xB8 | (BYTE)reg); EmitBytes((BYTE*)&imm, cbimm); } @@ -2046,7 +2046,7 @@ VOID StubLinkerCPU::X86EmitOp(WORD opcode, } CONTRACTL_END; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if ( k64BitOp == OperandSize || altreg >= kR8 || basereg >= kR8 @@ -2080,7 +2080,7 @@ VOID StubLinkerCPU::X86EmitOp(WORD opcode, Emit8(rex); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 BYTE modrmbyte = static_cast(altreg << 3); BOOL fNeedSIB = FALSE; @@ -2189,7 +2189,7 @@ VOID StubLinkerCPU::X86EmitR2ROp (WORD opcode, } CONTRACTL_END; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = 0; if (modrmreg >= kR8) @@ -2209,7 +2209,7 @@ VOID StubLinkerCPU::X86EmitR2ROp (WORD opcode, if (rex) Emit8(REX_PREFIX_BASE | rex); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 Emit8((BYTE)opcode); @@ -2236,7 +2236,7 @@ VOID StubLinkerCPU::X86EmitEspOffset(BYTE opcode, BYTE *code = codeBuffer; int nBytes; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = 0; if (k64BitOp == OperandSize) @@ -2255,7 +2255,7 @@ VOID StubLinkerCPU::X86EmitEspOffset(BYTE opcode, nBytes = 1; } else -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 { nBytes = 0; } @@ -2306,7 +2306,7 @@ VOID StubLinkerCPU::X86EmitDebugTrashReg(X86Reg reg) { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT; if (reg >= kR8) @@ -2345,7 +2345,7 @@ X86Reg GetX86ArgumentRegisterFromOffset(size_t ofs) } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 static const X86Reg c_argRegs[] = { #define ARGUMENT_REGISTER(regname) k##regname, ENUM_ARGUMENT_REGISTERS() @@ -2356,7 +2356,7 @@ static const X86Reg c_argRegs[] = { #ifndef CROSSGEN_COMPILE -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) void StubLinkerCPU::EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFuncCount) { STANDARD_VM_CONTRACT; @@ -2367,7 +2367,7 @@ void StubLinkerCPU::EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFun mov rcx, &(pHelperFuncCount->count) lock inc [rcx] pop rcx -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 mov rax, jmp rax #else @@ -2384,7 +2384,7 @@ void StubLinkerCPU::EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFun BYTE lock_inc_RCX[] = { 0xf0, 0xff, 0x01 }; EmitBytes(lock_inc_RCX, sizeof(lock_inc_RCX)); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // mov rax, // pop rcx // jmp rax @@ -2394,7 +2394,7 @@ void StubLinkerCPU::EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFun #endif X86EmitTailcallWithSinglePop(NewExternalCodeLabel(pJitHelper), kECX); } -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX VOID StubLinkerCPU::X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedRegSet) { @@ -2408,7 +2408,7 @@ VOID StubLinkerCPU::X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedR } CONTRACTL_END; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX X86EmitPushRegs(preservedRegSet & ((1 << kEAX) | (1 << kEDX) | (1 << kECX))); @@ -2431,12 +2431,12 @@ VOID StubLinkerCPU::X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedR X86EmitDebugTrashReg(kECX); #endif // _DEBUG -#else // FEATURE_PAL +#else // TARGET_UNIX -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BYTE code[] = { 0x65,0x48,0x8b,0x04,0x25 }; // mov dstreg, qword ptr gs:[IMM32] static const int regByteIndex = 3; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) BYTE code[] = { 0x64,0x8b,0x05 }; // mov dstreg, dword ptr fs:[IMM32] static const int regByteIndex = 2; #endif 
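A brief aside on the byte sequences in the X86EmitCurrentThreadFetch hunk above (an illustrative annotation, not part of the patch): on Windows the current thread block is reached through a TLS slot off the TEB, which x64 addresses via gs: and x86 via fs:. The arrays below restate those exact bytes with their assumed meanings; regByteIndex presumably marks the ModRM byte whose reg field is filled in with the destination register.

// Annotated copy of the emitted instruction templates (meanings are the
// editor's reading of the TARGET_AMD64 / TARGET_X86 branches above).
static const unsigned char kMovFromGsDisp32[] = {
    0x65,       // GS segment-override prefix (TEB base on Windows x64)
    0x48,       // REX.W: 64-bit operand size
    0x8B,       // MOV r64, r/m64
    0x04, 0x25  // ModRM + SIB selecting [disp32]; the reg field is filled in
                // with dstreg, hence regByteIndex = 3
};
static const unsigned char kMovFromFsDisp32[] = {
    0x64,       // FS segment-override prefix (TEB base on Windows x86)
    0x8B,       // MOV r32, r/m32
    0x05        // ModRM selecting [disp32]; reg field filled with dstreg,
                // hence regByteIndex = 2
};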
@@ -2449,10 +2449,10 @@ VOID StubLinkerCPU::X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedR X86EmitIndexRegLoad(dstreg, dstreg, (g_TlsIndex & 0x7FFF0000) >> 16); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #if defined(PROFILING_SUPPORTED) && !defined(FEATURE_STUBS_AS_IL) VOID StubLinkerCPU::EmitProfilerComCallProlog(TADDR pFrameVptr, X86Reg regFrame) @@ -2819,9 +2819,9 @@ VOID StubLinkerCPU::EmitRareSetup(CodeLabel *pRejoinPoint, BOOL fThrow) } //======================================================================== -#endif // _TARGET_X86_ +#endif // TARGET_X86 //======================================================================== -#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_) +#if defined(FEATURE_COMINTEROP) && defined(TARGET_X86) //======================================================================== // Epilog for stubs that enter managed code from COM // @@ -2928,9 +2928,9 @@ void StubLinkerCPU::EmitSharedComMethodStubEpilog(TADDR pFrameVptr, } //======================================================================== -#endif // defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_) +#endif // defined(FEATURE_COMINTEROP) && defined(TARGET_X86) -#if !defined(FEATURE_STUBS_AS_IL) && defined(_TARGET_X86_) +#if !defined(FEATURE_STUBS_AS_IL) && defined(TARGET_X86) /*============================================================================== Pushes a TransitionFrame on the stack If you make any changes to the prolog instruction sequence, be sure @@ -3079,9 +3079,9 @@ VOID StubLinkerCPU::EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset) EmitLabel(pLabel); #endif } -#endif // !defined(FEATURE_STUBS_AS_IL) && defined(_TARGET_X86_) +#endif // !defined(FEATURE_STUBS_AS_IL) && defined(TARGET_X86) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // This method unboxes the THIS pointer and then calls pRealMD // If it's shared code for a method in a generic value class, then also extract the vtable pointer // and pass it as an extra argument. 
Thus this stub generator really covers both @@ -3112,9 +3112,9 @@ VOID StubLinkerCPU::EmitUnboxMethodStub(MethodDesc* pUnboxMD) X86EmitAddReg(THIS_kREG, sizeof(void*)); EmitTailJumpToMethod(pUnboxMD); } -#endif //_TARGET_X86_ +#endif //TARGET_X86 -#if defined(FEATURE_SHARE_GENERIC_CODE) && defined(_TARGET_AMD64_) +#if defined(FEATURE_SHARE_GENERIC_CODE) && defined(TARGET_AMD64) VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg) { STANDARD_VM_CONTRACT; @@ -3173,9 +3173,9 @@ VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, s EmitTailJumpToMethod(pSharedMD); } -#endif // defined(FEATURE_SHARE_GENERIC_CODE) && defined(_TARGET_AMD64_) +#endif // defined(FEATURE_SHARE_GENERIC_CODE) && defined(TARGET_AMD64) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 VOID StubLinkerCPU::EmitLoadMethodAddressIntoAX(MethodDesc *pMD) { if (pMD->HasStableEntryPoint()) @@ -3193,7 +3193,7 @@ VOID StubLinkerCPU::EmitLoadMethodAddressIntoAX(MethodDesc *pMD) VOID StubLinkerCPU::EmitTailJumpToMethod(MethodDesc *pMD) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 EmitLoadMethodAddressIntoAX(pMD); Emit16(X86_INSTR_JMP_EAX); #else @@ -3211,7 +3211,7 @@ VOID StubLinkerCPU::EmitTailJumpToMethod(MethodDesc *pMD) #endif } -#if defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_INSTANTIATINGSTUB_AS_IL) && defined(_TARGET_X86_) +#if defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_INSTANTIATINGSTUB_AS_IL) && defined(TARGET_X86) // The stub generated by this method passes an extra dictionary argument before jumping to // shared-instantiation generic code. // @@ -3279,7 +3279,7 @@ VOID StubLinkerCPU::EmitInstantiatingMethodStub(MethodDesc* pMD, void* extra) EmitTailJumpToMethod(pMD); } -#endif // defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_INSTANTIATINGSTUB_AS_IL) && defined(_TARGET_X86_) +#endif // defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_INSTANTIATINGSTUB_AS_IL) && defined(TARGET_X86) #if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) @@ -3428,7 +3428,7 @@ VOID StubLinkerCPU::EmitUnwindInfoCheckSubfunction() { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // X86EmitCall will generate "mov rax, target/jmp rax", so we have to save // rax on the stack. DO NOT use X86EmitPushReg. That will induce infinite // recursion, since the push may require more unwind info. This "push rax" @@ -3444,7 +3444,7 @@ VOID StubLinkerCPU::EmitUnwindInfoCheckSubfunction() #endif // defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //----------------------------------------------------------------------- // Generates the inline portion of the code to enable preemptive GC. 
Hopefully, @@ -3663,7 +3663,7 @@ VOID StubLinkerCPU::EmitRareDisableHRESULT(CodeLabel *pRejoinPoint, CodeLabel *p } #endif // FEATURE_COMINTEROP -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // CROSSGEN_COMPILE @@ -3672,7 +3672,7 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray) { STANDARD_VM_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov SCRATCHREG,rsp X86_64BitOperands(); @@ -3814,7 +3814,7 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray) // jmp r10 X86EmitR2ROp(0xff, (X86Reg)4, kR10); -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 UINT espadjust = 0; BOOL haveMemMemMove = FALSE; @@ -3903,13 +3903,13 @@ VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray) static const BYTE bjmpeax[] = { 0xff, 0x20 }; EmitBytes(bjmpeax, sizeof(bjmpeax)); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } #if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_STUBS_AS_IL) -#if defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_MULTICASTSTUB_AS_IL) //=========================================================================== // Computes hash code for MulticastDelegate.Invoke() UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig* pSig) @@ -3942,9 +3942,9 @@ UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig* pSig) return hash; } -#endif // defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL) +#endif // defined(TARGET_X86) && !defined(FEATURE_MULTICASTSTUB_AS_IL) -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //=========================================================================== // Emits code for MulticastDelegate.Invoke() VOID StubLinkerCPU::EmitDelegateInvoke() @@ -3979,9 +3979,9 @@ VOID StubLinkerCPU::EmitDelegateInvoke() X86EmitReturn(0); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_MULTICASTSTUB_AS_IL) VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash) { STANDARD_VM_CONTRACT; @@ -4099,7 +4099,7 @@ VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash) // Epilog EmitMethodStubEpilog(numStackBytes, MulticastFrame::GetOffsetOfTransitionBlock()); } -#endif // defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL) +#endif // defined(TARGET_X86) && !defined(FEATURE_MULTICASTSTUB_AS_IL) #endif // !CROSSGEN_COMPILE && !FEATURE_STUBS_AS_IL @@ -4231,7 +4231,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) // total (accumulates unscaled offset) edi r10 // factor (accumulates the slice factor) esi r11 X86Reg kArrayRefReg = THIS_kREG; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 const X86Reg kArrayMTReg = kR10; const X86Reg kTotalReg = kR10; const X86Reg kFactorReg = kR11; @@ -4241,7 +4241,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) const X86Reg kFactorReg = kESI; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Simplifying assumption for fNeedPrologue. _ASSERTE(!pArrayOpScript->m_gcDesc || (pArrayOpScript->m_flags & ArrayOpScript::NEEDSWRITEBARRIER)); // Simplifying assumption for saving rsi and rdi. 
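Stepping back from the individual hunks: the renames in this file, and in the rest of the patch, follow a single convention. The summary below is inferred from the hunks themselves and is offered purely as a reading aid; the new prefixes appear to separate the platform code is generated for (TARGET_*) from the platform the runtime binary itself is built for (HOST_*).

// Inferred old-name -> new-name mapping (reading aid only):
//
//   FEATURE_PAL                        ->  TARGET_UNIX
//   PLATFORM_WINDOWS                   ->  TARGET_WINDOWS
//   _TARGET_X86_ / _TARGET_AMD64_ /
//   _TARGET_ARM_                       ->  TARGET_X86 / TARGET_AMD64 / TARGET_ARM
//   _X86_ / _AMD64_ / _ARM_ / _ARM64_  ->  HOST_X86 / HOST_AMD64 / HOST_ARM / HOST_ARM64
//   BIT64                              ->  HOST_64BIT
//
// A guard keeps its meaning and only changes spelling, e.g.:
#if defined(TARGET_AMD64) && !defined(TARGET_UNIX)   // was: _TARGET_AMD64_ && !FEATURE_PAL
// Windows-x64-only stub code goes here.
#endif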
@@ -4313,7 +4313,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) BOOL fSavedESI = FALSE; BOOL fSavedEDI = FALSE; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (fNeedPrologue) { // Save argument registers if we'll be making a call before using @@ -4400,7 +4400,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) // If that fails we will fall back to calling the slow helper ( ArrayStoreCheck ) that erects a frame. // See also JitInterfaceX86::JIT_Stelem_Ref -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // RCX contains pointer to object to check (Object*) // RDX contains array type handle @@ -4441,7 +4441,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) CodeLabel * Cleanup = NewCodeLabel(); X86EmitCondJump(Cleanup, X86CondCode::kJZ); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // get address of value to store // lea rcx, [rsp+offs] X86EmitEspOffset(0x8d, kRCX, ofsadjust + pArrayOpScript->m_fValLoc); @@ -4466,7 +4466,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) X86EmitCall(NewExternalCodeLabel((LPVOID)ArrayStoreCheck), 0); EmitLabel(Cleanup); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 X86EmitEspOffset(0x8b, kRCX, 0x00 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters()); X86EmitEspOffset(0x8b, kRDX, 0x08 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters()); X86EmitEspOffset(0x8b, kR8, 0x10 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters()); @@ -4756,14 +4756,14 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) case 4: if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // movss xmm0, dword ptr ELEMADDR Emit8(0xf3); X86EmitOp(0x100f, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // fld dword ptr ELEMADDR X86EmitOp(0xd9, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } else { @@ -4775,20 +4775,20 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) case 8: if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // movsd xmm0, qword ptr ELEMADDR Emit8(0xf2); X86EmitOp(0x100f, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // fld qword ptr ELEMADDR X86EmitOp(0xdd, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } else { // mov eax, ELEMADDR X86EmitOp(0x8b, kEAX, elemBaseReg, elemOfs, elemScaledReg, elemScale AMD64_ARG(k64BitOp)); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // mov edx, ELEMADDR + 4 X86EmitOp(0x8b, kEDX, elemBaseReg, elemOfs + 4, elemScaledReg, elemScale); #endif @@ -4820,7 +4820,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) X86EmitOp(0x89, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale); break; case 4: -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 if (pArrayOpScript->m_flags & ArrayOpScript::NEEDSWRITEBARRIER) { // mov SCRATCH, [esp + valoffset] @@ -4834,7 +4834,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) X86EmitCall(NewExternalCodeLabel((LPVOID) &JIT_WriteBarrierEAX), 0); } else -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE) { if (!TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)) @@ -4855,7 
+4855,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) X86EmitOp(0x110f, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale); } else -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 { // mov SCRATCH, [esp + valoffset] kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, SCRATCH_REGISTER_X86REG, ofsadjust AMD64_ARG(k32BitOp)); @@ -4869,7 +4869,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) if (!(pArrayOpScript->m_flags & ArrayOpScript::NEEDSWRITEBARRIER)) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE) { if (!TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)) @@ -4897,7 +4897,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) // mov ELEMADDR, SCRATCH X86EmitOp(0x89, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale, k64BitOp); } -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)); // on x86, value will never get a register: so too lazy to implement that case // mov SCRATCH, [esp + valoffset] X86EmitEspOffset(0x8b, SCRATCH_REGISTER_X86REG, pArrayOpScript->m_fValLoc + ofsadjust); @@ -4909,10 +4909,10 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) X86EmitEspOffset(0x8b, SCRATCH_REGISTER_X86REG, pArrayOpScript->m_fValLoc + ofsadjust + 4); // mov ELEMADDR+4, SCRATCH X86EmitOp(0x89, SCRATCH_REGISTER_X86REG, elemBaseReg, elemOfs+4, elemScaledReg, elemScale); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 break; } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 else { _ASSERTE(SCRATCH_REGISTER_X86REG == kEAX); // value to store is already in EAX where we want it. @@ -4929,22 +4929,22 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) X86EmitCall(NewExternalCodeLabel((PVOID)JIT_WriteBarrier), 0); break; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // FALL THROUGH (on x86) default: // Ensure that these registers have been saved! _ASSERTE(fSavedESI && fSavedEDI); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // mov rsi, [rsp + valoffset] kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, kRSI, ofsadjust); if (kRSI != kValueReg) X86EmitR2ROp(0x8b, kRSI, kValueReg); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)); // lea esi, [esp + valoffset] X86EmitEspOffset(0x8d, kESI, pArrayOpScript->m_fValLoc + ofsadjust); -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 // lea edi, ELEMADDR X86EmitOp(0x8d, kEDI, elemBaseReg, elemOfs, elemScaledReg, elemScale AMD64_ARG(k64BitOp)); @@ -4958,7 +4958,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) EmitLabel(Epilog); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (fNeedPrologue) { if (fNeedScratchArea) @@ -4975,7 +4975,7 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) } X86EmitReturn(0); -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 // Restore the callee-saved registers X86EmitPopReg(kFactorReg); X86EmitPopReg(kTotalReg); @@ -4986,14 +4986,14 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) #else X86EmitReturn(0); #endif -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 // Exception points must clean up the stack for all those extra args. // kFactorReg and kTotalReg will be popped by the jump targets. 
void *pvExceptionThrowFn; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) #define ARRAYOP_EXCEPTION_HELPERS(base) { (PVOID)base, (PVOID)base##_RSIRDI, (PVOID)base##_ScratchArea, (PVOID)base##_RSIRDI_ScratchArea } static void *rgNullExceptionHelpers[] = ARRAYOP_EXCEPTION_HELPERS(ArrayOpStubNullException); static void *rgRangeExceptionHelpers[] = ARRAYOP_EXCEPTION_HELPERS(ArrayOpStubRangeException); @@ -5001,40 +5001,40 @@ VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript) #undef ARRAYOP_EXCEPTION_HELPERS UINT iExceptionHelper = (fNeedRSIRDI ? 1 : 0) + (fNeedScratchArea ? 2 : 0); -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) EmitLabel(Inner_nullexception); -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 pvExceptionThrowFn = (LPVOID)ArrayOpStubNullException; Emit8(0xb8); // mov EAX, Emit32(pArrayOpScript->m_cbretpop); -#else //_TARGET_AMD64_ +#else //TARGET_AMD64 pvExceptionThrowFn = rgNullExceptionHelpers[iExceptionHelper]; -#endif //!_TARGET_AMD64_ +#endif //!TARGET_AMD64 X86EmitNearJump(NewExternalCodeLabel(pvExceptionThrowFn)); EmitLabel(Inner_rangeexception); -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 pvExceptionThrowFn = (LPVOID)ArrayOpStubRangeException; Emit8(0xb8); // mov EAX, Emit32(pArrayOpScript->m_cbretpop); -#else //_TARGET_AMD64_ +#else //TARGET_AMD64 pvExceptionThrowFn = rgRangeExceptionHelpers[iExceptionHelper]; -#endif //!_TARGET_AMD64_ +#endif //!TARGET_AMD64 X86EmitNearJump(NewExternalCodeLabel(pvExceptionThrowFn)); if (Inner_typeMismatchexception != NULL) { EmitLabel(Inner_typeMismatchexception); -#ifndef _TARGET_AMD64_ +#ifndef TARGET_AMD64 pvExceptionThrowFn = (LPVOID)ArrayOpStubTypeMismatchException; Emit8(0xb8); // mov EAX, Emit32(pArrayOpScript->m_cbretpop); -#else //_TARGET_AMD64_ +#else //TARGET_AMD64 pvExceptionThrowFn = rgTypeMismatchExceptionHelpers[iExceptionHelper]; -#endif //!_TARGET_AMD64_ +#endif //!TARGET_AMD64 X86EmitNearJump(NewExternalCodeLabel(pvExceptionThrowFn)); } } @@ -5055,7 +5055,7 @@ VOID StubLinkerCPU::EmitDebugBreak() Emit8(0xCC); } -#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_) +#if defined(FEATURE_COMINTEROP) && defined(TARGET_X86) #ifdef _MSC_VER #pragma warning(push) @@ -5116,14 +5116,14 @@ Thread* __stdcall CreateThreadBlockReturnHr(ComMethodFrame *pFrame) #pragma warning(pop) #endif -#endif // FEATURE_COMINTEROP && _TARGET_X86_ +#endif // FEATURE_COMINTEROP && TARGET_X86 #endif // !CROSSGEN_COMPILE && !FEATURE_STUBS_AS_IL #endif // !DACCESS_COMPILE -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // // TailCallFrame Object Scanning @@ -5179,7 +5179,7 @@ void TailCallFrame::GcScanRoots(promote_func *fn, ScanContext* sc) offset &= 0x7FFFFFFC; -#ifdef BIT64 +#ifdef HOST_64BIT offset <<= 1; #endif offset += sizeof(void*); @@ -5239,7 +5239,7 @@ static void EncodeOneGCOffset(CPUSTUBLINKER *pSl, ULONG delta, BOOL maybeInterio // we use the 1 bit to denote a range _ASSERTE((delta % sizeof(void*)) == 0); -#if defined(BIT64) +#if defined(HOST_64BIT) // For 64-bit, we have 3 bits of alignment, so we allow larger frames // by shifting and gaining a free high-bit. 
ULONG encodedDelta = delta >> 1; @@ -5839,7 +5839,7 @@ Stub * StubLinkerCPU::CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig, } #endif // DACCESS_COMPILE -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #ifdef HAS_FIXUP_PRECODE diff --git a/src/coreclr/src/vm/i386/stublinkerx86.h b/src/coreclr/src/vm/i386/stublinkerx86.h index 6b09be8fe41de..41c5c473b0991 100644 --- a/src/coreclr/src/vm/i386/stublinkerx86.h +++ b/src/coreclr/src/vm/i386/stublinkerx86.h @@ -44,7 +44,7 @@ extern PCODE GetPreStubEntryPoint(); #define X86_INSTR_MOVUPS_RM_R 0x110F // movups xmm1/mem128, xmm2 #define X86_INSTR_XORPS 0x570F // xorps xmm1, xmm2/mem128 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 #define X86_INSTR_MOV_R10_IMM64 0xBA49 // mov r10, imm64 #endif @@ -63,9 +63,9 @@ enum X86Reg kESI = 6, kEDI = 7, -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 NumX86Regs = 8, -#endif // _TARGET_X86_ +#endif // TARGET_X86 kXMM0 = 0, kXMM1 = 1, @@ -73,7 +73,7 @@ enum X86Reg kXMM3 = 3, kXMM4 = 4, kXMM5 = 5, -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) kXMM6 = 6, kXMM7 = 7, kXMM8 = 8, @@ -103,7 +103,7 @@ enum X86Reg kR15 = 15, NumX86Regs = 16, -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // We use "push ecx" instead of "sub esp, sizeof(LPVOID)" kDummyPushReg = kECX @@ -162,7 +162,7 @@ class StubLinkerCPU : public StubLinker { public: -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 enum X86OperandSize { k32BitOp, @@ -190,7 +190,7 @@ class StubLinkerCPU : public StubLinker VOID X86EmitCmpRegImm32(X86Reg reg, INT32 imm32); // cmp reg, imm32 VOID X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32); // cmp [reg+offs], imm32 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 VOID X64EmitCmp32RegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32); // cmp dword ptr [reg+offs], imm32 VOID X64EmitMovXmmXmm(X86Reg destXmmreg, X86Reg srcXmmReg); @@ -220,7 +220,7 @@ class StubLinkerCPU : public StubLinker VOID X86EmitCondJump(CodeLabel *pTarget, X86CondCode::cc condcode); VOID X86EmitCall(CodeLabel *target, int iArgBytes); VOID X86EmitReturn(WORD wArgBytes); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 VOID X86EmitLeaRIP(CodeLabel *target, X86Reg reg); #endif @@ -228,18 +228,18 @@ class StubLinkerCPU : public StubLinker VOID X86EmitIndexRegLoad(X86Reg dstreg, X86Reg srcreg, __int32 ofs = 0); VOID X86EmitIndexRegStore(X86Reg dstreg, __int32 ofs, X86Reg srcreg); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) VOID X86EmitIndexRegStoreRSP(__int32 ofs, X86Reg srcreg); VOID X86EmitIndexRegStoreR12(__int32 ofs, X86Reg srcreg); -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) VOID X86EmitIndexPush(X86Reg srcreg, __int32 ofs); VOID X86EmitBaseIndexPush(X86Reg baseReg, X86Reg indexReg, __int32 scale, __int32 ofs); VOID X86EmitIndexPop(X86Reg srcreg, __int32 ofs); VOID X86EmitIndexLea(X86Reg dstreg, X86Reg srcreg, __int32 ofs); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) VOID X86EmitIndexLeaRSP(X86Reg dstreg, X86Reg srcreg, __int32 ofs); -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) VOID X86EmitSPIndexPush(__int32 ofs); VOID X86EmitSubEsp(INT32 imm32); @@ -251,14 +251,14 @@ class StubLinkerCPU : public StubLinker ); VOID X86EmitPushEBPframe(); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #if defined(PROFILING_SUPPORTED) && !defined(FEATURE_STUBS_AS_IL) // These are used to emit calls to notify the profiler of transitions in and out of // managed code through COM->COM+ interop or N/Direct VOID EmitProfilerComCallProlog(TADDR pFrameVptr, X86Reg regFrame); VOID 
EmitProfilerComCallEpilog(TADDR pFrameVptr, X86Reg regFrame); #endif // PROFILING_SUPPORTED && !FEATURE_STUBS_AS_IL -#endif // _TARGET_X86_ +#endif // TARGET_X86 @@ -291,7 +291,7 @@ class StubLinkerCPU : public StubLinker AMD64_ARG(X86OperandSize OperandSize = k32BitOp) ); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 FORCEINLINE VOID X86EmitOp(WORD opcode, X86Reg altreg, @@ -302,7 +302,7 @@ class StubLinkerCPU : public StubLinker { X86EmitOp(opcode, altreg, basereg, ofs, (X86Reg)0, 0, OperandSize); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // Emits // @@ -336,7 +336,7 @@ class StubLinkerCPU : public StubLinker VOID X86_64BitOperands () { WRAPPER_NO_CONTRACT; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 Emit8(0x48); #endif } @@ -357,20 +357,20 @@ class StubLinkerCPU : public StubLinker VOID EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 void EmitComMethodStubProlog(TADDR pFrameVptr, CodeLabel** rgRareLabels, CodeLabel** rgRejoinLabels, BOOL bShouldProfile); void EmitComMethodStubEpilog(TADDR pFrameVptr, CodeLabel** rgRareLabels, CodeLabel** rgRejoinLabels, BOOL bShouldProfile); -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // !FEATURE_STUBS_AS_IL -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 VOID EmitUnboxMethodStub(MethodDesc* pRealMD); -#endif // _TARGET_X86_ +#endif // TARGET_X86 VOID EmitTailJumpToMethod(MethodDesc *pMD); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 VOID EmitLoadMethodAddressIntoAX(MethodDesc *pMD); #endif @@ -379,7 +379,7 @@ class StubLinkerCPU : public StubLinker #endif // FEATURE_SHARE_GENERIC_CODE VOID EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg); -#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_) +#if defined(FEATURE_COMINTEROP) && defined(TARGET_X86) //======================================================================== // shared Epilog for stubs that enter managed code from COM // uses a return thunk within the method desc @@ -388,24 +388,24 @@ class StubLinkerCPU : public StubLinker CodeLabel** rgRejoinLabels, unsigned offsetReturnThunk, BOOL bShouldProfile); -#endif // FEATURE_COMINTEROP && _TARGET_X86_ +#endif // FEATURE_COMINTEROP && TARGET_X86 #ifndef FEATURE_STUBS_AS_IL //=========================================================================== // Computes hash code for MulticastDelegate.Invoke() static UINT_PTR HashMulticastInvoke(MetaSig* pSig); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //=========================================================================== // Emits code for Delegate.Invoke() any delegate type VOID EmitDelegateInvoke(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_MULTICASTSTUB_AS_IL) //=========================================================================== // Emits code for MulticastDelegate.Invoke() - sig specific VOID EmitMulticastInvoke(UINT_PTR hash); -#endif // defined(_TARGET_X86_) && !defined(FEATURE_MULTICASTSTUB_AS_IL) +#endif // defined(TARGET_X86) && !defined(FEATURE_MULTICASTSTUB_AS_IL) #endif // !FEATURE_STUBS_AS_IL //=========================================================================== @@ -428,7 +428,7 @@ class StubLinkerCPU : public StubLinker VOID EmitDebugBreak(); #endif // !FEATURE_STUBS_AS_IL -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) 
//=========================================================================== // Emits code to log JITHelper access void EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFuncCount); @@ -443,13 +443,13 @@ class StubLinkerCPU : public StubLinker virtual VOID EmitUnwindInfoCheckSubfunction(); #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 static Stub * CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig, MethodDesc* pMD, CorInfoHelperTailCallSpecialHandling flags); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 private: VOID X86EmitSubEspWorker(INT32 imm32); @@ -477,7 +477,7 @@ BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected, EXTERN_C VOID STDCALL PrecodeFixupThunk(); -#ifdef BIT64 +#ifdef HOST_64BIT #define OFFSETOF_PRECODE_TYPE 0 #define OFFSETOF_PRECODE_TYPE_CALL_OR_JMP 5 @@ -495,7 +495,7 @@ EXTERN_C VOID STDCALL PrecodeRemotingThunk(); #define SIZEOF_PRECODE_BASE 8 -#endif // BIT64 +#endif // HOST_64BIT #include @@ -510,7 +510,7 @@ struct InvalidPrecode { // Regular precode struct StubPrecode { -#ifdef BIT64 +#ifdef HOST_64BIT static const BYTE Type = 0x40; // mov r10,pMethodDesc // inc eax @@ -520,7 +520,7 @@ struct StubPrecode { // mov eax,pMethodDesc // mov ebp,ebp // jmp Stub -#endif // BIT64 +#endif // HOST_64BIT IN_TARGET_64BIT(USHORT m_movR10;) IN_TARGET_32BIT(BYTE m_movEAX;) @@ -583,7 +583,7 @@ typedef DPTR(StubPrecode) PTR_StubPrecode; // (This is fake precode. VTable slot does not point to it.) struct NDirectImportPrecode : StubPrecode { -#ifdef BIT64 +#ifdef HOST_64BIT static const int Type = 0x48; // mov r10,pMethodDesc // dec eax @@ -593,7 +593,7 @@ struct NDirectImportPrecode : StubPrecode { // mov eax,pMethodDesc // mov eax,eax // jmp NDirectImportThunk -#endif // BIT64 +#endif // HOST_64BIT void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator); @@ -708,11 +708,11 @@ typedef DPTR(FixupPrecode) PTR_FixupPrecode; // Precode to stuffle this and retbuf for closed delegates over static methods with return buffer struct ThisPtrRetBufPrecode { -#ifdef BIT64 +#ifdef HOST_64BIT static const int Type = 0x90; #else static const int Type = 0xC2; -#endif // BIT64 +#endif // HOST_64BIT // mov regScratch,regArg0 // mov regArg0,regArg1 diff --git a/src/coreclr/src/vm/ilmarshalers.cpp b/src/coreclr/src/vm/ilmarshalers.cpp index c2d10283dae56..a9257e048bf70 100644 --- a/src/coreclr/src/vm/ilmarshalers.cpp +++ b/src/coreclr/src/vm/ilmarshalers.cpp @@ -3497,7 +3497,7 @@ MarshalerOverrideStatus ILBlittableValueClassWithCopyCtorMarshaler::ArgumentOver pslIL->EmitLDARG(argidx); pslIL->EmitCALL(pslIL->GetToken(pargs->mm.m_pDtor), 1, 0); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pslIL->SetStubTargetArgType(&locDesc); // native type is the value type pslILDispatch->EmitLDLOC(dwNewValueTypeLocal); // we load the local directly #else diff --git a/src/coreclr/src/vm/ilmarshalers.h b/src/coreclr/src/vm/ilmarshalers.h index 556391d467048..ed824987f0d81 100644 --- a/src/coreclr/src/vm/ilmarshalers.h +++ b/src/coreclr/src/vm/ilmarshalers.h @@ -716,7 +716,7 @@ class ILMarshaler nativeSize = wNativeSize; } -#if defined(PLATFORM_WINDOWS) +#if defined(TARGET_WINDOWS) // JIT32 and JIT64 (which is only used on the Windows Desktop CLR) has a problem generating // code for the pinvoke ILStubs which do a return using a struct type. Therefore, we // change the signature of calli to return void and make the return buffer as first argument. @@ -727,7 +727,7 @@ class ILMarshaler // and use byrefNativeReturn for all other structs. 
if (nativeMethodIsMemberFunction) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM byrefNativeReturn = !nativeType.InternalToken.GetMethodTable()->IsNativeHFA(); #else byrefNativeReturn = true; @@ -735,7 +735,7 @@ class ILMarshaler } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 switch (nativeSize) { case 1: typ = ELEMENT_TYPE_U1; break; @@ -744,9 +744,9 @@ class ILMarshaler case 8: typ = ELEMENT_TYPE_U8; break; default: byrefNativeReturn = true; break; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } -#endif // defined(PLATFORM_WINDOWS) +#endif // defined(TARGET_WINDOWS) // for UNIX_X86_ABI, we always need a return buffer argument for any size of structs. #ifdef UNIX_X86_ABI @@ -1841,11 +1841,11 @@ class ILCopyMarshalerSimple : public ILCopyMarshalerBase // return (ELEMENT_TYPE == -#ifdef BIT64 +#ifdef HOST_64BIT ELEMENT_TYPE_I8 -#else // BIT64 +#else // HOST_64BIT ELEMENT_TYPE_I4 -#endif // BIT64 +#endif // HOST_64BIT ) && (NULL != m_pargs->m_pMT); } @@ -1853,7 +1853,7 @@ class ILCopyMarshalerSimple : public ILCopyMarshalerBase { WRAPPER_NO_CONTRACT; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // If the argument is passed by value, if (!IsByref(m_dwMarshalFlags) && !IsRetval(m_dwMarshalFlags) && !IsFieldMarshal(m_dwMarshalFlags)) { @@ -1869,7 +1869,7 @@ class ILCopyMarshalerSimple : public ILCopyMarshalerBase } } } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 return false; } diff --git a/src/coreclr/src/vm/interoputil.cpp b/src/coreclr/src/vm/interoputil.cpp index e45c90af5c055..0ec23b624aede 100644 --- a/src/coreclr/src/vm/interoputil.cpp +++ b/src/coreclr/src/vm/interoputil.cpp @@ -2094,11 +2094,11 @@ HRESULT LoadRegTypeLib(_In_ REFGUID guid, hr = QueryPathOfRegTypeLib(guid, wVerMajor, wVerMinor, LOCALE_USER_DEFAULT, &wzPath); if (SUCCEEDED(hr)) { -#ifdef BIT64 +#ifdef HOST_64BIT REGKIND rk = (REGKIND)(REGKIND_NONE | LOAD_TLB_AS_64BIT); #else REGKIND rk = (REGKIND)(REGKIND_NONE | LOAD_TLB_AS_32BIT); -#endif // BIT64 +#endif // HOST_64BIT hr = LoadTypeLibEx(wzPath, rk, pptlib); } } @@ -4944,7 +4944,7 @@ void InitializeComInterop() InitializeSListHead(&RCW::s_RCWStandbyList); ComCall::Init(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 ComPlusCall::Init(); #endif #ifndef CROSSGEN_COMPILE diff --git a/src/coreclr/src/vm/interoputil.h b/src/coreclr/src/vm/interoputil.h index 3a4fab136b569..5072cdbf6c544 100644 --- a/src/coreclr/src/vm/interoputil.h +++ b/src/coreclr/src/vm/interoputil.h @@ -45,7 +45,7 @@ enum DefaultInterfaceType struct SYSTEMCOLOR { -#ifdef BIT64 +#ifdef HOST_64BIT STRINGREF name; INT64 value; #else diff --git a/src/coreclr/src/vm/interpreter.cpp b/src/coreclr/src/vm/interpreter.cpp index 0f236f65335d8..662e57354f97c 100644 --- a/src/coreclr/src/vm/interpreter.cpp +++ b/src/coreclr/src/vm/interpreter.cpp @@ -91,7 +91,7 @@ InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* hasRetBuff = false; } #endif -#if defined(_ARM_) || defined(_AMD64_)|| defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_AMD64)|| defined(HOST_ARM64) // ...or it fits into one register. if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= sizeof(void*)) { @@ -286,26 +286,26 @@ void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* meth // If there is a return buffer, it will appear next in the arguments list for a direct call. // Reserve its offset now, for use after the explicit arguments. -#if defined(_ARM_) +#if defined(HOST_ARM) // On ARM, for direct calls we always treat HFA return types as having ret buffs. 
// So figure out if we have an HFA return type. bool hasHFARetType = methInfo->args.retType == CORINFO_TYPE_VALUECLASS && CorInfoTypeIsFloatingPoint(comp->getHFAType(methInfo->args.retTypeClass)) && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG; -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) if (GetFlag() -#if defined(_ARM_) +#if defined(HOST_ARM) // On ARM, for direct calls we always treat HFA return types as having ret buffs. || hasHFARetType -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) ) { directRetBuffOffset = reinterpret_cast(ArgSlotEndianessFixup(directOffset, sizeof(void*))); directOffset++; } -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (GetFlag()) { directVarArgOffset = reinterpret_cast(ArgSlotEndianessFixup(directOffset, sizeof(void*))); @@ -467,12 +467,12 @@ bool InterpreterMethodInfo::GetPinningBit(unsigned locNum) void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noReg, bool twoSlotAlign) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) assert(!noReg); assert(!twoSlotAlign); AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/false); -#else // !_AMD64_ -#if defined(_X86_) || defined(_ARM64_) +#else // !HOST_AMD64 +#if defined(HOST_X86) || defined(HOST_ARM64) assert(!twoSlotAlign); // Shouldn't use this flag on x86 (it wouldn't work right in the stack, at least). #endif // If the argument requires two-slot alignment, make sure we have it. This is the @@ -495,7 +495,7 @@ void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noR } } -#if defined(_ARM64_) +#if defined(HOST_ARM64) // On ARM64 we're not going to place an argument 'partially' on the stack // if all slots fits into registers, they go into registers, otherwise they go into stack. if (!noReg && numRegArgs+numSlots <= NumberOfIntegerRegArgs()) @@ -514,12 +514,12 @@ void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noR } else { -#if defined(_X86_) +#if defined(HOST_X86) // On X86, stack args are pushed in order. We will add the total size of the arguments to this offset, // so we set this to a negative number relative to the SP before the first arg push. callerArgStackSlots += numSlots; ClrSafeInt offset(-callerArgStackSlots); -#elif defined(_ARM_) || defined(_ARM64_) +#elif defined(HOST_ARM) || defined(HOST_ARM64) // On ARM, args are pushed in *reverse* order. So we will create an offset relative to the address // of the first stack arg; later, we will add the size of the non-stack arguments. ClrSafeInt offset(callerArgStackSlots); @@ -527,14 +527,14 @@ void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noR offset *= static_cast(sizeof(void*)); assert(!offset.IsOverflow()); argOffsets[canonIndex] = offset.Value(); -#if defined(_ARM_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_ARM64) callerArgStackSlots += numSlots; #endif } -#endif // !_AMD64_ +#endif // !HOST_AMD64 } -#if defined(_AMD64_) +#if defined(HOST_AMD64) // AMD64 calling convention allows any type that can be contained in 64 bits to be passed in registers, // if not contained or they are of a size not a power of 2, then they are passed by reference on the stack. // RCX, RDX, R8, R9 are the int arg registers. 
XMM0-3 overlap with the integer registers and are used @@ -575,13 +575,13 @@ void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numS void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlots, bool twoSlotAlign) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) assert(!twoSlotAlign); assert(numSlots == 1); AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/ true); -#elif defined(_X86_) +#elif defined(HOST_X86) assert(false); // Don't call this on x86; we pass all FP on the stack. -#elif defined(_ARM_) +#elif defined(HOST_ARM) // We require "numSlots" alignment. assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots); argIsReg[canonIndex] = ARS_FloatReg; @@ -630,7 +630,7 @@ void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlot numFPRegArgSlots += numSlots; } } -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots); assert(!twoSlotAlign); @@ -796,7 +796,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, // // So the structure of the code will look like this (in the non-ILstub case): // -#if defined(_X86_) || defined(_AMD64_) +#if defined(HOST_X86) || defined(HOST_AMD64) // push ebp // mov ebp, esp // [if there are register arguments in ecx or edx, push them] @@ -806,7 +806,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, // [if we pushed register arguments, increment esp by the right amount.] // pop ebp // ret ; where is the number of argument stack slots in the call to the stub. -#elif defined (_ARM_) +#elif defined (HOST_ARM) // TODO. #endif @@ -825,18 +825,18 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, if (!jmpCall) { sl.Init(); -#if defined(_X86_) || defined(_AMD64_) -#if defined(_X86_) +#if defined(HOST_X86) || defined(HOST_AMD64) +#if defined(HOST_X86) sl.X86EmitPushReg(kEBP); sl.X86EmitMovRegReg(kEBP, static_cast(kESP_Unsafe)); #endif -#elif defined(_ARM_) +#elif defined(HOST_ARM) // On ARM we use R12 as a "scratch" register -- callee-trashed, not used // for arguments. ThumbReg r11 = ThumbReg(11); ThumbReg r12 = ThumbReg(12); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) // x8 through x15 are scratch registers on ARM64. IntReg x8 = IntReg(8); IntReg x9 = IntReg(9); @@ -924,7 +924,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, unsigned physArgIndex = 0; -#if defined(_ARM_) +#if defined(HOST_ARM) // The stub linker has a weird little limitation: all stubs it's used // for on ARM push some callee-saved register, so the unwind info // code was written assuming at least one would be pushed. I don't know how to @@ -938,18 +938,18 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, #endif // The "1" here is for the return address. const int NumberOfFixedPushes = 1 + NumberOfCalleeSaveRegsToPush; -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) // FP, LR const int NumberOfFixedPushes = 2; #endif #if defined(FEATURE_HFA) -#if defined(_ARM_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_ARM64) // On ARM, a non-retBuffArg method that returns a struct type might be an HFA return. Figure // that out. 
unsigned HFARetTypeSize = 0; #endif -#if defined(_ARM64_) +#if defined(HOST_ARM64) unsigned cHFAVars = 0; #endif if (info->args.retType == CORINFO_TYPE_VALUECLASS @@ -957,10 +957,10 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, && info->args.getCallConv() != CORINFO_CALLCONV_VARARG) { HFARetTypeSize = getClassSize(info->args.retTypeClass); -#if defined(_ARM_) +#if defined(HOST_ARM) // Round up to a double boundary; HFARetTypeSize = ((HFARetTypeSize+ sizeof(double) - 1) / sizeof(double)) * sizeof(double); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) // We don't need to round it up to double. Unlike ARM, whether it's a float or a double each field will // occupy one slot. We'll handle the stack alignment in the prolog where we have all the information about // what is going to be pushed on the stack. @@ -1003,7 +1003,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, argState.AddArg(vaSigCookieIndex); } -#if defined(_ARM_) || defined(_AMD64_) || defined(_ARM64_) +#if defined(HOST_ARM) || defined(HOST_AMD64) || defined(HOST_ARM64) // Generics context comes before args on ARM. Would be better if I factored this out as a call, // to avoid large swatches of duplicate code. if (hasGenericsContextArg) @@ -1011,7 +1011,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++; argState.AddArg(genericsContextArgIndex); } -#endif // _ARM_ || _AMD64_ || _ARM64_ +#endif // HOST_ARM || HOST_AMD64 || HOST_ARM64 CORINFO_ARG_LIST_HANDLE argPtr = info->args.args; // Some arguments are have been passed in registers, some in memory. We must generate code that @@ -1062,13 +1062,13 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, // Two integer slot arguments. case CORINFO_TYPE_LONG: case CORINFO_TYPE_ULONG: -#if defined(_X86_) +#if defined(HOST_X86) // Longs are always passed on the stack -- with no obvious alignment. argState.AddArg(k, 2, /*noReg*/true); -#elif defined(_ARM_) +#elif defined(HOST_ARM) // LONGS have 2-reg alignment; inc reg if necessary. 
argState.AddArg(k, 2, /*noReg*/false, /*twoSlotAlign*/true); -#elif defined(_AMD64_) || defined(_ARM64_) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) argState.AddArg(k); #else #error unknown platform @@ -1077,11 +1077,11 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, // One float slot args: case CORINFO_TYPE_FLOAT: -#if defined(_X86_) +#if defined(HOST_X86) argState.AddArg(k, 1, /*noReg*/true); -#elif defined(_ARM_) +#elif defined(HOST_ARM) argState.AddFPArg(k, 1, /*twoSlotAlign*/false); -#elif defined(_AMD64_) || defined(_ARM64_) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) argState.AddFPArg(k, 1, false); #else #error unknown platform @@ -1090,11 +1090,11 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, // Two float slot args case CORINFO_TYPE_DOUBLE: -#if defined(_X86_) +#if defined(HOST_X86) argState.AddArg(k, 2, /*noReg*/true); -#elif defined(_ARM_) +#elif defined(HOST_ARM) argState.AddFPArg(k, 2, /*twoSlotAlign*/true); -#elif defined(_AMD64_) || defined(_ARM64_) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) argState.AddFPArg(k, 1, false); #else #error unknown platform @@ -1107,18 +1107,18 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, { unsigned sz = getClassSize(vcTypeRet); unsigned szSlots = max(1, sz / sizeof(void*)); -#if defined(_X86_) +#if defined(HOST_X86) argState.AddArg(k, static_cast(szSlots), /*noReg*/true); -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) argState.AddArg(k, static_cast(szSlots)); -#elif defined(_ARM_) || defined(_ARM64_) +#elif defined(HOST_ARM) || defined(HOST_ARM64) CorInfoType hfaType = comp->getHFAType(vcTypeRet); if (CorInfoTypeIsFloatingPoint(hfaType)) { argState.AddFPArg(k, szSlots, -#if defined(_ARM_) +#if defined(HOST_ARM) /*twoSlotAlign*/ (hfaType == CORINFO_TYPE_DOUBLE) -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) /*twoSlotAlign*/ false // unlike ARM32 FP args always consume 1 slot on ARM64 #endif ); @@ -1127,9 +1127,9 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, { unsigned align = comp->getClassAlignmentRequirement(vcTypeRet, FALSE); argState.AddArg(k, static_cast(szSlots), /*noReg*/false, -#if defined(_ARM_) +#if defined(HOST_ARM) /*twoSlotAlign*/ (align == 8) -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) /*twoSlotAlign*/ false #endif ); @@ -1147,8 +1147,8 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, argPtr = comp->getArgNext(argPtr); } -#if defined(_X86_) - // Generics context comes last on _X86_. Would be better if I factored this out as a call, +#if defined(HOST_X86) + // Generics context comes last on HOST_X86. Would be better if I factored this out as a call, // to avoid large swatches of duplicate code. if (hasGenericsContextArg) { @@ -1162,7 +1162,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, unsigned short stackArgBaseOffset = (argState.numRegArgs + 2 + argState.callerArgStackSlots) * sizeof(void*); unsigned intRegArgBaseOffset = 0; -#elif defined(_ARM_) +#elif defined(HOST_ARM) // We're choosing to always push all arg regs on ARM -- this is the only option // that ThumbEmitProlog currently gives. 
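The ArgState::AddArg hunks above encode one of the trickier per-host differences: how a stack argument's offset is recorded before the final frame layout is known. A minimal standalone sketch of that logic follows; the function name and parameters are invented for illustration, the HOST_AMD64 path is omitted because it goes through AddArgAmd64 instead, and both results are later rebased once the total pushed size is known, as the comments in the original code note.

#include <cstddef>

// Records a provisional offset for a stack argument occupying numSlots
// pointer-sized slots, given how many stack slots have been assigned so far.
static int ProvisionalStackArgOffset(unsigned slotsSoFar, unsigned numSlots)
{
#if defined(HOST_X86)
    // x86: args are pushed in order, so store a negative offset relative to
    // the stack pointer as it was before the first argument push.
    return -static_cast<int>((slotsSoFar + numSlots) * sizeof(void*));
#else
    // ARM/ARM64: args are laid out in reverse, so store a positive offset
    // relative to the address of the first stack argument.
    (void)numSlots; // the slot count is added to the running total afterwards
    return static_cast<int>(slotsSoFar * sizeof(void*));
#endif
}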
@@ -1176,20 +1176,20 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, unsigned fpStackSlots = ((argState.numFPRegArgSlots + 1) / 2) * 2; unsigned intRegArgBaseOffset = (fpStackSlots + NumberOfFixedPushes) * sizeof(void*); unsigned short stackArgBaseOffset = intRegArgBaseOffset + (argState.numRegArgs) * sizeof(void*); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) // See StubLinkerCPU::EmitProlog for the layout of the stack unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*); unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*)); -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) unsigned short stackArgBaseOffset = (argState.numRegArgs) * sizeof(void*); #else #error unsupported platform #endif -#if defined(_ARM_) +#if defined(HOST_ARM) WORD regArgMask = 0; -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) // argPerm maps from an index into the argOffsets/argIsReg arrays to // the order that the arguments are passed. unsigned* argPermInverse = new unsigned[totalArgs]; @@ -1211,7 +1211,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, { regArgsFound++; // If any int reg args are used on ARM, we push them all (in ThumbEmitProlog) -#if defined(_X86_) +#if defined(HOST_X86) if (regArgsFound == 1) { if (!jmpCall) { sl.X86EmitPushReg(kECX); } @@ -1223,9 +1223,9 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, if (!jmpCall) { sl.X86EmitPushReg(kEDX); } argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*); } -#elif defined(_ARM_) || defined(_ARM64_) +#elif defined(HOST_ARM) || defined(HOST_ARM64) argState.argOffsets[k] += intRegArgBaseOffset; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) // First home the register arguments in the stack space allocated by the caller. // Refer to Stack Allocation on x64 [http://msdn.microsoft.com/en-US/library/ew5tede7(v=vs.80).aspx] X86Reg argRegs[] = { kECX, kEDX, kR8, kR9 }; @@ -1235,7 +1235,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, #error unsupported platform #endif } -#if defined(_AMD64_) +#if defined(HOST_AMD64) else if (argState.argIsReg[k] == ArgState::ARS_FloatReg) { // Increment regArgsFound since float/int arguments have overlapping registers. @@ -1311,7 +1311,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, break; } // The argument registers have been pushed by now, so we can use them. -#if defined(_X86_) +#if defined(HOST_X86) // First arg is pointer to the base of the ILargs arr -- i.e., the current stack value. sl.X86EmitMovRegReg(kEDX, static_cast(kESP_Unsafe)); // InterpretMethod uses F_CALL_CONV == __fastcall; pass 2 args in regs. @@ -1338,7 +1338,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, } sl.X86EmitPopReg(kEBP); sl.X86EmitReturn(static_cast(argState.callerArgStackSlots * sizeof(void*))); -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) // Pass "ilArgs", i.e. just the point where registers have been homed, as 2nd arg sl.X86EmitIndexLeaRSP(ARGUMENT_kREG2, static_cast(kESP_Unsafe), 8); @@ -1365,7 +1365,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0); sl.X86EmitAddEsp(interpMethodArgSize); sl.X86EmitReturn(0); -#elif defined(_ARM_) +#elif defined(HOST_ARM) // We have to maintain 8-byte stack alignment. 
So if the number of // slots we would normally push is not a multiple of two, add a random @@ -1427,7 +1427,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, sl.ThumbEmitEpilog(); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) UINT stackFrameSize = argState.numFPRegArgSlots; @@ -2541,13 +2541,13 @@ void Interpreter::ExecuteMethod(ARG_SLOT* retVal, __out bool* pDoJmpCall, __out { assert(m_curStackHt > 0); m_curStackHt--; -#if defined(_DEBUG) || defined(_AMD64_) +#if defined(_DEBUG) || defined(HOST_AMD64) CorInfoType cit = OpStackTypeGet(m_curStackHt).ToCorInfoType(); -#endif // _DEBUG || _AMD64_ +#endif // _DEBUG || HOST_AMD64 #ifdef _DEBUG assert(cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT || cit == CORINFO_TYPE_NATIVEINT); #endif // _DEBUG -#if defined(_AMD64_) +#if defined(HOST_AMD64) UINT32 val = (cit == CORINFO_TYPE_NATIVEINT) ? (INT32) OpStackGet(m_curStackHt) : OpStackGet(m_curStackHt); #else @@ -4371,7 +4371,7 @@ void Interpreter::BinaryArithOp() case CORINFO_TYPE_SHIFTED_LONG: { bool looseLong = false; -#if defined(_AMD64_) +#if defined(HOST_AMD64) looseLong = (s_InterpreterLooseRules && (t2.ToCorInfoType() == CORINFO_TYPE_NATIVEINT || t2.ToCorInfoType() == CORINFO_TYPE_BYREF)); #endif @@ -5498,7 +5498,7 @@ CORINFO_CLASS_HANDLE Interpreter::GetTypeFromToken(BYTE* codePtr, CorInfoTokenKi bool Interpreter::IsValidPointerType(CorInfoType cit) { bool isValid = (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_BYREF); -#if defined(_AMD64_) +#if defined(HOST_AMD64) isValid = isValid || (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG); #endif return isValid; @@ -5977,7 +5977,7 @@ void Interpreter::NewArr() COMPlusThrow(kOverflowException); } -#ifdef BIT64 +#ifdef HOST_64BIT // Even though ECMA allows using a native int as the argument to newarr instruction // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit // platforms we can't create an array whose size exceeds 32 bits. 
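The comment above explains why the interpreter's newarr path rejects lengths above 32 bits even on 64-bit hosts: ArrayBase::m_NumComponents is a 32-bit field. A minimal standalone sketch of that HOST_64BIT-gated guard (function name and return-value handling are illustrative assumptions, not the runtime's code):

#include <climits>
#include <cstdint>

// Returns true when the requested array length fits the 32-bit component count.
bool ArrayLengthFits(intptr_t size)
{
    if (size < 0)
        return false;                 // negative lengths always overflow
#ifdef HOST_64BIT
    if (size > INT_MAX)
        return false;                 // m_NumComponents is 32-bit even on 64-bit hosts
#endif
    return true;
}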
@@ -6136,14 +6136,14 @@ void Interpreter::MkRefany() InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo); TypedByRef* tbr; -#if defined(_AMD64_) +#if defined(HOST_AMD64) assert(typedRefIT.IsLargeStruct(&m_interpCeeInfo)); tbr = (TypedByRef*) LargeStructOperandStackPush(GetTypedRefSize(&m_interpCeeInfo)); OpStackSet(idx, tbr); -#elif defined(_X86_) || defined(_ARM_) +#elif defined(HOST_X86) || defined(HOST_ARM) assert(!typedRefIT.IsLargeStruct(&m_interpCeeInfo)); tbr = OpStackGetAddr(idx); -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) tbr = NULL; NYI_INTERP("Unimplemented code: MkRefAny"); #else @@ -6820,7 +6820,7 @@ INT32 Interpreter::CompareOpRes(unsigned op1idx) case CORINFO_TYPE_LONG: { bool looseLong = false; -#if defined(_AMD64_) +#if defined(HOST_AMD64) looseLong = s_InterpreterLooseRules && (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_BYREF); #endif if (cit2 == CORINFO_TYPE_LONG || looseLong) @@ -8289,7 +8289,7 @@ void Interpreter::InitBlk() #ifdef _DEBUG CorInfoType addrCIT = OpStackTypeGet(addrInd).ToCorInfoType(); bool addrValidType = (addrCIT == CORINFO_TYPE_NATIVEINT || addrCIT == CORINFO_TYPE_BYREF); -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (s_InterpreterLooseRules && addrCIT == CORINFO_TYPE_LONG) addrValidType = true; #endif @@ -8339,7 +8339,7 @@ void Interpreter::CpBlk() #ifdef _DEBUG CorInfoType destCIT = OpStackTypeGet(destInd).ToCorInfoType(); bool destValidType = (destCIT == CORINFO_TYPE_NATIVEINT || destCIT == CORINFO_TYPE_BYREF); -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (s_InterpreterLooseRules && destCIT == CORINFO_TYPE_LONG) destValidType = true; #endif @@ -8349,7 +8349,7 @@ void Interpreter::CpBlk() } CorInfoType srcCIT = OpStackTypeGet(srcInd).ToCorInfoType(); bool srcValidType = (srcCIT == CORINFO_TYPE_NATIVEINT || srcCIT == CORINFO_TYPE_BYREF); -#if defined(_AMD64_) +#if defined(HOST_AMD64) if (s_InterpreterLooseRules && srcCIT == CORINFO_TYPE_LONG) srcValidType = true; #endif @@ -9250,12 +9250,12 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T ARG_SLOT* args; InterpreterType* argTypes; -#if defined(_X86_) +#if defined(HOST_X86) unsigned totalArgSlots = nSlots; -#elif defined(_ARM_) || defined(_ARM64_) +#elif defined(HOST_ARM) || defined(HOST_ARM64) // ARM64TODO: Verify that the following statement is correct for ARM64. unsigned totalArgSlots = nSlots + HFAReturnArgSlots; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) unsigned totalArgSlots = nSlots; #else #error "unsupported platform" @@ -9269,11 +9269,11 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T else { args = (ARG_SLOT*)_alloca(totalArgSlots * sizeof(ARG_SLOT)); -#if defined(_ARM_) +#if defined(HOST_ARM) // The HFA return buffer, if any, is assumed to be at a negative // offset from the IL arg pointer, so adjust that pointer upward. args = args + HFAReturnArgSlots; -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType)); } // Make sure that we don't scan any of these until we overwrite them with @@ -9473,7 +9473,7 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T // This is the argument slot that will be used to hold the return value. 
ARG_SLOT retVal = 0; -#if !defined(_ARM_) && !defined(UNIX_AMD64_ABI) +#if !defined(HOST_ARM) && !defined(UNIX_AMD64_ABI) _ASSERTE (NUMBER_RETURNVALUE_SLOTS == 1); #endif @@ -9510,22 +9510,22 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T // On ARM, if there's an HFA return type, we must also allocate a return buffer, since the // MDCS calling convention requires it. if (hasRetBuffArg -#if defined(_ARM_) +#if defined(HOST_ARM) || HFAReturnArgSlots > 0 -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) ) { assert(retTypeClsHnd != NULL); retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd); retTypeSz = retTypeIt.Size(&m_interpCeeInfo); -#if defined(_ARM_) +#if defined(HOST_ARM) if (HFAReturnArgSlots > 0) { args[curArgSlot] = PtrToArgSlot(args - HFAReturnArgSlots); } else -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) if (retTypeIt.IsLargeStruct(&m_interpCeeInfo)) { @@ -9864,7 +9864,7 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T // We must be careful here to write the value, the type, and update the stack height in one // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value // is protected by being fully "on" the operandStack. -#if defined(_ARM_) +#if defined(HOST_ARM) // Is the return type an HFA? if (HFAReturnArgSlots > 0) { @@ -9881,7 +9881,7 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T } } else -#endif // defined(_ARM_) +#endif // defined(HOST_ARM) if (pLargeStructRetVal != NULL) { assert(hasRetBuffArg); @@ -10074,7 +10074,7 @@ void Interpreter::CallI() retTypeClsHnd = sigInfo.retTypeClass; retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd); retTypeSz = retTypeIt.Size(&m_interpCeeInfo); -#if defined(_AMD64_) +#if defined(HOST_AMD64) // TODO: Investigate why HasRetBuffArg can't be used. pMD is a hacked up MD for the // calli because it belongs to the current method. Doing what the JIT does. hasRetBuffArg = (retTypeSz > sizeof(void*)) || ((retTypeSz & (retTypeSz - 1)) != 0); diff --git a/src/coreclr/src/vm/interpreter.h b/src/coreclr/src/vm/interpreter.h index e508836b9d2f4..fe9c7bc18b70a 100644 --- a/src/coreclr/src/vm/interpreter.h +++ b/src/coreclr/src/vm/interpreter.h @@ -313,7 +313,7 @@ class InterpreterType bool IsLargeStruct(CEEInfo* ceeInfo) const { intptr_t asInt = reinterpret_cast(m_tp); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (asInt == CORINFO_TYPE_SHIFTED_REFANY) { return true; @@ -988,11 +988,11 @@ class Interpreter } } -#if defined(_ARM_) +#if defined(HOST_ARM) static const int MaxNumFPRegArgSlots = 16; -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) static const int MaxNumFPRegArgSlots = 8; -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) static const int MaxNumFPRegArgSlots = 4; #endif @@ -1008,7 +1008,7 @@ class Interpreter // of slots. Important that this be called in argument order. void AddFPArg(unsigned canonIndex, unsigned short numSlots, bool doubleAlign); -#if defined(_AMD64_) +#if defined(HOST_AMD64) // We have a special function for AMD64 because both integer/float registers overlap. However, all // callers are expected to call AddArg/AddFPArg directly. void AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType); @@ -1085,7 +1085,7 @@ class Interpreter { if (!m_directCall) { -#if defined(_AMD64_) +#if defined(HOST_AMD64) // In AMD64, a reference to the struct is passed if its size exceeds the word size. 
// Dereference the arg to get to the ref of the struct. if (GetArgType(argNum).IsLargeStruct(&m_interpCeeInfo)) @@ -2037,14 +2037,14 @@ class Interpreter #endif // INTERP_TRACING }; -#if defined(_X86_) +#if defined(HOST_X86) inline unsigned short Interpreter::NumberOfIntegerRegArgs() { return 2; } -#elif defined(_AMD64_) +#elif defined(HOST_AMD64) unsigned short Interpreter::NumberOfIntegerRegArgs() { return 4; } -#elif defined(_ARM_) +#elif defined(HOST_ARM) unsigned short Interpreter::NumberOfIntegerRegArgs() { return 4; } -#elif defined(_ARM64_) +#elif defined(HOST_ARM64) unsigned short Interpreter::NumberOfIntegerRegArgs() { return 8; } #else #error Unsupported architecture. diff --git a/src/coreclr/src/vm/jithelpers.cpp b/src/coreclr/src/vm/jithelpers.cpp index 9d06d74246057..07be69a245fd6 100644 --- a/src/coreclr/src/vm/jithelpers.cpp +++ b/src/coreclr/src/vm/jithelpers.cpp @@ -27,10 +27,10 @@ #include "corprof.h" #include "eeprofinterfaces.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Included for referencing __report_gsfailure #include "process.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef PROFILING_SUPPORTED #include "proftoeeinterfaceimpl.h" @@ -118,7 +118,7 @@ inline UINT64 ShiftToHi32Bits(UINT32 x) return ret.QuadPart; } -#if !defined(_TARGET_X86_) || defined(FEATURE_PAL) +#if !defined(TARGET_X86) || defined(TARGET_UNIX) /*********************************************************************/ HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2) { @@ -133,7 +133,7 @@ HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2) return (val1 * val2); } HCIMPLEND -#endif // !_TARGET_X86_ || FEATURE_PAL +#endif // !TARGET_X86 || TARGET_UNIX /*********************************************************************/ HCIMPL2_VV(INT64, JIT_LMulOvf, INT64 val1, INT64 val2) @@ -448,7 +448,7 @@ HCIMPL2_VV(UINT64, JIT_ULMod, UINT64 dividend, UINT64 divisor) } HCIMPLEND -#if !defined(BIT64) && !defined(_TARGET_X86_) +#if !defined(HOST_64BIT) && !defined(TARGET_X86) /*********************************************************************/ HCIMPL2_VV(UINT64, JIT_LLsh, UINT64 num, int shift) { @@ -472,7 +472,7 @@ HCIMPL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift) return num >> (shift & 0x3F); } HCIMPLEND -#endif // !BIT64 && !_TARGET_X86_ +#endif // !HOST_64BIT && !TARGET_X86 #include @@ -527,7 +527,7 @@ ftype BankersRound(ftype value) if ((value -(integerPart +0.5)) == 0.0) { // round to even -#if defined(_TARGET_ARM_) && defined(FEATURE_CORESYSTEM) +#if defined(TARGET_ARM) && defined(FEATURE_CORESYSTEM) // @ARMTODO: On ARM when building on CoreSystem (where we link against the system CRT) an attempt to // use fmod(float, float) fails to link (apparently this is converted to a reference to fmodf, which // is not included in the system CRT). Use the double version instead. 
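BankersRound in the hunk above resolves exact .5 ties by rounding to the even neighbor; the ARM/CoreSystem note only swaps fmodf for fmod, it does not change the rule. A hedged standalone sketch of that tie-breaking behavior (invented name, double-only, not the runtime's template):

#include <cmath>

// Round half to even: 0.5 -> 0, 1.5 -> 2, 2.5 -> 2, -1.5 -> -2.
double RoundHalfToEven(double value)
{
    double floorVal = std::floor(value);
    double frac     = value - floorVal;
    if (frac > 0.5) return floorVal + 1.0;
    if (frac < 0.5) return floorVal;
    // Exactly halfway between two integers: keep the even one.
    return (std::fmod(floorVal, 2.0) == 0.0) ? floorVal : floorVal + 1.0;
}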
@@ -571,7 +571,7 @@ HCIMPLEND // Call fast Dbl2Lng conversion - used by functions below FORCEINLINE INT64 FastDbl2Lng(double val) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 FCALL_CONTRACT; return HCCALL1_V(JIT_Dbl2Lng, val); #else @@ -641,7 +641,7 @@ HCIMPL1_V(UINT64, JIT_Dbl2ULngOvf, double val) HCIMPLEND -#if !defined(_TARGET_X86_) || defined(FEATURE_PAL) +#if !defined(TARGET_X86) || defined(TARGET_UNIX) HCIMPL1_V(INT64, JIT_Dbl2Lng, double val) { @@ -742,7 +742,7 @@ HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor) } HCIMPLEND -#endif // !_TARGET_X86_ || FEATURE_PAL +#endif // !TARGET_X86 || TARGET_UNIX #include @@ -2701,7 +2701,7 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size) if (size < 0) COMPlusThrow(kOverflowException); -#ifdef BIT64 +#ifdef HOST_64BIT // Even though ECMA allows using a native int as the argument to newarr instruction // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit // platforms we can't create an array whose size exceeds 32 bits. @@ -2738,7 +2738,7 @@ OBJECTREF allocNewMDArr(TypeHandle typeHnd, unsigned dwNumArgs, va_list args) INT32* fwdArgList; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 fwdArgList = (INT32*)args; // reverse the order @@ -2896,7 +2896,7 @@ HCIMPL3(void, JIT_Stelem_Ref_Portable, PtrArray* array, unsigned idx, Object *va } } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 SetObjectReference((OBJECTREF*)&array->m_Array[idx], ObjectToOBJECTREF(val)); #else // The performance gain of the optimized JIT_Stelem_Ref in @@ -4244,10 +4244,10 @@ HCIMPL1(void, IL_Throw, Object* obj) OBJECTREF oref = ObjectToOBJECTREF(obj); -#if defined(_DEBUG) && defined(_TARGET_X86_) +#if defined(_DEBUG) && defined(TARGET_X86) __helperframe.InsureInit(false, NULL); g_ExceptionEIP = (LPVOID)__helperframe.GetReturnAddress(); -#endif // defined(_DEBUG) && defined(_TARGET_X86_) +#endif // defined(_DEBUG) && defined(TARGET_X86) if (oref == 0) @@ -4604,21 +4604,21 @@ void DoJITFailFast () _ASSERTE(!"About to FailFast. set ComPlus_AssertOnFailFast=0 if this is expected"); #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Use the function provided by the C runtime. // // Ideally, this function is called directly from managed code so // that the address of the managed function will be included in the // error log. However, this function is also used by the stackwalker. // To keep things simple, we just call it from here. -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) __report_gsfailure(); -#else // !defined(_TARGET_X86_) +#else // !defined(TARGET_X86) // On AMD64/IA64/ARM, we need to pass a stack cookie, which will be saved in the context record // that is used to raise the buffer-overrun exception by __report_gsfailure. __report_gsfailure((ULONG_PTR)0); -#endif // defined(_TARGET_X86_) -#else // FEATURE_PAL +#endif // defined(TARGET_X86) +#else // TARGET_UNIX if(ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_DOTNET_Context, FailFast)) { // Fire an ETW FailFast event @@ -4630,7 +4630,7 @@ void DoJITFailFast () } TerminateProcess(GetCurrentProcess(), STATUS_STACK_BUFFER_OVERRUN); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } HCIMPL0(void, JIT_FailFast) @@ -4944,7 +4944,7 @@ HCIMPLEND extern "C" FCDECL0(VOID, JIT_RareDisableHelper); -#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_) +#if defined(TARGET_ARM) || defined(TARGET_AMD64) // The JIT expects this helper to preserve the return value on AMD64 and ARM. 
We should eventually // switch other platforms to the same convention since it produces smaller code. extern "C" FCDECL0(VOID, JIT_RareDisableHelperWorker); @@ -5102,7 +5102,7 @@ HCIMPLEND // //======================================================================== -#ifdef BIT64 +#ifdef HOST_64BIT /**********************************************************************/ /* Fills out portions of an InlinedCallFrame for JIT64 */ @@ -5128,7 +5128,7 @@ Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubS return pThread; } -#endif // BIT64 +#endif // HOST_64BIT EXTERN_C void JIT_PInvokeBegin(InlinedCallFrame* pFrame); EXTERN_C void JIT_PInvokeEnd(InlinedCallFrame* pFrame); @@ -5169,7 +5169,7 @@ VMHELPDEF hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_COUNT] = #include "jithelpers.h" }; -#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && (defined(TARGET_AMD64) || defined(TARGET_X86)) && !defined(TARGET_UNIX) #define HELPERCOUNTDEF(lpv) { (LPVOID)(lpv), NULL, 0 }, VMHELPCOUNTDEF hlpFuncCountTable[CORINFO_HELP_COUNT+1] = @@ -5206,9 +5206,9 @@ void InitJITHelpers2() { STANDARD_VM_CONTRACT; -#if defined(_TARGET_X86_) || defined(_TARGET_ARM_) +#if defined(TARGET_X86) || defined(TARGET_ARM) SetJitHelperFunction(CORINFO_HELP_INIT_PINVOKE_FRAME, (void *)GenerateInitPInvokeFrameHelper()->GetEntryPoint()); -#endif // _TARGET_X86_ || _TARGET_ARM_ +#endif // TARGET_X86 || TARGET_ARM ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(GetThread), ECall::InternalGetCurrentThread); @@ -5224,7 +5224,7 @@ void InitJITHelpers2() g_pJitGenericHandleCache = tempGenericHandleCache.Extract(); } -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) NOINLINE void DoCopy(CONTEXT * ctx, void * pvTempStack, size_t cbTempStack, Thread * pThread, Frame * pNewFrame) { @@ -5278,7 +5278,7 @@ void F_CALL_VA_CONV JIT_TailCall(PCODE copyArgs, PCODE target, ...) STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_MODE_COOPERATIVE -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX Thread *pThread = GetThread(); @@ -5335,10 +5335,10 @@ void F_CALL_VA_CONV JIT_TailCall(PCODE copyArgs, PCODE target, ...) BYTE rgFrameBuffer[sizeof(FrameWithCookie)]; Frame * pNewFrame = NULL; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) # define STACK_ADJUST_FOR_RETURN_ADDRESS (sizeof(void*)) # define STACK_ALIGN_MASK (0xF) -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) # define STACK_ADJUST_FOR_RETURN_ADDRESS (0) # define STACK_ALIGN_MASK (0x7) #else @@ -5475,13 +5475,13 @@ void F_CALL_VA_CONV JIT_TailCall(PCODE copyArgs, PCODE target, ...) #undef STACK_ADJUST_FOR_RETURN_ADDRESS #undef STACK_ALIGN_MASK -#else // !FEATURE_PAL +#else // !TARGET_UNIX PORTABILITY_ASSERT("TODO: Implement JIT_TailCall for PAL"); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } -#endif // _TARGET_AMD64_ || _TARGET_ARM_ +#endif // TARGET_AMD64 || TARGET_ARM //======================================================================== // @@ -5489,7 +5489,7 @@ void F_CALL_VA_CONV JIT_TailCall(PCODE copyArgs, PCODE target, ...) 
// //======================================================================== -#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && (defined(TARGET_AMD64) || defined(TARGET_X86)) && !defined(TARGET_UNIX) // ***************************************************************************** // JitHelperLogging usage: // 1) Ngen using: @@ -5596,12 +5596,12 @@ void InitJitHelperLogging() if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitHelperLogging) != 0)) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 IMAGE_DOS_HEADER *pDOS = (IMAGE_DOS_HEADER *)g_pMSCorEE; _ASSERTE(pDOS->e_magic == VAL16(IMAGE_DOS_SIGNATURE) && pDOS->e_lfanew != 0); IMAGE_NT_HEADERS *pNT = (IMAGE_NT_HEADERS*)((LPBYTE)g_pMSCorEE + VAL32(pDOS->e_lfanew)); -#ifdef BIT64 +#ifdef HOST_64BIT _ASSERTE(pNT->Signature == VAL32(IMAGE_NT_SIGNATURE) && pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER64)) && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC) ); @@ -5610,7 +5610,7 @@ void InitJitHelperLogging() && pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER32)) && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC) ); #endif -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Make the static hlpFuncTable read/write for purposes of writing the logging thunks DWORD dwOldProtect; @@ -5641,7 +5641,7 @@ void InitJitHelperLogging() hlpFuncCount->pfnRealHelper = hlpFunc->pfnHelper; hlpFuncCount->helperName = hlpFunc->name; hlpFuncCount->count = 0; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 ULONGLONG uImageBase; PT_RUNTIME_FUNCTION pFunctionEntry; pFunctionEntry = RtlLookupFunctionEntry((ULONGLONG)hlpFunc->pfnHelper, &uImageBase, NULL); @@ -5655,10 +5655,10 @@ void InitJitHelperLogging() { hlpFuncCount->helperSize = 0; } -#else // _TARGET_X86_ +#else // TARGET_X86 // How do I get this for x86? hlpFuncCount->helperSize = 0; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 pSl->EmitJITHelperLoggingThunk(GetEEFuncEntryPoint(hlpFunc->pfnHelper), (LPVOID)hlpFuncCount); Stub* pStub = pSl->Link(pHeap); @@ -5686,7 +5686,7 @@ void InitJitHelperLogging() #pragma warning(pop) #endif /*_PREFAST_*/ -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 ULONGLONG uImageBase; PT_RUNTIME_FUNCTION pFunctionEntry; pFunctionEntry = RtlLookupFunctionEntry((ULONGLONG)hlpFunc->pfnHelper, &uImageBase, NULL); @@ -5701,7 +5701,7 @@ void InitJitHelperLogging() // if we can't get a function entry for this we'll just pretend the size is 0 hlpFuncCount->helperSize = 0; } -#else // _TARGET_X86_ +#else // TARGET_X86 // Is the address in mscoree.dll at all? 
(All helpers are in // mscoree.dll) if (dynamicHlpFunc->pfnHelper >= (LPBYTE*)g_pMSCorEE && dynamicHlpFunc->pfnHelper < (LPBYTE*)g_pMSCorEE + VAL32(pNT->OptionalHeader.SizeOfImage)) @@ -5715,7 +5715,7 @@ void InitJitHelperLogging() hlpFuncCount->helperSize -= sizeof(Stub); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 pSl->EmitJITHelperLoggingThunk(GetEEFuncEntryPoint(dynamicHlpFunc->pfnHelper), (LPVOID)hlpFuncCount); Stub* pStub = pSl->Link(pHeap); @@ -5733,4 +5733,4 @@ void InitJitHelperLogging() return; } -#endif // _DEBUG && (_TARGET_AMD64_ || _TARGET_X86_) +#endif // _DEBUG && (TARGET_AMD64 || TARGET_X86) diff --git a/src/coreclr/src/vm/jitinterface.cpp b/src/coreclr/src/vm/jitinterface.cpp index e9f78ab6840e2..a7ae8b7b1e3c8 100644 --- a/src/coreclr/src/vm/jitinterface.cpp +++ b/src/coreclr/src/vm/jitinterface.cpp @@ -480,14 +480,14 @@ CEEInfo::ConvToJitSig( IfFailThrow(sig.GetCallingConvInfo(&data)); sigRet->callConv = (CorInfoCallConv) data; -#if defined(PLATFORM_UNIX) || defined(_TARGET_ARM_) +#if defined(TARGET_UNIX) || defined(TARGET_ARM) if ((isCallConv(sigRet->callConv, IMAGE_CEE_CS_CALLCONV_VARARG)) || (isCallConv(sigRet->callConv, IMAGE_CEE_CS_CALLCONV_NATIVEVARARG))) { // This signature corresponds to a method that uses varargs, which are not supported. COMPlusThrow(kInvalidProgramException, IDS_EE_VARARG_NOT_SUPPORTED); } -#endif // defined(PLATFORM_UNIX) || defined(_TARGET_ARM_) +#endif // defined(TARGET_UNIX) || defined(TARGET_ARM) // Skip number of type arguments if (sigRet->callConv & IMAGE_CEE_CS_CALLCONV_GENERIC) @@ -9869,7 +9869,7 @@ CorInfoUnmanagedCallConv CEEInfo::getUnmanagedCallConv(CORINFO_METHOD_HANDLE met pMD = GetMethod(method); _ASSERTE(pMD->IsNDirect()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 EX_TRY { PInvokeStaticSigInfo sigInfo(pMD, PInvokeStaticSigInfo::NO_THROW_ON_ERROR); @@ -9893,12 +9893,12 @@ CorInfoUnmanagedCallConv CEEInfo::getUnmanagedCallConv(CORINFO_METHOD_HANDLE met result = CORINFO_UNMANAGED_CALLCONV_UNKNOWN; } EX_END_CATCH(SwallowAllExceptions) -#else // !_TARGET_X86_ +#else // !TARGET_X86 // // we have only one calling convention // result = CORINFO_UNMANAGED_CALLCONV_STDCALL; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 EE_TO_JIT_TRANSITION(); @@ -10182,9 +10182,9 @@ void InlinedCallFrame::GetEEInfo(CORINFO_EE_INFO::InlinedCallFrameInfo *pInfo) pInfo->offsetOfCalleeSavedFP = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_pCalleeSavedFP); pInfo->offsetOfCallTarget = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_Datum); pInfo->offsetOfReturnAddress = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_pCallerReturnAddress); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM pInfo->offsetOfSPAfterProlog = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_pSPAfterProlog); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } /*********************************************************************/ @@ -10261,7 +10261,7 @@ void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut) pEEInfoOut->maxUncheckedOffsetForNullObject = MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT; pEEInfoOut->targetAbi = CORINFO_CORECLR_ABI; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX pEEInfoOut->osType = CORINFO_UNIX; #else pEEInfoOut->osType = CORINFO_WINNT; @@ -10730,7 +10730,7 @@ DWORD CEEInfo::getJitFlags(CORJIT_FLAGS* jitFlags, DWORD sizeInBytes) } /*********************************************************************/ -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) struct RunWithErrorTrapFilterParam { @@ -10749,7 +10749,7 @@ static LONG RunWithErrorTrapFilter(struct _EXCEPTION_POINTERS* 
exceptionPointers return param->m_corInfo->FilterException(exceptionPointers); } -#endif // !defined(PLATFORM_UNIX) +#endif // !defined(TARGET_UNIX) bool CEEInfo::runWithErrorTrap(void (*function)(void*), void* param) { @@ -10765,7 +10765,7 @@ bool CEEInfo::runWithErrorTrap(void (*function)(void*), void* param) bool success = true; -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) RunWithErrorTrapFilterParam trapParam; trapParam.m_corInfo = m_pOverride == nullptr ? this : m_pOverride; @@ -10783,7 +10783,7 @@ bool CEEInfo::runWithErrorTrap(void (*function)(void*), void* param) } PAL_ENDTRY -#else // !defined(PLATFORM_UNIX) +#else // !defined(TARGET_UNIX) // We shouldn't need PAL_TRY on *nix: any exceptions that we are able to catch // ought to originate from the runtime itself and should be catchable inside of @@ -10917,7 +10917,7 @@ void* CEEJitInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ #pragma warning(disable:26001) // "Bounds checked above using the underflow trick" #endif /*_PREFAST_ */ -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // To avoid using a jump stub we always call certain helpers using an indirect call. // Because when using a direct call and the target is father away than 2^31 bytes, // the direct call instead goes to a jump stub which jumps to the jit helper. @@ -11163,9 +11163,9 @@ void CEEJitInfo::CompressDebugInfo() void reservePersonalityRoutineSpace(ULONG &unwindSize) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Do nothing -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // Add space for personality routine, it must be 4-byte aligned. // Everything in the UNWIND_INFO up to the variable-sized UnwindCodes // array has already had its size included in unwindSize by the caller. @@ -11177,7 +11177,7 @@ void reservePersonalityRoutineSpace(ULONG &unwindSize) _ASSERTE(FitsInU4(unwindSize + sizeof(ULONG))); unwindSize = (ULONG)(ALIGN_UP(unwindSize, sizeof(ULONG))); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) // The JIT passes in a 4-byte aligned block of unwind data. _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); @@ -11185,7 +11185,7 @@ void reservePersonalityRoutineSpace(ULONG &unwindSize) unwindSize += sizeof(ULONG); #else PORTABILITY_ASSERT("reservePersonalityRoutineSpace"); -#endif // !defined(_TARGET_AMD64_) +#endif // !defined(TARGET_AMD64) } // Reserve memory for the method/funclet's unwind information. 
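reservePersonalityRoutineSpace above keeps the unwind block 4-byte aligned and leaves room for a personality-routine RVA on the platforms that need one. A rough sketch of that size computation in the TARGET_AMD64 style of layout (simplified and illustrative, not the function itself):

#include <cstdint>

// Grow an unwind-info size so a 4-byte personality-routine RVA can be appended
// on a 4-byte boundary; the x86 case would return the size unchanged.
uint32_t ReservePersonalitySpace(uint32_t unwindSize)
{
    unwindSize += sizeof(uint32_t);          // room for the personality RVA
    return (unwindSize + 3u) & ~3u;          // keep the block 4-byte aligned
}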
@@ -11346,7 +11346,7 @@ void CEEJitInfo::allocUnwindInfo ( RUNTIME_FUNCTION__SetBeginAddress(pRuntimeFunction, currentCodeOffset + startOffset); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 pRuntimeFunction->EndAddress = currentCodeOffset + endOffset; #endif @@ -11369,25 +11369,25 @@ void CEEJitInfo::allocUnwindInfo ( /* Copy the UnwindBlock */ memcpy(pUnwindInfo, pUnwindBlock, unwindSize); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Do NOTHING -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) pUnwindInfo->Flags = UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER; ULONG * pPersonalityRoutine = (ULONG*)ALIGN_UP(&(pUnwindInfo->UnwindCode[pUnwindInfo->CountOfUnwindCodes]), sizeof(ULONG)); *pPersonalityRoutine = ExecutionManager::GetCLRPersonalityRoutineValue(); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) *(LONG *)pUnwindInfo |= (1 << 20); // X bit ULONG * pPersonalityRoutine = (ULONG*)((BYTE *)pUnwindInfo + ALIGN_UP(unwindSize, sizeof(ULONG))); *pPersonalityRoutine = ExecutionManager::GetCLRPersonalityRoutineValue(); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) *(LONG *)pUnwindInfo |= (1 << 20); // X bit @@ -11396,11 +11396,11 @@ void CEEJitInfo::allocUnwindInfo ( #endif -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // Publish the new unwind information in a way that the ETW stack crawler can find if (m_usedUnwindInfos == m_totalUnwindInfos) UnwindInfoTable::PublishUnwindInfoForMethod(baseAddress, m_CodeHeader->GetUnwindInfo(0), m_totalUnwindInfos); -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) EE_TO_JIT_TRANSITION(); #else // FEATURE_EH_FUNCLETS @@ -11436,7 +11436,7 @@ void CEEJitInfo::recordRelocation(void * location, MODE_PREEMPTIVE; } CONTRACTL_END; -#ifdef BIT64 +#ifdef HOST_64BIT JIT_TO_EE_TRANSITION(); INT64 delta; @@ -11448,7 +11448,7 @@ void CEEJitInfo::recordRelocation(void * location, *((UINT64 *) ((BYTE *) location + slot)) = (UINT64) target; break; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case IMAGE_REL_BASED_REL32: { target = (BYTE *)target + addlDelta; @@ -11502,9 +11502,9 @@ void CEEJitInfo::recordRelocation(void * location, *fixupLocation = (INT32) delta; } break; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 case IMAGE_REL_ARM64_BRANCH26: // 26 bit offset << 2 & sign ext, for B and BL { _ASSERTE(slot == 0); @@ -11606,7 +11606,7 @@ void CEEJitInfo::recordRelocation(void * location, } break; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 default: _ASSERTE(!"Unknown reloc type"); @@ -11614,13 +11614,13 @@ void CEEJitInfo::recordRelocation(void * location, } EE_TO_JIT_TRANSITION(); -#else // BIT64 +#else // HOST_64BIT JIT_TO_EE_TRANSITION_LEAF(); // Nothing to do on 32-bit EE_TO_JIT_TRANSITION_LEAF(); -#endif // BIT64 +#endif // HOST_64BIT } WORD CEEJitInfo::getRelocTypeHint(void * target) @@ -11631,14 +11631,14 @@ WORD CEEJitInfo::getRelocTypeHint(void * target) MODE_PREEMPTIVE; } CONTRACTL_END; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (m_fAllowRel32) { // The JIT calls this method for data addresses only. It always uses REL32s for direct code targets. 
if (IsPreferredExecutableRange(target)) return IMAGE_REL_BASED_REL32; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 // No hints return (WORD)-1; @@ -12103,12 +12103,12 @@ void * CEEJitInfo::allocGCInfo (size_t size) _ASSERTE(m_CodeHeader != 0); _ASSERTE(m_CodeHeader->GetGCInfo() == 0); -#ifdef BIT64 +#ifdef HOST_64BIT if (size & 0xFFFFFFFF80000000LL) { COMPlusThrowHR(CORJIT_OUTOFMEM); } -#endif // BIT64 +#endif // HOST_64BIT block = m_jitManager->allocGCInfo(m_CodeHeader,(DWORD)size, &m_GCinfo_len); if (!block) @@ -12524,10 +12524,10 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr, flags.Set(CORJIT_FLAGS::CORJIT_FLAG_FRAMED); if (g_pConfig->JitAlignLoops()) flags.Set(CORJIT_FLAGS::CORJIT_FLAG_ALIGN_LOOPS); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (g_pConfig->PInvokeRestoreEsp(ftn->GetModule()->IsPreV4Assembly())) flags.Set(CORJIT_FLAGS::CORJIT_FLAG_PINVOKE_RESTORE_ESP); -#endif // _TARGET_X86_ +#endif // TARGET_X86 //See if we should instruct the JIT to emit calls to JIT_PollGC for thread suspension. If we have a //non-default value in the EE Config, then use that. Otherwise select the platform specific default. @@ -12711,7 +12711,7 @@ LONG g_JitCount = 0; #endif //#define PERF_TRACK_METHOD_JITTIMES -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BOOL g_fAllowRel32 = TRUE; #endif @@ -12854,7 +12854,7 @@ PCODE UnsafeJitFunction(NativeCodeVersion nativeCodeVersion, COR_ILMETHOD_DECODE } #endif //_DEBUG -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) BOOL fForceJumpStubOverflow = FALSE; #ifdef _DEBUG @@ -12863,13 +12863,13 @@ PCODE UnsafeJitFunction(NativeCodeVersion nativeCodeVersion, COR_ILMETHOD_DECODE fForceJumpStubOverflow = TRUE; #endif -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) BOOL fAllowRel32 = (g_fAllowRel32 | fForceJumpStubOverflow); #endif size_t reserveForJumpStubs = 0; -#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) for (;;) { @@ -12883,8 +12883,8 @@ PCODE UnsafeJitFunction(NativeCodeVersion nativeCodeVersion, COR_ILMETHOD_DECODE EEJitManager *jitMgr = NULL; #endif -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE) -#ifdef _TARGET_AMD64_ +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) && !defined(CROSSGEN_COMPILE) +#ifdef TARGET_AMD64 if (fForceJumpStubOverflow) jitInfo.SetJumpStubOverflow(fAllowRel32); jitInfo.SetAllowRel32(fAllowRel32); @@ -13015,26 +13015,26 @@ PCODE UnsafeJitFunction(NativeCodeVersion nativeCodeVersion, COR_ILMETHOD_DECODE if (!nativeEntry) COMPlusThrow(kInvalidProgramException); -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM64)) && !defined(CROSSGEN_COMPILE) if (jitInfo.IsJumpStubOverflow()) { // Backout and try again with fAllowRel32 == FALSE. jitInfo.BackoutJitData(jitMgr); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Disallow rel32 relocs in future. 
g_fAllowRel32 = FALSE; fAllowRel32 = FALSE; -#endif // _TARGET_AMD64_ -#ifdef _TARGET_ARM64_ +#endif // TARGET_AMD64 +#ifdef TARGET_ARM64 fForceJumpStubOverflow = FALSE; -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 reserveForJumpStubs = jitInfo.GetReserveForJumpStubs(); continue; } -#endif // (_TARGET_AMD64_ || _TARGET_ARM64_) && !CROSSGEN_COMPILE +#endif // (TARGET_AMD64 || TARGET_ARM64) && !CROSSGEN_COMPILE LOG((LF_JIT, LL_INFO10000, "Jitted Entry at" FMT_ADDR "method %s::%s %s\n", DBG_ADDR(nativeEntry), @@ -13060,7 +13060,7 @@ PCODE UnsafeJitFunction(NativeCodeVersion nativeCodeVersion, COR_ILMETHOD_DECODE ClrFlushInstructionCache(nativeEntry, sizeOfCode); ret = (PCODE)nativeEntry; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM ret |= THUMB_CODE; #endif @@ -13174,7 +13174,7 @@ void Module::LoadHelperTable() // Jump thunk // -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) *curEntry = X86_INSTR_JMP_REL32; *(INT32 *)(curEntry + 1) = rel32UsingJumpStub((INT32 *)(curEntry + 1), pfnHelper, NULL, GetLoaderAllocator()); #else // all other platforms @@ -13555,7 +13555,7 @@ BOOL LoadDynamicInfoEntry(Module *currentModule, result = pMD->GetMultiCallableAddrOfCode(CORINFO_ACCESS_ANY); } - #ifndef _TARGET_ARM_ + #ifndef TARGET_ARM if (CORCOMPILE_IS_PCODE_TAGGED(result)) { // There is a rare case where the function entrypoint may not be aligned. This could happen only for FCalls, @@ -13848,7 +13848,7 @@ void* CEEInfo::getTailCallCopyArgsThunk(CORINFO_SIG_INFO *pSig, void * ftn = NULL; -#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)) && !defined(FEATURE_PAL) +#if (defined(TARGET_AMD64) || defined(TARGET_ARM)) && !defined(TARGET_UNIX) JIT_TO_EE_TRANSITION(); @@ -13858,7 +13858,7 @@ void* CEEInfo::getTailCallCopyArgsThunk(CORINFO_SIG_INFO *pSig, EE_TO_JIT_TRANSITION(); -#endif // (_TARGET_AMD64_ || _TARGET_ARM_) && !FEATURE_PAL +#endif // (TARGET_AMD64 || TARGET_ARM) && !TARGET_UNIX return ftn; } @@ -14164,7 +14164,7 @@ TADDR EECodeInfo::GetSavedMethodCode() HOST_NOCALLS; SUPPORTS_DAC; } CONTRACTL_END; -#ifndef BIT64 +#ifndef HOST_64BIT #if defined(HAVE_GCCOVER) _ASSERTE (!m_pMD->m_GcCover || GCStress::IsEnabled()); if (GCStress::IsEnabled() @@ -14255,7 +14255,7 @@ PTR_RUNTIME_FUNCTION EECodeInfo::GetFunctionEntry() return m_pFunctionEntry; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) BOOL EECodeInfo::HasFrameRegister() { @@ -14274,12 +14274,12 @@ BOOL EECodeInfo::HasFrameRegister() return fHasFrameRegister; } -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) #endif // defined(FEATURE_EH_FUNCLETS) -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // ---------------------------------------------------------------------------- // EECodeInfo::GetUnwindInfoHelper // @@ -14455,12 +14455,12 @@ LPVOID EECodeInfo::findNextFunclet (LPVOID pvFuncletStart, SIZE_T { PT_RUNTIME_FUNCTION pFunctionEntry; ULONGLONG uImageBase; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX EECodeInfo codeInfo; codeInfo.Init((PCODE)pvFuncletStart); pFunctionEntry = codeInfo.GetFunctionEntry(); uImageBase = (ULONGLONG)codeInfo.GetModuleBase(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX // // This is GCStress debug only - use the slow OS APIs to enumerate funclets // @@ -14502,4 +14502,4 @@ LPVOID EECodeInfo::findNextFunclet (LPVOID pvFuncletStart, SIZE_T return NULL; } #endif // defined(_DEBUG) && !defined(HAVE_GCCOVER) -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) diff --git a/src/coreclr/src/vm/jitinterface.h b/src/coreclr/src/vm/jitinterface.h index 
4a7002f6c0dfe..cf80d843ac11c 100644 --- a/src/coreclr/src/vm/jitinterface.h +++ b/src/coreclr/src/vm/jitinterface.h @@ -16,11 +16,11 @@ #include "corcompile.h" #endif // FEATURE_PREJIT -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT ((32*1024)-1) // when generating JIT code -#else // !FEATURE_PAL +#else // !TARGET_UNIX #define MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT ((GetOsPageSize() / 2) - 1) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX enum StompWriteBarrierCompletionAction @@ -91,7 +91,7 @@ BOOL LoadDynamicInfoEntry(Module *currentModule, // // The legacy x86 monitor helpers do not need a state argument // -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) #define FCDECL_MONHELPER(funcname, arg) FCDECL2(void, funcname, arg, BYTE* pbLockTaken) #define HCIMPL_MONHELPER(funcname, arg) HCIMPL2(void, funcname, arg, BYTE* pbLockTaken) @@ -105,7 +105,7 @@ BOOL LoadDynamicInfoEntry(Module *currentModule, #define MONHELPER_STATE(x) #define MONHELPER_ARG NULL -#endif // _TARGET_X86_ +#endif // TARGET_X86 // @@ -250,7 +250,7 @@ extern "C" FCDECL2(Object*, IsInstanceOfAny_NoCacheLookup, CORINFO_CLASS_HANDLE extern "C" FCDECL1(void, JIT_InternalThrow, unsigned exceptNum); extern "C" FCDECL1(void*, JIT_InternalThrowFromHelper, unsigned exceptNum); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 class WriteBarrierManager @@ -305,15 +305,15 @@ class WriteBarrierManager PBYTE m_pUpperBoundImmediate; // | POSTGROW | | WRITE_WATCH | }; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef BIT64 +#ifdef HOST_64BIT EXTERN_C FCDECL1(Object*, JIT_TrialAllocSFastMP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_); EXTERN_C FCDECL2(Object*, JIT_BoxFastMP_InlineGetThread, CORINFO_CLASS_HANDLE type, void* data); EXTERN_C FCDECL2(Object*, JIT_NewArr1VC_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); EXTERN_C FCDECL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); -#endif // BIT64 +#endif // HOST_64BIT EXTERN_C FCDECL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2); @@ -328,20 +328,20 @@ EXTERN_C FCDECL1_V(INT32, JIT_Dbl2IntOvf, double val); EXTERN_C FCDECL2_VV(float, JIT_FltRem, float dividend, float divisor); EXTERN_C FCDECL2_VV(double, JIT_DblRem, double dividend, double divisor); -#ifndef BIT64 -#ifdef _TARGET_X86_ +#ifndef HOST_64BIT +#ifdef TARGET_X86 // JIThelp.asm EXTERN_C void STDCALL JIT_LLsh(); EXTERN_C void STDCALL JIT_LRsh(); EXTERN_C void STDCALL JIT_LRsz(); -#else // _TARGET_X86_ +#else // TARGET_X86 EXTERN_C FCDECL2_VV(UINT64, JIT_LLsh, UINT64 num, int shift); EXTERN_C FCDECL2_VV(INT64, JIT_LRsh, INT64 num, int shift); EXTERN_C FCDECL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift); -#endif // !_TARGET_X86_ -#endif // !BIT64 +#endif // !TARGET_X86 +#endif // !HOST_64BIT -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 extern "C" { @@ -375,33 +375,33 @@ extern "C" void ValidateWriteBarrierHelpers(); -#endif //_TARGET_X86_ +#endif //TARGET_X86 extern "C" { #ifndef FEATURE_EH_FUNCLETS void STDCALL JIT_EndCatch(); // JIThelp.asm/JIThelp.s -#endif // _TARGET_X86_ +#endif // TARGET_X86 void STDCALL JIT_ByRefWriteBarrier(); // JIThelp.asm/JIThelp.s -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) FCDECL2VA(void, JIT_TailCall, PCODE copyArgs, PCODE target); -#else // _TARGET_AMD64_ || _TARGET_ARM_ +#else // TARGET_AMD64 || TARGET_ARM void STDCALL JIT_TailCall(); // JIThelp.asm -#endif // _TARGET_AMD64_ || _TARGET_ARM_ +#endif // TARGET_AMD64 || TARGET_ARM void STDCALL 
JIT_MemSet(void *dest, int c, SIZE_T count); void STDCALL JIT_MemCpy(void *dest, const void *src, SIZE_T count); void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle); -#ifndef _TARGET_ARM64_ +#ifndef TARGET_ARM64 void STDCALL JIT_StackProbe(); -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 }; @@ -1337,7 +1337,7 @@ class CEEJitInfo : public CEEInfo #endif // FEATURE_EH_FUNCLETS } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 void SetAllowRel32(BOOL fAllowRel32) { LIMITED_METHOD_CONTRACT; @@ -1345,7 +1345,7 @@ class CEEJitInfo : public CEEInfo } #endif -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) void SetJumpStubOverflow(BOOL fJumpStubOverflow) { LIMITED_METHOD_CONTRACT; @@ -1403,10 +1403,10 @@ class CEEJitInfo : public CEEInfo m_totalUnwindInfos(0), m_usedUnwindInfos(0), #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_fAllowRel32(FALSE), #endif -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) m_fJumpStubOverflow(FALSE), m_reserveForJumpStubs(0), #endif @@ -1490,10 +1490,10 @@ protected : ULONG m_usedUnwindInfos; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 BOOL m_fAllowRel32; // Use 32-bit PC relative address modes #endif -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) BOOL m_fJumpStubOverflow; // Overflow while trying to alocate jump stub slot within PC relative branch region // The code will need to be regenerated (with m_fRel32Allowed == FALSE for AMD64). size_t m_reserveForJumpStubs; // Space to reserve for jump stubs when allocating code @@ -1554,7 +1554,7 @@ extern "C" const VMHELPDEF hlpFuncTable[CORINFO_HELP_COUNT]; #endif -#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && (defined(TARGET_AMD64) || defined(TARGET_X86)) && !defined(TARGET_UNIX) typedef struct { void* pfnRealHelper; const char* helperName; @@ -1597,7 +1597,7 @@ void DisableJitGCPoll(); #endif // Helper for RtlVirtualUnwind-based tail calls -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) // The Stub-linker generated assembly routine to copy arguments from the va_list // into the CONTEXT and the stack. @@ -1610,7 +1610,7 @@ class TailCallFrame; // The shared stub return location EXTERN_C void JIT_TailCallHelperStub_ReturnAddress(); -#endif // _TARGET_AMD64_ || _TARGET_ARM_ +#endif // TARGET_AMD64 || TARGET_ARM void *GenFastGetSharedStaticBase(bool bCheckCCtor); @@ -1636,7 +1636,7 @@ BOOL ObjIsInstanceOfCore(Object* pObject, TypeHandle toTypeHnd, BOOL throwCastEx EXTERN_C TypeHandle::CastResult STDCALL ObjIsInstanceOfCached(Object *pObject, TypeHandle toTypeHnd); -#ifdef BIT64 +#ifdef HOST_64BIT class InlinedCallFrame; Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubSecretArg); #endif diff --git a/src/coreclr/src/vm/jitinterfacegen.cpp b/src/coreclr/src/vm/jitinterfacegen.cpp index 0beac76f2f2bc..ceb6d1a0d3de1 100644 --- a/src/coreclr/src/vm/jitinterfacegen.cpp +++ b/src/coreclr/src/vm/jitinterfacegen.cpp @@ -19,7 +19,7 @@ #include "field.h" #include "ecall.h" -#ifdef BIT64 +#ifdef HOST_64BIT // These are the fastest(?) versions of JIT helpers as they have the code to GetThread patched into them // that does not make a call. 
@@ -43,17 +43,17 @@ EXTERN_C Object* JIT_NewArr1OBJ_UP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); EXTERN_C Object* JIT_NewArr1VC_MP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); EXTERN_C Object* JIT_NewArr1VC_UP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 extern WriteBarrierManager g_WriteBarrierManager; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#endif // BIT64 +#endif // HOST_64BIT /*********************************************************************/ // Initialize the part of the JIT helpers that require very little of // EE infrastructure to be in place. /*********************************************************************/ -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 void InitJITHelpers1() { @@ -61,7 +61,7 @@ void InitJITHelpers1() _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) g_WriteBarrierManager.Initialize(); @@ -73,7 +73,7 @@ void InitJITHelpers1() #endif // _DEBUG )) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable); SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, JIT_NewS_MP_FastPortable); SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_FastPortable); @@ -83,7 +83,7 @@ void InitJITHelpers1() #ifdef FEATURE_UTF8STRING ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateUtf8String_MP_FastPortable), ECall::FastAllocateUtf8String); #endif // FEATURE_UTF8STRING -#else // FEATURE_PAL +#else // TARGET_UNIX // if (multi-proc || server GC) if (GCHeapUtilities::UseThreadAllocationContexts()) { @@ -115,9 +115,9 @@ void InitJITHelpers1() ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateUtf8String_MP_FastPortable), ECall::FastAllocateUtf8String); #endif // FEATURE_UTF8STRING } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 diff --git a/src/coreclr/src/vm/loaderallocator.cpp b/src/coreclr/src/vm/loaderallocator.cpp index a576c1ad32fb3..741441d51720d 100644 --- a/src/coreclr/src/vm/loaderallocator.cpp +++ b/src/coreclr/src/vm/loaderallocator.cpp @@ -1120,7 +1120,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory) dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); -#if !defined(BIT64) +#if !defined(HOST_64BIT) // Make sure that we reserve as little as possible on 32-bit to save address space _ASSERTE(dwTotalReserveMemSize <= VIRTUAL_ALLOC_RESERVE_GRANULARITY); #endif diff --git a/src/coreclr/src/vm/managedmdimport.cpp b/src/coreclr/src/vm/managedmdimport.cpp index afeb039abe8ef..8b96635bffeff 100644 --- a/src/coreclr/src/vm/managedmdimport.cpp +++ b/src/coreclr/src/vm/managedmdimport.cpp @@ -221,7 +221,7 @@ MDImpl3(void, MetaDataImport::Enum, mdToken type, mdToken tkParent, MetadataEnum } FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif @@ -658,7 +658,7 @@ MDImpl2(void, MetaDataImport::GetMemberRefProps, } FCIMPLEND -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // restore command line optimization defaults #endif diff --git a/src/coreclr/src/vm/managedmdimport.hpp b/src/coreclr/src/vm/managedmdimport.hpp index e1e5dc22774b3..cc1a166a6120c 100644 --- a/src/coreclr/src/vm/managedmdimport.hpp +++ 
b/src/coreclr/src/vm/managedmdimport.hpp @@ -29,7 +29,7 @@ typedef struct { I4Array * largeResult; int length; -#ifdef BIT64 +#ifdef HOST_64BIT int padding; #endif int smallResult[16]; diff --git a/src/coreclr/src/vm/marshalnative.cpp b/src/coreclr/src/vm/marshalnative.cpp index ca15a370af942..dd86ad4778481 100644 --- a/src/coreclr/src/vm/marshalnative.cpp +++ b/src/coreclr/src/vm/marshalnative.cpp @@ -82,13 +82,13 @@ INT32 QCALLTYPE MarshalNative::NumParamBytes(MethodDesc * pMD) cbParamBytes = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize(); -#ifdef _X86_ +#ifdef HOST_X86 if (((NDirectMethodDesc *)pMD)->IsThisCall()) { // The size of 'this' is not included in native stack arg size. cbParamBytes += sizeof(LPVOID); } -#endif // _X86_ +#endif // HOST_X86 END_QCALL; diff --git a/src/coreclr/src/vm/method.cpp b/src/coreclr/src/vm/method.cpp index a65c057b4a717..d60238f90599e 100644 --- a/src/coreclr/src/vm/method.cpp +++ b/src/coreclr/src/vm/method.cpp @@ -1032,7 +1032,7 @@ PCODE MethodDesc::GetNativeCode() // This means that NativeCodeSlot::GetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot()) // is not stable. It can turn from non-zero to zero. PCODE pCode = PCODE(NativeCodeSlot::GetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot()) & ~FIXUP_LIST_MASK); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (pCode != NULL) pCode |= THUMB_CODE; #endif @@ -1328,18 +1328,18 @@ ReturnKind MethodDesc::ParseReturnKindFromSig(INDEBUG(bool supportStringConstruc ReturnKind MethodDesc::GetReturnKind(INDEBUG(bool supportStringConstructors)) { -#ifdef BIT64 +#ifdef HOST_64BIT // For simplicity, we don't hijack in funclets, but if you ever change that, // be sure to choose the OnHijack... callback type to match that of the FUNCLET // not the main method (it would probably be Scalar). -#endif // BIT64 +#endif // HOST_64BIT ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); // Mark that we are performing a stackwalker like operation on the current thread. // This is necessary to allow the signature parsing functions to work without triggering any loads ClrFlsValueSwitch threadStackWalking(TlsIdx_StackWalkerWalkingThread, GetThread()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 MetaSig msig(this); if (msig.HasFPReturn()) { @@ -1349,7 +1349,7 @@ ReturnKind MethodDesc::GetReturnKind(INDEBUG(bool supportStringConstructors)) // restore of the return value around the call to OnHijackScalarWorker. 
return RT_Float; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 return ParseReturnKindFromSig(INDEBUG(supportStringConstructors)); } @@ -1795,7 +1795,7 @@ UINT MethodDesc::SizeOfNativeArgStack() #endif } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //******************************************************************************* UINT MethodDesc::CbStackPop() { @@ -1805,7 +1805,7 @@ UINT MethodDesc::CbStackPop() ArgIterator argit(&msig); return argit.CbStackPop(); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 #ifndef DACCESS_COMPILE @@ -2682,7 +2682,7 @@ void MethodDesc::Save(DataImage *image) if (!pNMD->MarshalingRequired()) { // import thunk is only needed if the P/Invoke is inlinable -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) image->SavePrecode(pNMD->GetNDirectImportThunkGlue(), pNMD, PRECODE_NDIRECT_IMPORT, DataImage::ITEM_METHOD_PRECODE_COLD); #else image->StoreStructure(pNMD->GetNDirectImportThunkGlue(), sizeof(NDirectImportThunkGlue), DataImage::ITEM_METHOD_PRECODE_COLD); @@ -4172,7 +4172,7 @@ BOOL MethodDesc::IsRestored() #ifdef HAS_COMPACT_ENTRYPOINTS -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #include static const struct CentralJumpCode { @@ -4203,7 +4203,7 @@ c_CentralJumpCode = { }; #include -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #include static const struct CentralJumpCode { @@ -4243,7 +4243,7 @@ c_CentralJumpCode = { }; #include -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) #include struct CentralJumpCode { @@ -4282,7 +4282,7 @@ static_assert_no_msg((TEP_CENTRAL_JUMP_SIZE & 1) == 0); #define TEP_ENTRY_SIZE 4 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #define TEP_HALF_ENTRY_SIZE (TEP_ENTRY_SIZE / 2) @@ -4339,24 +4339,24 @@ static uint16_t EncodeBranchToCentralJump (int16_t offset) #endif // DACCESS_COMPILE -#else // _TARGET_ARM_ +#else // TARGET_ARM #define TEP_MAX_BEFORE_INDEX (1 + (127 / TEP_ENTRY_SIZE)) #define TEP_MAX_BLOCK_INDEX (TEP_MAX_BEFORE_INDEX + (128 - TEP_CENTRAL_JUMP_SIZE) / TEP_ENTRY_SIZE) #define TEP_FULL_BLOCK_SIZE (TEP_MAX_BLOCK_INDEX * TEP_ENTRY_SIZE + TEP_CENTRAL_JUMP_SIZE) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM BOOL MethodDescChunk::IsCompactEntryPointAtAddress(PCODE addr) { LIMITED_METHOD_DAC_CONTRACT; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // Compact entrypoints start at odd addresses return (addr & 1) != 0; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Compact entrypoints start at odd addresses (thumb) with second bit set to 1 uint8_t compactEntryPointMask = THUMB_CODE | COMPACT_ENTRY_ARM_CODE; @@ -4380,23 +4380,23 @@ BOOL MethodDescChunk::IsCompactEntryPointAtAddress(PCODE addr) // Always do consistency check in debug if (fSpeculative INDEBUG(|| TRUE)) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM TADDR instrCodeAddr = PCODEToPINSTR(addr); if (!IsCompactEntryPointAtAddress(addr) || *PTR_BYTE(instrCodeAddr) != TEP_ENTRY_INSTR1_BYTE1 || *PTR_BYTE(instrCodeAddr+1) != TEP_ENTRY_INSTR1_BYTE2) -#else // _TARGET_ARM_ +#else // TARGET_ARM if ((addr & 3) != 1 || *PTR_BYTE(addr) != X86_INSTR_MOV_AL || *PTR_BYTE(addr+2) != X86_INSTR_JMP_REL8) -#endif // _TARGET_ARM_ +#endif // TARGET_ARM { if (fSpeculative) return NULL; _ASSERTE(!"Unexpected code in temporary entrypoint"); } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // On ARM compact entry points are thumb _ASSERTE ((addr & THUMB_CODE) != 0); @@ -4409,12 +4409,12 @@ BOOL MethodDescChunk::IsCompactEntryPointAtAddress(PCODE addr) TADDR 
centralJump = addr + offset; int index = (centralJump - addr - TEP_ENTRY_SIZE) / TEP_ENTRY_SIZE; -#else // _TARGET_ARM_ +#else // TARGET_ARM int index = *PTR_BYTE(addr+1); TADDR centralJump = addr + 4 + *PTR_SBYTE(addr+3); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM CentralJumpCode* pCentralJumpCode = PTR_CentralJumpCode(centralJump); @@ -4432,18 +4432,18 @@ BOOL MethodDescChunk::IsCompactEntryPointAtAddress(PCODE addr) } } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM _ASSERTE_IMPL(pCentralJumpCode->CheckTarget(GetPreStubCompactARMEntryPoint())); -#else // _TARGET_ARM_ +#else // TARGET_ARM _ASSERTE_IMPL(pCentralJumpCode->CheckTarget(GetPreStubEntryPoint())); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Go through all MethodDesc in MethodDescChunk and find the one with the required index PTR_MethodDescChunk pChunk = *((DPTR(PTR_MethodDescChunk))(centralJump + offsetof(CentralJumpCode, m_pChunk))); TADDR pMD = PTR_HOST_TO_TADDR (pChunk->GetFirstMethodDesc ()); @@ -4465,9 +4465,9 @@ BOOL MethodDescChunk::IsCompactEntryPointAtAddress(PCODE addr) } return PTR_MethodDesc (pMD); -#else // _TARGET_ARM_ +#else // TARGET_ARM return PTR_MethodDesc((TADDR)pCentralJumpCode->m_pBaseMD + index * MethodDesc::ALIGNMENT); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } //******************************************************************************* @@ -4475,11 +4475,11 @@ SIZE_T MethodDescChunk::SizeOfCompactEntryPoints(int count) { LIMITED_METHOD_DAC_CONTRACT; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return COMPACT_ENTRY_ARM_CODE + count * TEP_ENTRY_SIZE + TEP_CENTRAL_JUMP_SIZE; -#else // _TARGET_ARM_ +#else // TARGET_ARM int fullBlocks = count / TEP_MAX_BLOCK_INDEX; int remainder = count % TEP_MAX_BLOCK_INDEX; @@ -4487,7 +4487,7 @@ SIZE_T MethodDescChunk::SizeOfCompactEntryPoints(int count) return 1 + (fullBlocks * TEP_FULL_BLOCK_SIZE) + (remainder * TEP_ENTRY_SIZE) + ((remainder != 0) ? 
TEP_CENTRAL_JUMP_SIZE : 0); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } #ifndef DACCESS_COMPILE @@ -4504,24 +4504,24 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca TADDR temporaryEntryPoints = (TADDR)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(size, sizeof(TADDR))); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM BYTE* p = (BYTE*)temporaryEntryPoints + COMPACT_ENTRY_ARM_CODE; int relOffset = count * TEP_ENTRY_SIZE - TEP_ENTRY_SIZE; // relative offset for the short jump _ASSERTE (relOffset < MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB); -#else // _TARGET_ARM_ +#else // TARGET_ARM // make the temporary entrypoints unaligned, so they are easy to identify BYTE* p = (BYTE*)temporaryEntryPoints + 1; int indexInBlock = TEP_MAX_BLOCK_INDEX; // recompute relOffset in first iteration int relOffset = 0; // relative offset for the short jump -#endif // _TARGET_ARM_ +#endif // TARGET_ARM MethodDesc * pBaseMD = 0; // index of the start of the block MethodDesc * pMD = GetFirstMethodDesc(); for (int index = 0; index < count; index++) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM uint8_t *pMovInstrByte1 = (uint8_t *)p; uint8_t *pMovInstrByte2 = (uint8_t *)p+1; @@ -4533,7 +4533,7 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca p += TEP_ENTRY_SIZE; -#else // _TARGET_ARM_ +#else // TARGET_ARM if (indexInBlock == TEP_MAX_BLOCK_INDEX) { @@ -4568,13 +4568,13 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca indexInBlock++; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM relOffset -= TEP_ENTRY_SIZE; pMD = (MethodDesc *)((BYTE *)pMD + pMD->SizeOf()); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM CentralJumpCode* pCode = (CentralJumpCode*)p; memcpy(pCode, &c_CentralJumpCode, TEP_CENTRAL_JUMP_SIZE); @@ -4582,11 +4582,11 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca _ASSERTE(p + TEP_CENTRAL_JUMP_SIZE == (BYTE*)temporaryEntryPoints + size); -#else // _TARGET_ARM_ +#else // TARGET_ARM _ASSERTE(p == (BYTE*)temporaryEntryPoints + size); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, size); @@ -4607,11 +4607,11 @@ PCODE MethodDescChunk::GetTemporaryEntryPoint(int index) #ifdef HAS_COMPACT_ENTRYPOINTS if (HasCompactEntryPoints()) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return GetTemporaryEntryPoints() + COMPACT_ENTRY_ARM_CODE + THUMB_CODE + index * TEP_ENTRY_SIZE; -#else // _TARGET_ARM_ +#else // TARGET_ARM int fullBlocks = index / TEP_MAX_BLOCK_INDEX; int remainder = index % TEP_MAX_BLOCK_INDEX; @@ -4619,7 +4619,7 @@ PCODE MethodDescChunk::GetTemporaryEntryPoint(int index) return GetTemporaryEntryPoints() + 1 + (fullBlocks * TEP_FULL_BLOCK_SIZE) + (remainder * TEP_ENTRY_SIZE) + ((remainder >= TEP_MAX_BEFORE_INDEX) ? 
TEP_CENTRAL_JUMP_SIZE : 0); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM } #endif // HAS_COMPACT_ENTRYPOINTS @@ -5062,7 +5062,7 @@ BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/) if (HasNativeCodeSlot()) { -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM _ASSERTE(IsThumbCode(addr) || (addr==NULL)); addr &= ~THUMB_CODE; @@ -5263,13 +5263,13 @@ FARPROC NDirectMethodDesc::FindEntryPointWithMangling(NATIVE_LIBRARY_HANDLE hMod } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX FARPROC pFunc = GetProcAddress(hMod, entryPointName); #else FARPROC pFunc = PAL_GetProcAddressDirect(hMod, entryPointName); #endif -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (pFunc) { @@ -5324,7 +5324,7 @@ LPVOID NDirectMethodDesc::FindEntryPoint(NATIVE_LIBRARY_HANDLE hMod) const FARPROC pFunc = NULL; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Handle ordinals. if (funcName[0] == '#') { @@ -5866,7 +5866,7 @@ void ComPlusCallMethodDesc::InitRetThunk() { WRAPPER_NO_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (m_pComPlusCallInfo->m_pRetThunk != NULL) return; @@ -5878,7 +5878,7 @@ void ComPlusCallMethodDesc::InitRetThunk() LPVOID pRetThunk = ComPlusCall::GetRetThunk(numStackBytes); FastInterlockCompareExchangePointer(&m_pComPlusCallInfo->m_pRetThunk, pRetThunk, NULL); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #endif //!DACCESS_COMPILE #endif // FEATURE_COMINTEROP diff --git a/src/coreclr/src/vm/method.hpp b/src/coreclr/src/vm/method.hpp index 72d648f02efc1..730e7f34ad64b 100644 --- a/src/coreclr/src/vm/method.hpp +++ b/src/coreclr/src/vm/method.hpp @@ -209,7 +209,7 @@ class MethodDesc public: -#ifdef BIT64 +#ifdef HOST_64BIT static const int ALIGNMENT_SHIFT = 3; #else static const int ALIGNMENT_SHIFT = 2; @@ -2313,9 +2313,9 @@ class MethodDescChunk static BOOL IsCompactEntryPointAtAddress(PCODE addr); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM static int GetCompactEntryPointMaxCount (); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM #endif // HAS_COMPACT_ENTRYPOINTS FORCEINLINE PTR_MethodTable GetMethodTable() @@ -2490,7 +2490,7 @@ class StoredSigMethodDesc : public MethodDesc RelativePointer m_pSig; DWORD m_cSig; -#ifdef BIT64 +#ifdef HOST_64BIT // m_dwExtendedFlags is not used by StoredSigMethodDesc itself. // It is used by child classes. We allocate the space here to get // optimal layout. @@ -2549,7 +2549,7 @@ class FCallMethodDesc : public MethodDesc #endif DWORD m_dwECallID; -#ifdef BIT64 +#ifdef HOST_64BIT DWORD m_padding; #endif @@ -2589,7 +2589,7 @@ class DynamicMethodDesc : public StoredSigMethodDesc RelativePointer m_pszMethodName; PTR_DynamicResolver m_pResolver; -#ifndef BIT64 +#ifndef HOST_64BIT // We use m_dwExtendedFlags from StoredSigMethodDesc on WIN64 DWORD m_dwExtendedFlags; // see DynamicMethodDesc::ExtendedFlags enum #endif @@ -2873,12 +2873,12 @@ class NDirectMethodDesc : public MethodDesc // Various attributes needed at runtime. WORD m_wFlags; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // Size of outgoing arguments (on stack). Note that in order to get the @n stdcall name decoration, // it may be necessary to subtract 4 as the hidden large structure pointer parameter does not count. 
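// Illustrative sketch (hypothetical function, not part of the patch): on TARGET_X86
// the stdcall export name encodes this outgoing stack byte count, e.g.
extern "C" int __stdcall Add(int a, int b);   // MSVC decorates this as _Add@8 (two 4-byte stack args)
// m_cbStackArgumentSize tracks exactly that "@n" value for the P/Invoke target.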
// See code:kStdCallWithRetBuf WORD m_cbStackArgumentSize; -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) // This field gets set only when this MethodDesc is marked as PreImplemented RelativePointer m_pStubMD; @@ -3149,7 +3149,7 @@ class NDirectMethodDesc : public MethodDesc { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // thiscall passes the this pointer in ECX if (unmgdCallConv == pmCallConvThiscall) { @@ -3166,10 +3166,10 @@ class NDirectMethodDesc : public MethodDesc { _ASSERTE(ndirect.m_cbStackArgumentSize == cbDstBuffer); } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) WORD GetStackArgumentSize() const { LIMITED_METHOD_DAC_CONTRACT; @@ -3180,7 +3180,7 @@ class NDirectMethodDesc : public MethodDesc // the outgoing marshalling buffer. return ndirect.m_cbStackArgumentSize; } -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) VOID InitEarlyBoundNDirectTarget(); @@ -3272,7 +3272,7 @@ struct ComPlusCallInfo return &m_pILStub; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Size of outgoing arguments (on stack). This is currently used only // on x86 when we have an InlinedCallFrame representing a CLR->COM call. WORD m_cbStackArgumentSize; @@ -3306,7 +3306,7 @@ struct ComPlusCallInfo LPVOID m_pRetThunk; -#else // _TARGET_X86_ +#else // TARGET_X86 void InitStackArgumentSize() { LIMITED_METHOD_CONTRACT; @@ -3316,7 +3316,7 @@ struct ComPlusCallInfo { LIMITED_METHOD_CONTRACT; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // This field gets set only when this MethodDesc is marked as PreImplemented RelativePointer m_pStubMD; @@ -3376,7 +3376,7 @@ class ComPlusCallMethodDesc : public MethodDesc FastInterlockOr(reinterpret_cast(&m_pComPlusCallInfo->m_flags), newFlags); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 WORD GetStackArgumentSize() { LIMITED_METHOD_DAC_CONTRACT; @@ -3388,12 +3388,12 @@ class ComPlusCallMethodDesc : public MethodDesc LIMITED_METHOD_CONTRACT; m_pComPlusCallInfo->SetStackArgumentSize(cbDstBuffer); } -#else // _TARGET_X86_ +#else // TARGET_X86 void SetStackArgumentSize(WORD cbDstBuffer) { LIMITED_METHOD_CONTRACT; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 }; #endif // FEATURE_COMINTEROP diff --git a/src/coreclr/src/vm/methodtable.cpp b/src/coreclr/src/vm/methodtable.cpp index b528369c7c660..a509eec929484 100644 --- a/src/coreclr/src/vm/methodtable.cpp +++ b/src/coreclr/src/vm/methodtable.cpp @@ -138,7 +138,7 @@ class MethodDataCache UINT32 m_cEntries; UINT32 m_iLastTouched; -#ifdef BIT64 +#ifdef HOST_64BIT UINT32 pad; // insures that we are a multiple of 8-bytes #endif }; // class MethodDataCache @@ -1025,7 +1025,7 @@ void MethodTable::FixupExtraInterfaceInfo(DataImage *pImage) #endif // FEATURE_NATIVE_IMAGE_GENERATION // Define a macro that generates a mask for a given bit in a TADDR correctly on either 32 or 64 bit platforms. -#ifdef BIT64 +#ifdef HOST_64BIT #define SELECT_TADDR_BIT(_index) (1ULL << (_index)) #else #define SELECT_TADDR_BIT(_index) (1U << (_index)) @@ -3608,7 +3608,7 @@ OBJECTREF MethodTable::FastBox(void** data) return ref; } -#if _TARGET_X86_ || _TARGET_AMD64_ +#if TARGET_X86 || TARGET_AMD64 //========================================================================================== static void FastCallFinalize(Object *obj, PCODE funcPtr, BOOL fCriticalCall) { @@ -3618,7 +3618,7 @@ static void FastCallFinalize(Object *obj, PCODE funcPtr, BOOL fCriticalCall) BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? 
EEToManagedCriticalCall : EEToManagedDefault); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) __asm { @@ -3627,16 +3627,16 @@ static void FastCallFinalize(Object *obj, PCODE funcPtr, BOOL fCriticalCall) INDEBUG(nop) // Mark the fact that we can call managed code } -#else // _TARGET_X86_ +#else // TARGET_X86 FastCallFinalizeWorker(obj, funcPtr); -#endif // _TARGET_X86_ +#endif // TARGET_X86 END_CALL_TO_MANAGED(); } -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 void CallFinalizerOnThreadObject(Object *obj) { @@ -3747,7 +3747,7 @@ void MethodTable::CallFinalizer(Object *obj) } #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) #ifdef DEBUGGING_SUPPORTED if (CORDebuggerTraceCall()) @@ -3756,7 +3756,7 @@ void MethodTable::CallFinalizer(Object *obj) FastCallFinalize(obj, funcPtr, fCriticalFinalizer); -#else // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#else // defined(TARGET_X86) || defined(TARGET_AMD64) PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(funcPtr); @@ -3771,7 +3771,7 @@ void MethodTable::CallFinalizer(Object *obj) CALL_MANAGED_METHOD_NORET(args); -#endif // (defined(_TARGET_X86_) && defined(_TARGET_AMD64_) +#endif // (defined(TARGET_X86) && defined(TARGET_AMD64) #ifdef STRESS_LOG if (fCriticalFinalizer) @@ -9583,7 +9583,7 @@ void MethodTable::SetSlot(UINT32 slotNumber, PCODE slotCode) // IBC logging is not needed here - slots in ngen images are immutable. -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Ensure on ARM that all target addresses are marked as thumb code. _ASSERTE(IsThumbCode(slotCode)); #endif diff --git a/src/coreclr/src/vm/methodtable.h b/src/coreclr/src/vm/methodtable.h index 045abe0325b00..d5bd84adc2cae 100644 --- a/src/coreclr/src/vm/methodtable.h +++ b/src/coreclr/src/vm/methodtable.h @@ -239,7 +239,7 @@ typedef DPTR(GuidInfo) PTR_GuidInfo; // GenericsDictInfo is stored at negative offset of the dictionary struct GenericsDictInfo { -#ifdef BIT64 +#ifdef HOST_64BIT DWORD m_dwPadding; // Just to keep the size a multiple of 8 #endif @@ -354,7 +354,7 @@ struct MethodTableWriteableData // GC (like AD unload) Volatile m_dwLastVerifedGCCnt; -#ifdef BIT64 +#ifdef HOST_64BIT DWORD m_dwPadding; // Just to keep the size a multiple of 8 #endif diff --git a/src/coreclr/src/vm/methodtablebuilder.cpp b/src/coreclr/src/vm/methodtablebuilder.cpp index 315ac363ff4e3..cf6000afae9a5 100644 --- a/src/coreclr/src/vm/methodtablebuilder.cpp +++ b/src/coreclr/src/vm/methodtablebuilder.cpp @@ -1162,7 +1162,7 @@ BOOL MethodTableBuilder::CheckIfSIMDAndUpdateSize() { STANDARD_VM_CONTRACT; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) if (!bmtProp->fIsIntrinsicType) return false; @@ -1206,7 +1206,7 @@ BOOL MethodTableBuilder::CheckIfSIMDAndUpdateSize() } } #endif // !CROSSGEN_COMPILE -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) return false; } @@ -1498,7 +1498,7 @@ MethodTableBuilder::BuildMethodTableThrowing( } } -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) if (bmtProp->fIsIntrinsicType && !bmtGenerics->HasInstantiation()) { LPCUTF8 className; @@ -1510,7 +1510,7 @@ MethodTableBuilder::BuildMethodTableThrowing( IfFailThrow(GetMDImport()->GetNameOfTypeDef(bmtInternal->pType->GetEnclosingTypeToken(), NULL, &nameSpace)); } -#if defined(_TARGET_ARM64_) +#if 
defined(TARGET_ARM64) // All the funtions in System.Runtime.Intrinsics.Arm are hardware intrinsics. if (hr == S_OK && strcmp(nameSpace, "System.Runtime.Intrinsics.Arm") == 0) #else @@ -1519,10 +1519,10 @@ MethodTableBuilder::BuildMethodTableThrowing( #endif { #if defined(CROSSGEN_COMPILE) -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) if ((!IsNgenPDBCompilationProcess() && GetAppDomain()->ToCompilationDomain()->GetTargetModule() != g_pObjectClass->GetModule())) -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) { // Disable AOT compiling for managed implementation of hardware intrinsics. // We specially treat them here to ensure correct ISA features are set during compilation @@ -5953,9 +5953,9 @@ MethodTableBuilder::InitMethodDesc( pNewNMD->GetNDirectImportThunkGlue()->Init(pNewNMD); #endif // !HAS_NDIRECT_IMPORT_PRECODE -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) pNewNMD->ndirect.m_cbStackArgumentSize = 0xFFFF; -#endif // defined(_TARGET_X86_) +#endif // defined(TARGET_X86) // If the RVA of a native method is set, this is an early-bound IJW call if (RVA != 0 && IsMiUnmanaged(dwImplFlags) && IsMiNative(dwImplFlags)) @@ -8106,17 +8106,17 @@ VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCach // value classes could have GC pointers in them, which need to be pointer-size aligned // so do this if it has not been done already -#if !defined(_TARGET_64BIT_) && (DATA_ALIGNMENT > 4) +#if !defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4) dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, (pByValueMT->GetNumInstanceFieldBytes() >= DATA_ALIGNMENT) ? DATA_ALIGNMENT : TARGET_POINTER_SIZE); -#else // !(!defined(_TARGET_64BIT_) && (DATA_ALIGNMENT > 4)) +#else // !(!defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4)) #ifdef FEATURE_64BIT_ALIGNMENT if (pByValueMT->RequiresAlign8()) dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, 8); else #endif // FEATURE_64BIT_ALIGNMENT dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, TARGET_POINTER_SIZE); -#endif // !(!defined(_TARGET_64BIT_) && (DATA_ALIGNMENT > 4)) +#endif // !(!defined(TARGET_64BIT) && (DATA_ALIGNMENT > 4)) pFieldDescList[i].SetOffset(dwCumulativeInstanceFieldPos - dwOffsetBias); dwCumulativeInstanceFieldPos += pByValueMT->GetAlignedNumInstanceFieldBytes(); @@ -9617,7 +9617,7 @@ void MethodTableBuilder::CheckForSystemTypes() } else if (strcmp(name, g_Vector128Name) == 0) { - #ifdef _TARGET_ARM_ + #ifdef TARGET_ARM // The Procedure Call Standard for ARM defaults to 8-byte alignment for __m128 pLayout->m_LargestAlignmentRequirementOfAllMembers = 8; @@ -9625,17 +9625,17 @@ void MethodTableBuilder::CheckForSystemTypes() #else pLayout->m_LargestAlignmentRequirementOfAllMembers = 16; // sizeof(__m128) pLayout->m_ManagedLargestAlignmentRequirementOfAllMembers = 16; // sizeof(__m128) - #endif // _TARGET_ARM_ + #endif // TARGET_ARM } else if (strcmp(name, g_Vector256Name) == 0) { - #ifdef _TARGET_ARM_ + #ifdef TARGET_ARM // No such type exists for the Procedure Call Standard for ARM. We will default // to the same alignment as __m128, which is supported by the ABI. 
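// Illustrative sketch of the alignment choices made in the surrounding hunks
// (constant names are hypothetical, not part of the runtime):
#if defined(TARGET_ARM)
constexpr int kVector128Align = 8;   // ARM PCS defaults __m128 to 8-byte alignment
constexpr int kVector256Align = 8;   // no __m256 in the ARM PCS; mirror __m128
#elif defined(TARGET_ARM64)
constexpr int kVector128Align = 16;  // sizeof(__m128)
constexpr int kVector256Align = 16;  // ARM64 PCS (with SVE) defaults __m256 to 16
#else
constexpr int kVector128Align = 16;  // sizeof(__m128)
constexpr int kVector256Align = 32;  // sizeof(__m256)
#endif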
pLayout->m_LargestAlignmentRequirementOfAllMembers = 8; pLayout->m_ManagedLargestAlignmentRequirementOfAllMembers = 8; - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) // The Procedure Call Standard for ARM 64-bit (with SVE support) defaults to // 16-byte alignment for __m256. @@ -9644,7 +9644,7 @@ void MethodTableBuilder::CheckForSystemTypes() #else pLayout->m_LargestAlignmentRequirementOfAllMembers = 32; // sizeof(__m256) pLayout->m_ManagedLargestAlignmentRequirementOfAllMembers = 32; // sizeof(__m256) - #endif // _TARGET_ARM_ elif _TARGET_ARM64_ + #endif // TARGET_ARM elif TARGET_ARM64 } else { @@ -9663,7 +9663,7 @@ void MethodTableBuilder::CheckForSystemTypes() _ASSERTE(g_pByReferenceClass != NULL); _ASSERTE(g_pByReferenceClass->IsByRefLike()); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (GetCl() == g_pByReferenceClass->GetCl()) { // x86 by default treats the type of ByReference as the actual type of its IntPtr field, see calls to @@ -9712,7 +9712,7 @@ void MethodTableBuilder::CheckForSystemTypes() pMT->SetInternalCorElementType(type); pMT->SetIsTruePrimitive(); -#if defined(_TARGET_X86_) && defined(UNIX_X86_ABI) +#if defined(TARGET_X86) && defined(UNIX_X86_ABI) switch (type) { // The System V ABI for i386 defines different packing for these types. @@ -9730,7 +9730,7 @@ void MethodTableBuilder::CheckForSystemTypes() default: break; } -#endif // _TARGET_X86_ && UNIX_X86_ABI +#endif // TARGET_X86 && UNIX_X86_ABI #ifdef _DEBUG if (FAILED(GetMDImport()->GetNameOfTypeDef(GetCl(), &name, &nameSpace))) @@ -9744,7 +9744,7 @@ void MethodTableBuilder::CheckForSystemTypes() { pMT->SetIsNullable(); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 else if (strcmp(name, g_ByReferenceName) == 0) { // x86 by default treats the type of ByReference as the actual type of its IntPtr field, see calls to @@ -9755,7 +9755,7 @@ void MethodTableBuilder::CheckForSystemTypes() pMT->SetInternalCorElementType(ELEMENT_TYPE_VALUETYPE); } #endif -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 else if (strcmp(name, g_RuntimeArgumentHandleName) == 0) { pMT->SetInternalCorElementType (ELEMENT_TYPE_I); @@ -10478,7 +10478,7 @@ MethodTableBuilder::SetupMethodTable2( } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // JIT64 is not aware of normalized value types and this // optimization (return small value types by value in registers) // is already done in JIT64. @@ -12047,7 +12047,7 @@ BOOL HasLayoutMetadata(Assembly* pAssembly, IMDInternalImport* pInternalImport, } else if (IsTdAutoClass(clFlags)) { -#ifdef PLATFORM_WINDOWS +#ifdef TARGET_WINDOWS *pNLTType = nltUnicode; #else *pNLTType = nltAnsi; // We don't have a utf8 charset in metadata yet, but ANSI == UTF-8 off-Windows diff --git a/src/coreclr/src/vm/mlinfo.cpp b/src/coreclr/src/vm/mlinfo.cpp index de086738a28f2..cb4b7f3a9a3a2 100644 --- a/src/coreclr/src/vm/mlinfo.cpp +++ b/src/coreclr/src/vm/mlinfo.cpp @@ -1632,7 +1632,7 @@ MarshalInfo::MarshalInfo(Module* pModule, // System primitive types (System.Int32, et.al.) will be marshaled as expected // because the mtype CorElementType is normalized (e.g. ELEMENT_TYPE_I4). -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // We however need to detect if such a normalization occurred for non-system // trivial value types, because we hold CorNativeType belonging to the original // "un-normalized" signature type. 
It has to be verified that all the value types @@ -1659,7 +1659,7 @@ MarshalInfo::MarshalInfo(Module* pModule, } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (nativeType == NATIVE_TYPE_CUSTOMMARSHALER) @@ -1904,7 +1904,7 @@ MarshalInfo::MarshalInfo(Module* pModule, m_resID = IDS_EE_BADMARSHAL_I; IfFailGoto(E_FAIL, lFail); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT m_type = MARSHAL_TYPE_GENERIC_8; #else m_type = MARSHAL_TYPE_GENERIC_4; @@ -1918,7 +1918,7 @@ MarshalInfo::MarshalInfo(Module* pModule, m_resID = IDS_EE_BADMARSHAL_I; IfFailGoto(E_FAIL, lFail); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT m_type = MARSHAL_TYPE_GENERIC_8; #else m_type = MARSHAL_TYPE_GENERIC_4; @@ -1954,7 +1954,7 @@ MarshalInfo::MarshalInfo(Module* pModule, m_resID = IDS_EE_BADMARSHAL_PTR; IfFailGoto(E_FAIL, lFail); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT m_type = MARSHAL_TYPE_GENERIC_8; #else m_type = MARSHAL_TYPE_GENERIC_4; @@ -1975,7 +1975,7 @@ MarshalInfo::MarshalInfo(Module* pModule, m_resID = IDS_EE_BADMARSHAL_FNPTR; IfFailGoto(E_FAIL, lFail); } -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT m_type = MARSHAL_TYPE_GENERIC_8; #else m_type = MARSHAL_TYPE_GENERIC_4; @@ -2851,7 +2851,7 @@ MarshalInfo::MarshalInfo(Module* pModule, m_type = MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR; } else -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // JIT64 is not aware of normalized value types and this optimization // (returning small value types by value in registers) is already done in JIT64. if ( !m_byref // Permit register-sized structs as return values @@ -2867,7 +2867,7 @@ MarshalInfo::MarshalInfo(Module* pModule, m_args.m_pMT = m_pMT; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { m_args.m_pMT = m_pMT; m_type = MARSHAL_TYPE_BLITTABLEVALUECLASS; diff --git a/src/coreclr/src/vm/mlinfo.h b/src/coreclr/src/vm/mlinfo.h index 773bbbc8fd9dc..6b1e3d52ac772 100644 --- a/src/coreclr/src/vm/mlinfo.h +++ b/src/coreclr/src/vm/mlinfo.h @@ -825,7 +825,7 @@ class ArrayMarshalInfo { // for the purpose of marshaling, we don't care about the inner // type - we just marshal pointer-sized values -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return VT_I8; #else return VT_I4; @@ -893,7 +893,7 @@ class ArrayMarshalInfo { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return 8; #else return 4; diff --git a/src/coreclr/src/vm/mscorlib.h b/src/coreclr/src/vm/mscorlib.h index d9bb8c92f665c..397f8b86b61a7 100644 --- a/src/coreclr/src/vm/mscorlib.h +++ b/src/coreclr/src/vm/mscorlib.h @@ -754,7 +754,7 @@ DEFINE_FIELD(RAW_DATA, DATA, Data) DEFINE_CLASS(RAW_ARRAY_DATA, CompilerServices, RawArrayData) DEFINE_FIELD(RAW_ARRAY_DATA, LENGTH, Length) -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT DEFINE_FIELD(RAW_ARRAY_DATA, PADDING, Padding) #endif DEFINE_FIELD(RAW_ARRAY_DATA, DATA, Data) @@ -1020,9 +1020,9 @@ DEFINE_METHOD(STUBHELPERS, VALIDATE_OBJECT, Validate DEFINE_METHOD(STUBHELPERS, VALIDATE_BYREF, ValidateByref, SM_IntPtr_IntPtr_Obj_RetVoid) DEFINE_METHOD(STUBHELPERS, GET_STUB_CONTEXT, GetStubContext, SM_RetIntPtr) DEFINE_METHOD(STUBHELPERS, LOG_PINNED_ARGUMENT, LogPinnedArgument, SM_IntPtr_IntPtr_RetVoid) -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT DEFINE_METHOD(STUBHELPERS, GET_STUB_CONTEXT_ADDR, GetStubContextAddr, SM_RetIntPtr) -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT DEFINE_METHOD(STUBHELPERS, SAFE_HANDLE_ADD_REF, SafeHandleAddRef, SM_SafeHandle_RefBool_RetIntPtr) DEFINE_METHOD(STUBHELPERS, SAFE_HANDLE_RELEASE, SafeHandleRelease, SM_SafeHandle_RetVoid) diff --git a/src/coreclr/src/vm/multicorejit.cpp 
b/src/coreclr/src/vm/multicorejit.cpp index ee86fc4888d79..ebafea7d6b153 100644 --- a/src/coreclr/src/vm/multicorejit.cpp +++ b/src/coreclr/src/vm/multicorejit.cpp @@ -654,7 +654,7 @@ HRESULT MulticoreJitModuleEnumerator::EnumerateLoadedModules(AppDomain * pDomain // static: single instace within a process -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX TP_TIMER * MulticoreJitRecorder::s_delayedWriteTimer; // = NULL; // static @@ -683,7 +683,7 @@ MulticoreJitRecorder::WriteMulticoreJitProfiler(PTP_CALLBACK_INSTANCE pInstance, } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void MulticoreJitRecorder::PreRecordFirstMethod() { @@ -705,7 +705,7 @@ void MulticoreJitRecorder::PreRecordFirstMethod() // Get the timeout in seconds. int profileWriteTimeout = (int)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MultiCoreJitProfileWriteDelay); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Using the same threadpool timer used by UsageLog to write out profile when running under Appx or CoreCLR. s_delayedWriteTimer = CreateThreadpoolTimer(WriteMulticoreJitProfiler, this, NULL); @@ -723,7 +723,7 @@ void MulticoreJitRecorder::PreRecordFirstMethod() // This function is safe to call SetThreadpoolTimer(s_delayedWriteTimer, &ftDueTime, 0, 2000 /* large 2000 ms window for executing this timer is acceptable as the timing here is very much not critical */); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } } diff --git a/src/coreclr/src/vm/multicorejitimpl.h b/src/coreclr/src/vm/multicorejitimpl.h index bce655690f1a0..26e6a3b78c3b2 100644 --- a/src/coreclr/src/vm/multicorejitimpl.h +++ b/src/coreclr/src/vm/multicorejitimpl.h @@ -350,9 +350,9 @@ class MulticoreJitRecorder bool m_fAborted; bool m_fAppxMode; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static TP_TIMER * s_delayedWriteTimer; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX unsigned FindModule(Module * pModule); @@ -370,9 +370,9 @@ class MulticoreJitRecorder void PreRecordFirstMethod(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static void CALLBACK WriteMulticoreJitProfiler(PTP_CALLBACK_INSTANCE pInstance, PVOID pvContext, PTP_TIMER pTimer); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX public: @@ -395,7 +395,7 @@ class MulticoreJitRecorder m_stats.Clear(); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static bool CloseTimer() { LIMITED_METHOD_CONTRACT; @@ -418,7 +418,7 @@ class MulticoreJitRecorder CloseTimer(); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX bool IsAtFullCapacity() const { diff --git a/src/coreclr/src/vm/nativeformatreader.h b/src/coreclr/src/vm/nativeformatreader.h index 092ddd181bd78..397a27b7cc2a3 100644 --- a/src/coreclr/src/vm/nativeformatreader.h +++ b/src/coreclr/src/vm/nativeformatreader.h @@ -12,22 +12,22 @@ #ifndef DACCESS_COMPILE -#if defined(_AMD64_) || defined(_X86_) +#if defined(HOST_AMD64) || defined(HOST_X86) #include "emmintrin.h" #define USE_INTEL_INTRINSICS_FOR_CUCKOO_FILTER -#elif defined(_ARM_) || defined(_ARM64_) +#elif defined(HOST_ARM) || defined(HOST_ARM64) -#ifndef FEATURE_PAL // The Mac and Linux build environments are not setup for NEON simd. +#ifndef TARGET_UNIX // The Mac and Linux build environments are not setup for NEON simd. 
#define USE_ARM_INTRINSICS_FOR_CUCKOO_FILTER -#if defined(_ARM_) +#if defined(HOST_ARM) #include "arm_neon.h" #else #include "arm64_neon.h" #endif -#endif // FEATURE_PAL +#endif // TARGET_UNIX -#endif // _ARM_ || _ARM64_ +#endif // HOST_ARM || HOST_ARM64 #endif // DACCESS_COMPILE diff --git a/src/coreclr/src/vm/nativeoverlapped.cpp b/src/coreclr/src/vm/nativeoverlapped.cpp index f24a98530e118..548b1a5a6f1dc 100644 --- a/src/coreclr/src/vm/nativeoverlapped.cpp +++ b/src/coreclr/src/vm/nativeoverlapped.cpp @@ -30,7 +30,7 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, { FCALL_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX Thread *pThread = GetThread(); size_t key=0; @@ -82,9 +82,9 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, if (*lpOverlapped != NULL && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, ThreadPoolIODequeue)) FireEtwThreadPoolIODequeue(*lpOverlapped, OverlappedDataObject::GetOverlapped(*lpOverlapped), GetClrInstanceId()); -#else // !FEATURE_PAL +#else // !TARGET_UNIX *lpOverlapped = NULL; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return; } diff --git a/src/coreclr/src/vm/object.cpp b/src/coreclr/src/vm/object.cpp index cd26fedeb0092..d2e0cb9a14cae 100644 --- a/src/coreclr/src/vm/object.cpp +++ b/src/coreclr/src/vm/object.cpp @@ -427,7 +427,7 @@ void STDCALL CopyValueClassArgUnchecked(ArgDestination *argDest, void* src, Meth return; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (argDest->IsHFA()) { @@ -753,7 +753,7 @@ STRINGREF StringObject::NewString(const WCHAR *pwsz) } } -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif @@ -791,7 +791,7 @@ STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) { } } -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // Go back to command line default optimizations #endif diff --git a/src/coreclr/src/vm/object.h b/src/coreclr/src/vm/object.h index 05a9c40763f37..a7d68c1079657 100644 --- a/src/coreclr/src/vm/object.h +++ b/src/coreclr/src/vm/object.h @@ -87,7 +87,7 @@ class ArgDestination; struct RCW; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define OBJHEADER_SIZE (sizeof(DWORD) /* m_alignpad */ + sizeof(DWORD) /* m_SyncBlockValue */) #else #define OBJHEADER_SIZE sizeof(DWORD) /* m_SyncBlockValue */ @@ -96,7 +96,7 @@ struct RCW; #define OBJECT_SIZE TARGET_POINTER_SIZE /* m_pMethTab */ #define OBJECT_BASESIZE (OBJHEADER_SIZE + OBJECT_SIZE) -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define ARRAYBASE_SIZE (OBJECT_SIZE /* m_pMethTab */ + sizeof(DWORD) /* m_NumComponents */ + sizeof(DWORD) /* pad */) #else #define ARRAYBASE_SIZE (OBJECT_SIZE /* m_pMethTab */ + sizeof(DWORD) /* m_NumComponents */) @@ -560,9 +560,9 @@ class ArrayBase : public Object // Object::GetSize() looks at m_NumComponents even though it may not be an array (the // values is shifted out if not an array, so it's ok). 
DWORD m_NumComponents; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT DWORD pad; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT SVAL_DECL(INT32, s_arrayBoundsZero); // = 0 @@ -1537,9 +1537,9 @@ NOINLINE AssemblyBaseObject* GetRuntimeAssemblyHelper(LPVOID __me, DomainAssembl // AssemblyLoadContextBaseObject // This class is the base class for AssemblyLoadContext // -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) #include "pshpack4.h" -#endif // defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#endif // defined(TARGET_X86) && !defined(TARGET_UNIX) class AssemblyLoadContextBaseObject : public Object { friend class MscorlibBinder; @@ -1548,7 +1548,7 @@ class AssemblyLoadContextBaseObject : public Object // READ ME: // Modifying the order or fields of this object may require other changes to the // classlib class definition of this object. -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT OBJECTREF _unloadLock; OBJECTREF _resovlingUnmanagedDll; OBJECTREF _resolving; @@ -1558,7 +1558,7 @@ class AssemblyLoadContextBaseObject : public Object int64_t _id; // On 64-bit platforms this is a value type so it is placed after references and pointers DWORD _state; CLR_BOOL _isCollectible; -#else // _TARGET_64BIT_ +#else // TARGET_64BIT int64_t _id; // On 32-bit platforms this 64-bit value type is larger than a pointer so JIT places it first OBJECTREF _unloadLock; OBJECTREF _resovlingUnmanagedDll; @@ -1568,7 +1568,7 @@ class AssemblyLoadContextBaseObject : public Object INT_PTR _nativeAssemblyLoadContext; DWORD _state; CLR_BOOL _isCollectible; -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT protected: AssemblyLoadContextBaseObject() { LIMITED_METHOD_CONTRACT; } @@ -1577,9 +1577,9 @@ class AssemblyLoadContextBaseObject : public Object public: INT_PTR GetNativeAssemblyLoadContext() { LIMITED_METHOD_CONTRACT; return _nativeAssemblyLoadContext; } }; -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) #include "poppack.h" -#endif // defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#endif // defined(TARGET_X86) && !defined(TARGET_UNIX) // AssemblyNameBaseObject // This class is the base class for assembly names diff --git a/src/coreclr/src/vm/olevariant.cpp b/src/coreclr/src/vm/olevariant.cpp index 975a3e43472af..0a8268a6d18e1 100644 --- a/src/coreclr/src/vm/olevariant.cpp +++ b/src/coreclr/src/vm/olevariant.cpp @@ -317,7 +317,7 @@ VARTYPE OleVariant::GetVarTypeForTypeHandle(TypeHandle type) if(MscorlibBinder::IsClass(pMT, CLASS__DECIMAL)) return VT_DECIMAL; -#ifdef BIT64 +#ifdef HOST_64BIT if (MscorlibBinder::IsClass(pMT, CLASS__INTPTR)) return VT_I8; if (MscorlibBinder::IsClass(pMT, CLASS__UINTPTR)) @@ -3537,11 +3537,11 @@ void OleVariant::MarshalComVariantForOleVariant(VARIANT *pOle, VariantData *pCom if (V_ISBYREF(pOle)) { // Must set ObjectRef field of Variant to a specific instance. -#ifdef BIT64 +#ifdef HOST_64BIT VariantData::NewVariant(pCom, CV_U8, (INT64)(size_t)V_BYREF(pOle)); -#else // BIT64 +#else // HOST_64BIT VariantData::NewVariant(pCom, CV_U4, (INT32)(size_t)V_BYREF(pOle)); -#endif // BIT64 +#endif // HOST_64BIT } else { diff --git a/src/coreclr/src/vm/olevariant.h b/src/coreclr/src/vm/olevariant.h index f869f6eccb7d8..2cca65fadf8cb 100644 --- a/src/coreclr/src/vm/olevariant.h +++ b/src/coreclr/src/vm/olevariant.h @@ -327,7 +327,7 @@ struct VariantData private: // Typeloader reorders fields of non-blitable types. This reordering differs between 32-bit and 64-bit platforms. 
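// Illustrative sketch (hypothetical struct, not part of the patch) of why these
// declarations are split on TARGET_64BIT: a 64-bit field packs naturally after
// object references on 64-bit targets, while on 32-bit it is wider than a pointer,
// so placing it first avoids extra padding when the type loader lays the fields out.
#include <cstdint>   // int64_t
struct LayoutExample
{
#ifdef TARGET_64BIT
    void*   m_ref;   // pointer-sized slot
    int64_t m_id;    // same width as a pointer here, so no padding is introduced
#else
    int64_t m_id;    // wider than a pointer on 32-bit, so it is placed first
    void*   m_ref;
#endif
};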
-#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT Object* m_objref; INT64 m_data; INT32 m_flags; diff --git a/src/coreclr/src/vm/pefile.cpp b/src/coreclr/src/vm/pefile.cpp index 2ff6fb4106ecd..6928cc6f98c47 100644 --- a/src/coreclr/src/vm/pefile.cpp +++ b/src/coreclr/src/vm/pefile.cpp @@ -187,7 +187,7 @@ static void ValidatePEFileMachineType(PEFile *peFile) if (actualMachineType != IMAGE_FILE_MACHINE_NATIVE && actualMachineType != IMAGE_FILE_MACHINE_NATIVE_NI) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // v4.0 64-bit compatibility workaround. The 64-bit v4.0 CLR's Reflection.Load(byte[]) api does not detect cpu-matches. We should consider fixing that in // the next SxS release. In the meantime, this bypass will retain compat for 64-bit v4.0 CLR for target platforms that existed at the time. // @@ -238,7 +238,7 @@ void PEFile::LoadLibrary(BOOL allowNativeSkip/*=TRUE*/) // if allowNativeSkip==F RETURN; } -#if !defined(_TARGET_64BIT_) +#if !defined(TARGET_64BIT) if (!HasNativeImage() && !GetILimage()->Has32BitNTHeaders()) { // Tried to load 64-bit assembly on 32-bit platform. @@ -296,13 +296,13 @@ void PEFile::LoadLibrary(BOOL allowNativeSkip/*=TRUE*/) // if allowNativeSkip==F { if (GetILimage()->IsFile()) { -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX if (GetILimage()->IsILOnly()) { GetILimage()->Load(); } else -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX { GetILimage()->LoadFromMapped(); } @@ -922,7 +922,7 @@ void PEFile::SetNativeImage(PEImage *image) m_nativeImage->AddRef(); m_nativeImage->Load(); -#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_AMD64) && !defined(CROSSGEN_COMPILE) static ConfigDWORD configNGenReserveForJumpStubs; int percentReserveForJumpStubs = configNGenReserveForJumpStubs.val(CLRConfig::INTERNAL_NGenReserveForJumpStubs); if (percentReserveForJumpStubs != 0) @@ -2164,7 +2164,7 @@ void PEAssembly::PathToUrl(SString &string) SString::Iterator i = string.Begin(); -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) if (i[0] == W('\\')) { // Network path @@ -2203,7 +2203,7 @@ void PEAssembly::UrlToPath(SString &string) SString::Iterator i = string.Begin(); SString sss2(SString::Literal, W("file://")); -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) SString sss3(SString::Literal, W("file:///")); if (string.MatchCaseInsensitive(i, sss3)) string.Delete(i, 8); @@ -2222,7 +2222,7 @@ void PEAssembly::UrlToPath(SString &string) BOOL PEAssembly::FindLastPathSeparator(const SString &path, SString::Iterator &i) { -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX SString::Iterator slash = i; SString::Iterator backSlash = i; BOOL foundSlash = path.FindBack(slash, '/'); @@ -2238,7 +2238,7 @@ BOOL PEAssembly::FindLastPathSeparator(const SString &path, SString::Iterator &i return TRUE; #else return path.FindBack(i, '\\'); -#endif //PLATFORM_UNIX +#endif //TARGET_UNIX } diff --git a/src/coreclr/src/vm/peimage.cpp b/src/coreclr/src/vm/peimage.cpp index 9879443e93844..cc809b20ba805 100644 --- a/src/coreclr/src/vm/peimage.cpp +++ b/src/coreclr/src/vm/peimage.cpp @@ -348,7 +348,7 @@ BOOL PEImage::PathEquals(const SString &p1, const SString &p2) #endif } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX /* static */ void PEImage::GetPathFromDll(HINSTANCE hMod, SString &result) { @@ -367,7 +367,7 @@ void PEImage::GetPathFromDll(HINSTANCE hMod, SString &result) WszGetModuleFileName(hMod, result); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX /* static */ BOOL PEImage::CompareImage(UPTR u1, UPTR u2) @@ -996,12 +996,12 @@ PTR_PEImageLayout PEImage::GetLayoutInternal(DWORD 
imageLayoutMask,DWORD flags) BOOL bIsMappedLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_MAPPED) != 0); BOOL bIsFlatLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_FLAT) != 0); -#if !defined(PLATFORM_UNIX) +#if !defined(TARGET_UNIX) if (bIsMappedLayoutSuitable) { bIsFlatLayoutSuitable = FALSE; } -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX _ASSERTE(bIsMappedLayoutSuitable || bIsFlatLayoutSuitable); @@ -1153,7 +1153,7 @@ PTR_PEImage PEImage::LoadFlat(const void *flat, COUNT_T size) RETURN dac_cast(pImage.Extract()); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX /* static */ PTR_PEImage PEImage::LoadImage(HMODULE hMod) { @@ -1184,7 +1184,7 @@ PTR_PEImage PEImage::LoadImage(HMODULE hMod) RETURN dac_cast(pImage.Extract()); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void PEImage::Load() { @@ -1206,7 +1206,7 @@ void PEImage::Load() return; } -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX if (m_pLayouts[IMAGE_FLAT] != NULL && m_pLayouts[IMAGE_FLAT]->CheckILOnlyFormat() && !m_pLayouts[IMAGE_FLAT]->HasWriteableSections()) @@ -1224,7 +1224,7 @@ void PEImage::Load() SetLayout(IMAGE_LOADED, m_pLayouts[IMAGE_FLAT]); } else -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX { if(!IsFile()) { diff --git a/src/coreclr/src/vm/peimage.h b/src/coreclr/src/vm/peimage.h index 57cafe4df9e5a..ad7a30d6c002d 100644 --- a/src/coreclr/src/vm/peimage.h +++ b/src/coreclr/src/vm/peimage.h @@ -96,10 +96,10 @@ class PEImage static PTR_PEImage LoadFlat( const void *flat, COUNT_T size); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static PTR_PEImage LoadImage( HMODULE hMod); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX static PTR_PEImage OpenImage( LPCWSTR pPath, MDInternalImportFlags flags = MDInternalImport_Default); @@ -176,9 +176,9 @@ class PEImage static CHECK CheckStartup(); PTR_CVOID GetMetadata(COUNT_T *pSize = NULL); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static void GetPathFromDll(HINSTANCE hMod, SString &result); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX static BOOL PathEquals(const SString &p1, const SString &p2); BOOL IsTrustedNativeImage(){LIMITED_METHOD_CONTRACT; return m_bIsTrustedNativeImage;}; void SetIsTrustedNativeImage(){LIMITED_METHOD_CONTRACT; m_bIsTrustedNativeImage=TRUE;}; diff --git a/src/coreclr/src/vm/peimagelayout.cpp b/src/coreclr/src/vm/peimagelayout.cpp index 78fc62c42f7fa..94f73419a40e9 100644 --- a/src/coreclr/src/vm/peimagelayout.cpp +++ b/src/coreclr/src/vm/peimagelayout.cpp @@ -10,7 +10,7 @@ #include "peimagelayout.inl" #include "dataimage.h" -#if defined(PLATFORM_WINDOWS) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_WINDOWS) && !defined(CROSSGEN_COMPILE) #include "amsi.h" #endif @@ -44,7 +44,7 @@ PEImageLayout* PEImageLayout::Load(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bThro { STANDARD_VM_CONTRACT; -#if defined(CROSSGEN_COMPILE) || defined(FEATURE_PAL) +#if defined(CROSSGEN_COMPILE) || defined(TARGET_UNIX) return PEImageLayout::Map(pOwner); #else PEImageLayoutHolder pAlloc(new LoadedImageLayout(pOwner,bNTSafeLoad,bThrowOnError)); @@ -89,7 +89,7 @@ PEImageLayout* PEImageLayout::Map(PEImage* pOwner) RETURN pAlloc.Extract(); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX DWORD SectionCharacteristicsToPageProtection(UINT characteristics) { _ASSERTE((characteristics & VAL32(IMAGE_SCN_MEM_READ)) != 0); @@ -120,7 +120,7 @@ DWORD SectionCharacteristicsToPageProtection(UINT characteristics) return pageProtection; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX //To force base relocation on Vista (which uses ASLR), unmask IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE 
//(0x40) for OptionalHeader.DllCharacteristics @@ -205,20 +205,20 @@ void PEImageLayout::ApplyBaseRelocations() if (((pSection->Characteristics & VAL32(IMAGE_SCN_MEM_WRITE)) == 0)) { DWORD dwNewProtection = PAGE_READWRITE; -#if defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_UNIX) && !defined(CROSSGEN_COMPILE) if (((pSection->Characteristics & VAL32(IMAGE_SCN_MEM_EXECUTE)) != 0)) { // On SELinux, we cannot change protection that doesn't have execute access rights // to one that has it, so we need to set the protection to RWX instead of RW dwNewProtection = PAGE_EXECUTE_READWRITE; } -#endif // FEATURE_PAL && !CROSSGEN_COMPILE +#endif // TARGET_UNIX && !CROSSGEN_COMPILE if (!ClrVirtualProtect(pWriteableRegion, cbWriteableRegion, dwNewProtection, &dwOldProtection)) ThrowLastError(); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX dwOldProtection = SectionCharacteristicsToPageProtection(pSection->Characteristics); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } } @@ -236,7 +236,7 @@ void PEImageLayout::ApplyBaseRelocations() pEndAddressToFlush = max(pEndAddressToFlush, address + sizeof(TADDR)); break; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM case IMAGE_REL_BASED_THUMB_MOV32: PutThumb2Mov32((UINT16 *)address, GetThumb2Mov32((UINT16 *)address) + (INT32)delta); pEndAddressToFlush = max(pEndAddressToFlush, address + 8); @@ -285,9 +285,9 @@ void PEImageLayout::ApplyBaseRelocations() dwOldProtection, &dwOldProtection)) ThrowLastError(); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_LOADMarkSectionAsNotNeeded((void*)dir); -#endif // FEATURE_PAL +#endif // TARGET_UNIX #endif // CROSSGEN_COMPILE if (pFlushRegion != NULL) @@ -313,7 +313,7 @@ RawImageLayout::RawImageLayout(const void *flat, COUNT_T size, PEImage* pOwner) if (size) { -#if defined(PLATFORM_WINDOWS) && !defined(CROSSGEN_COMPILE) +#if defined(TARGET_WINDOWS) && !defined(CROSSGEN_COMPILE) if (Amsi::IsBlockedByAmsiScan((void*)flat, size)) { // This is required to throw a BadImageFormatException for compatibility, but @@ -322,7 +322,7 @@ RawImageLayout::RawImageLayout(const void *flat, COUNT_T size, PEImage* pOwner) GetHRMsg(HRESULT_FROM_WIN32(ERROR_VIRUS_INFECTED), virusHrString); ThrowHR(COR_E_BADIMAGEFORMAT, virusHrString); } -#endif // defined(PLATFORM_WINDOWS) && !defined(CROSSGEN_COMPILE) +#endif // defined(TARGET_WINDOWS) && !defined(CROSSGEN_COMPILE) HandleHolder mapping(WszCreateFileMapping(INVALID_HANDLE_VALUE, NULL, @@ -356,14 +356,14 @@ RawImageLayout::RawImageLayout(const void *mapped, PEImage* pOwner, BOOL bTakeOw if (bTakeOwnership) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PathString wszDllName; WszGetModuleFileName((HMODULE)mapped, wszDllName); m_LibraryHolder=CLRLoadLibraryEx(wszDllName,NULL,GetLoadWithAlteredSearchPathFlag()); -#else // !FEATURE_PAL - _ASSERTE(!"bTakeOwnership Should not be used on FEATURE_PAL"); -#endif // !FEATURE_PAL +#else // !TARGET_UNIX + _ASSERTE(!"bTakeOwnership Should not be used on TARGET_UNIX"); +#endif // !TARGET_UNIX } IfFailThrow(Init((void*)mapped,(bool)(bFixedUp!=FALSE))); @@ -426,7 +426,7 @@ MappedImageLayout::MappedImageLayout(PEImage* pOwner) // If mapping was requested, try to do SEC_IMAGE mapping LOG((LF_LOADER, LL_INFO100, "PEImage: Opening OS mapped %S (hFile %p)\n", (LPCWSTR) GetPath(), hFile)); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Let OS map file for us @@ -520,7 +520,7 @@ MappedImageLayout::MappedImageLayout(PEImage* pOwner) } #endif // _DEBUG -#else //!FEATURE_PAL +#else //!TARGET_UNIX #ifndef CROSSGEN_COMPILE m_LoadedFile = PAL_LOADLoadPEFile(hFile); @@ 
-556,10 +556,10 @@ MappedImageLayout::MappedImageLayout(PEImage* pOwner) m_LoadedFile = NULL; #endif // !CROSSGEN_COMPILE -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } -#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL) +#if !defined(CROSSGEN_COMPILE) && !defined(TARGET_UNIX) LoadedImageLayout::LoadedImageLayout(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bThrowOnError) { CONTRACTL @@ -591,7 +591,7 @@ LoadedImageLayout::LoadedImageLayout(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bTh LOG((LF_LOADER, LL_INFO1000, "PEImage: Opened HMODULE %S\n", (LPCWSTR) GetPath())); } -#endif // !CROSSGEN_COMPILE && !FEATURE_PAL +#endif // !CROSSGEN_COMPILE && !TARGET_UNIX FlatImageLayout::FlatImageLayout(PEImage* pOwner) { diff --git a/src/coreclr/src/vm/peimagelayout.h b/src/coreclr/src/vm/peimagelayout.h index f59566afa516a..c8f9adfc9403f 100644 --- a/src/coreclr/src/vm/peimagelayout.h +++ b/src/coreclr/src/vm/peimagelayout.h @@ -90,9 +90,9 @@ class RawImageLayout: public PEImageLayout VPTR_VTABLE_CLASS(RawImageLayout,PEImageLayout) protected: CLRMapViewHolder m_DataCopy; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HModuleHolder m_LibraryHolder; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX public: RawImageLayout(const void *flat, COUNT_T size,PEImage* pOwner); @@ -117,7 +117,7 @@ class MappedImageLayout: public PEImageLayout VPTR_VTABLE_CLASS(MappedImageLayout,PEImageLayout) VPTR_UNIQUE(0x15) protected: -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HandleHolder m_FileMap; CLRMapViewHolder m_FileView; #else @@ -129,7 +129,7 @@ class MappedImageLayout: public PEImageLayout #endif }; -#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL) +#if !defined(CROSSGEN_COMPILE) && !defined(TARGET_UNIX) class LoadedImageLayout: public PEImageLayout { VPTR_VTABLE_CLASS(LoadedImageLayout,PEImageLayout) @@ -152,7 +152,7 @@ class LoadedImageLayout: public PEImageLayout } #endif // !DACCESS_COMPILE }; -#endif // !CROSSGEN_COMPILE && !FEATURE_PAL +#endif // !CROSSGEN_COMPILE && !TARGET_UNIX class FlatImageLayout: public PEImageLayout { diff --git a/src/coreclr/src/vm/precode.cpp b/src/coreclr/src/vm/precode.cpp index 28e51ab3fae70..06573a542642d 100644 --- a/src/coreclr/src/vm/precode.cpp +++ b/src/coreclr/src/vm/precode.cpp @@ -545,14 +545,14 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk, // we will allocate a new exact type of precode in GetOrCreatePrecode. BOOL fForcedPrecode = hasMethodDescVersionableWithPrecode || pFirstMD->RequiresStableEntryPoint(count > 1); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM if (pFirstMD->RequiresMethodDescCallingConvention(count > 1) || count >= MethodDescChunk::GetCompactEntryPointMaxCount ()) { // We do not pass method desc on scratch register fForcedPrecode = TRUE; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM if (!fForcedPrecode && (totalSize > MethodDescChunk::SizeOfCompactEntryPoints(count))) return NULL; @@ -672,7 +672,7 @@ void Precode::Save(DataImage *image) _ASSERTE(GetType() != PRECODE_FIXUP); #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // StubPrecode may have straddlers (relocations crossing pages) on x86 and x64. We need // to insert padding to eliminate it. To do that, we need to save these using custom ZapNode that can only // be implemented in dataimage.cpp or zapper due to factoring of the header files. 
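// Illustrative sketch (hypothetical helper, not part of the patch): a "straddler" is a
// fixup whose bytes cross a page boundary; detecting one is what forces the padded
// ZapNode layout for x86/amd64 stub precodes mentioned above.
#include <cstddef>   // size_t
static bool IsStraddler(size_t fixupStart, size_t fixupSize, size_t pageSize = 0x1000)
{
    return (fixupStart / pageSize) != ((fixupStart + fixupSize - 1) / pageSize);
}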
@@ -688,7 +688,7 @@ void Precode::Save(DataImage *image) static_cast(SizeOf(t)), GetPrecodeItemKind(image, pMD, IsPrebound(image)), AlignOf(t)); -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 } void Precode::Fixup(DataImage *image, MethodDesc * pMD) @@ -697,14 +697,14 @@ void Precode::Fixup(DataImage *image, MethodDesc * pMD) PrecodeType precodeType = GetType(); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) #if defined(HAS_FIXUP_PRECODE) if (precodeType == PRECODE_FIXUP) { AsFixupPrecode()->Fixup(image, pMD); } #endif -#else // _TARGET_X86_ || _TARGET_AMD64_ +#else // TARGET_X86 || TARGET_AMD64 ZapNode * pCodeNode = NULL; if (IsPrebound(image)) @@ -730,7 +730,7 @@ void Precode::Fixup(DataImage *image, MethodDesc * pMD) UnexpectedPrecodeType("Precode::Save", precodeType); break; } -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 } BOOL Precode::IsPrebound(DataImage *image) diff --git a/src/coreclr/src/vm/precode.h b/src/coreclr/src/vm/precode.h index cef1307267023..0363700d095b9 100644 --- a/src/coreclr/src/vm/precode.h +++ b/src/coreclr/src/vm/precode.h @@ -113,24 +113,24 @@ class Precode { #ifdef OFFSETOF_PRECODE_TYPE BYTE type = m_data[OFFSETOF_PRECODE_TYPE]; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (type == X86_INSTR_MOV_RM_R) type = m_data[OFFSETOF_PRECODE_TYPE_MOV_RM_R]; -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (type == (X86_INSTR_MOV_R10_IMM64 & 0xFF)) type = m_data[OFFSETOF_PRECODE_TYPE_MOV_R10]; else if ((type == (X86_INSTR_CALL_REL32 & 0xFF)) || (type == (X86_INSTR_JMP_REL32 & 0xFF))) type = m_data[OFFSETOF_PRECODE_TYPE_CALL_OR_JMP]; #endif // _AMD64 -#if defined(HAS_FIXUP_PRECODE) && (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) +#if defined(HAS_FIXUP_PRECODE) && (defined(TARGET_X86) || defined(TARGET_AMD64)) if (type == FixupPrecode::TypePrestub) type = FixupPrecode::Type; #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM static_assert_no_msg(offsetof(StubPrecode, m_pTarget) == offsetof(NDirectImportPrecode, m_pMethodDesc)); // If the precode does not have thumb bit on target, it must be NDirectImportPrecode. 
if (type == StubPrecode::Type && ((AsStubPrecode()->m_pTarget & THUMB_CODE) == 0)) @@ -151,16 +151,16 @@ class Precode { SUPPORTS_DAC; unsigned int align = PRECODE_ALIGNMENT; -#if defined(_TARGET_X86_) && defined(HAS_FIXUP_PRECODE) +#if defined(TARGET_X86) && defined(HAS_FIXUP_PRECODE) // Fixup precodes has to be aligned to allow atomic patching if (t == PRECODE_FIXUP) align = 8; -#endif // _TARGET_X86_ && HAS_FIXUP_PRECODE +#endif // TARGET_X86 && HAS_FIXUP_PRECODE -#if defined(_TARGET_ARM_) && defined(HAS_COMPACT_ENTRYPOINTS) +#if defined(TARGET_ARM) && defined(HAS_COMPACT_ENTRYPOINTS) // Precodes have to be aligned to allow fast compact entry points check _ASSERTE (align >= sizeof(void*)); -#endif // _TARGET_ARM_ && HAS_COMPACT_ENTRYPOINTS +#endif // TARGET_ARM && HAS_COMPACT_ENTRYPOINTS return align; } @@ -189,14 +189,14 @@ class Precode { if (target == addr) return TRUE; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Handle jump stubs if (isJumpRel64(target)) { target = decodeJump64(target); if (target == addr) return TRUE; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 return FALSE; #endif // CROSSGEN_COMPILE @@ -227,7 +227,7 @@ class Precode { static SIZE_T GetEntryPointOffset() { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM return THUMB_CODE; #else return 0; diff --git a/src/coreclr/src/vm/prestub.cpp b/src/coreclr/src/vm/prestub.cpp index ca7e73d515c3f..41ccc22e6d56b 100644 --- a/src/coreclr/src/vm/prestub.cpp +++ b/src/coreclr/src/vm/prestub.cpp @@ -320,7 +320,7 @@ PCODE MethodDesc::PrepareInitialCode() PrepareCodeConfig config(NativeCodeVersion(this), TRUE, TRUE); PCODE pCode = PrepareCode(&config); -#if defined(FEATURE_GDBJIT) && defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE) +#if defined(FEATURE_GDBJIT) && defined(TARGET_UNIX) && !defined(CROSSGEN_COMPILE) NotifyGdb::MethodPrepared(this); #endif @@ -1395,10 +1395,10 @@ void CreateInstantiatingILStubTargetSig(MethodDesc *pBaseMD, SigPointer pReturn = msig.GetReturnProps(); pReturn.ConvertToInternalExactlyOne(msig.GetModule(), &typeContext, stubSigBuilder); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // The hidden context parameter stubSigBuilder->AppendElementType(ELEMENT_TYPE_I); -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 // Copy rest of the arguments msig.NextArg(); @@ -1408,10 +1408,10 @@ void CreateInstantiatingILStubTargetSig(MethodDesc *pBaseMD, pArgs.ConvertToInternalExactlyOne(msig.GetModule(), &typeContext, stubSigBuilder); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // The hidden context parameter stubSigBuilder->AppendElementType(ELEMENT_TYPE_I); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } Stub * CreateUnboxingILStubForSharedGenericValueTypeMethods(MethodDesc* pTargetMD) @@ -1455,7 +1455,7 @@ Stub * CreateUnboxingILStubForSharedGenericValueTypeMethods(MethodDesc* pTargetM pCode->EmitLoadThis(); pCode->EmitLDFLDA(tokRawData); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // 2.2 Push the rest of the arguments for x86 for (unsigned i = 0; i < msig.NumFixedArgs();i++) { @@ -1471,7 +1471,7 @@ Stub * CreateUnboxingILStubForSharedGenericValueTypeMethods(MethodDesc* pTargetM pCode->EmitSUB(); pCode->EmitLDIND_I(); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // 2.4 Push the rest of the arguments for not x86 for (unsigned i = 0; i < msig.NumFixedArgs();i++) { @@ -1561,25 +1561,25 @@ Stub * CreateInstantiatingILStub(MethodDesc* pTargetMD, void* pHiddenArg) pCode->EmitLoadThis(); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // 2.2 Push the rest of the arguments for x86 
for (unsigned i = 0; i < msig.NumFixedArgs();i++) { pCode->EmitLDARG(i); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // 2.3 Push the hidden context param // InstantiatingStub pCode->EmitLDC((TADDR)pHiddenArg); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // 2.4 Push the rest of the arguments for not x86 for (unsigned i = 0; i < msig.NumFixedArgs();i++) { pCode->EmitLDARG(i); } -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 // 2.5 Push the target address pCode->EmitLDC((TADDR)pTargetMD->GetMultiCallableAddrOfCode(CORINFO_ACCESS_ANY)); @@ -1750,7 +1750,7 @@ Stub * MakeInstantiatingStubWorker(MethodDesc *pMD) } #endif // defined(FEATURE_SHARE_GENERIC_CODE) -#if defined (HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_) +#if defined (HAS_COMPACT_ENTRYPOINTS) && defined (TARGET_ARM) extern "C" MethodDesc * STDCALL PreStubGetMethodDescForCompactEntryPoint (PCODE pCode) { @@ -1763,7 +1763,7 @@ extern "C" MethodDesc * STDCALL PreStubGetMethodDescForCompactEntryPoint (PCODE return MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, FALSE); } -#endif // defined (HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_) +#endif // defined (HAS_COMPACT_ENTRYPOINTS) && defined (TARGET_ARM) //============================================================================= // This function generates the real code for a method and installs it into @@ -2128,7 +2128,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT) // should have thrown an exception if it couldn't make a stub. _ASSERTE((pStub != NULL) ^ (pCode != NULL)); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // // We are seeing memory reordering race around fixups (see DDB 193514 and related bugs). We get into // situation where the patched precode is visible by other threads, but the resolved fixups @@ -2174,9 +2174,9 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT) // use the prestub. 
//========================================================================== -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) static PCODE g_UMThunkPreStub; -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL #ifndef DACCESS_COMPILE @@ -2203,9 +2203,9 @@ void InitPreStubManager(void) return; } -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) g_UMThunkPreStub = GenerateUMThunkPrestub()->GetEntryPoint(); -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL ThePreStubManager::Init(); } @@ -2214,18 +2214,18 @@ PCODE TheUMThunkPreStub() { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL) +#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL) return g_UMThunkPreStub; -#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#else // TARGET_X86 && !FEATURE_STUBS_AS_IL return GetEEFuncEntryPoint(TheUMEntryPrestub); -#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL +#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL } PCODE TheVarargNDirectStub(BOOL hasRetBuffArg) { LIMITED_METHOD_CONTRACT; -#if !defined(_TARGET_X86_) && !defined(_TARGET_ARM64_) +#if !defined(TARGET_X86) && !defined(TARGET_ARM64) if (hasRetBuffArg) { return GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg); @@ -2259,7 +2259,7 @@ static PCODE PatchNonVirtualExternalMethod(MethodDesc * pMD, PCODE pCode, PTR_CO { CORCOMPILE_EXTERNAL_METHOD_THUNK * pThunk = (CORCOMPILE_EXTERNAL_METHOD_THUNK *)pIndirection; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) INT64 oldValue = *(INT64*)pThunk; BYTE* pOldValue = (BYTE*)&oldValue; @@ -2276,11 +2276,11 @@ static PCODE PatchNonVirtualExternalMethod(MethodDesc * pMD, PCODE pCode, PTR_CO FlushInstructionCache(GetCurrentProcess(), pThunk, 8); } -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) // Patchup the thunk to point to the actual implementation of the cross module external method pThunk->m_pTarget = pCode; - #if defined(_TARGET_ARM_) + #if defined(TARGET_ARM) // ThumbBit must be set on the target address _ASSERTE(pCode & THUMB_CODE); #endif @@ -2343,13 +2343,13 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl FrameWithCookie frame(pTransitionBlock); ExternalMethodFrame * pEMFrame = &frame; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // Decode indirection cell from callsite if it is not present if (pIndirection == NULL) { // Asssume that the callsite is call [xxxxxxxx] PCODE retAddr = pEMFrame->GetReturnAddress(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pIndirection = *(((TADDR *)retAddr) - 1); #else pIndirection = *(((INT32 *)retAddr) - 1) + retAddr; @@ -2624,7 +2624,7 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl } -#if !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) && defined(FEATURE_PREJIT) +#if !defined(TARGET_X86) && !defined(TARGET_AMD64) && defined(FEATURE_PREJIT) //========================================================================================== // In NGen image, virtual slots inherited from cross-module dependencies point to jump thunks. 
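// Illustrative sketch of the patch-on-first-call pattern these thunks use (names follow
// the hunk below; the surrounding checks in VirtualMethodFixupWorker are elided):
if (pThunk->m_pTarget != pCode)
{
    pThunk->m_pTarget = pCode;   // later virtual calls jump straight to the method body
}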
@@ -2674,13 +2674,13 @@ EXTERN_C PCODE VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_VIRTUAL_I // Patch the thunk to the actual method body pThunk->m_pTarget = pCode; } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // The target address should have the thumb bit set _ASSERTE(pCode & THUMB_CODE); #endif return pCode; } -#endif // !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_) && defined(FEATURE_PREJIT) +#endif // !defined(TARGET_X86) && !defined(TARGET_AMD64) && defined(FEATURE_PREJIT) #ifdef FEATURE_READYTORUN @@ -2849,7 +2849,7 @@ static PCODE getHelperForStaticBase(Module * pModule, CORCOMPILE_FIXUP_BLOB_KIND TADDR GetFirstArgumentRegisterValuePtr(TransitionBlock * pTransitionBlock) { TADDR pArgument = (TADDR)pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // x86 is special as always pArgument += offsetof(ArgumentRegisters, ECX); #endif @@ -3352,13 +3352,13 @@ extern "C" SIZE_T STDCALL DynamicHelperWorker(TransitionBlock * pTransitionBlock INSTALL_MANAGED_EXCEPTION_DISPATCHER; INSTALL_UNWIND_AND_CONTINUE_HANDLER; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) // Decode indirection cell from callsite if it is not present if (pCell == NULL) { // Asssume that the callsite is call [xxxxxxxx] PCODE retAddr = pFrame->GetReturnAddress(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pCell = *(((TADDR **)retAddr) - 1); #else pCell = (TADDR *)(*(((INT32 *)retAddr) - 1) + retAddr); diff --git a/src/coreclr/src/vm/profilinghelper.cpp b/src/coreclr/src/vm/profilinghelper.cpp index ffba540e073a1..aac5fbb563b27 100644 --- a/src/coreclr/src/vm/profilinghelper.cpp +++ b/src/coreclr/src/vm/profilinghelper.cpp @@ -145,9 +145,9 @@ #include "utilcode.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include "securitywrapper.h" -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX //--------------------------------------------------------------------------------------- // Normally, this would go in profilepriv.inl, but it's not easily inlineable because of @@ -596,7 +596,7 @@ HRESULT ProfilingAPIUtility::ProfilerCLSIDFromString( } else { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX WCHAR *szFrom, *szTo; #ifdef _PREFAST_ @@ -618,10 +618,10 @@ HRESULT ProfilingAPIUtility::ProfilerCLSIDFromString( #pragma warning(pop) #endif /*_PREFAST_*/ -#else // !FEATURE_PAL - // ProgID not supported on FEATURE_PAL +#else // !TARGET_UNIX + // ProgID not supported on TARGET_UNIX hr = E_INVALIDARG; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } if (FAILED(hr)) @@ -702,9 +702,9 @@ HRESULT ProfilingAPIUtility::AttemptLoadProfilerForStartup() IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER, &wszClsid)); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_32, &wszProfilerDLL)); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH_64, &wszProfilerDLL)); #endif if(wszProfilerDLL == NULL) @@ -732,7 +732,7 @@ HRESULT ProfilingAPIUtility::AttemptLoadProfilerForStartup() return S_FALSE; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // If the environment variable doesn't exist, profiling is not enabled. 
if (wszProfilerDLL == NULL) { @@ -743,7 +743,7 @@ HRESULT ProfilingAPIUtility::AttemptLoadProfilerForStartup() return S_FALSE; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX CLSID clsid; hr = ProfilingAPIUtility::ProfilerCLSIDFromString(wszClsid, &clsid); diff --git a/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp b/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp index 571f1a3b951e4..139d3bae44174 100644 --- a/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp +++ b/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp @@ -7550,7 +7550,7 @@ StackWalkAction ProfilerStackWalkCallback(CrawlFrame *pCf, PROFILER_STACK_WALK_D UINT_PTR currentIP = 0; REGDISPLAY *pRegDisplay = pCf->GetRegisterSet(); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) CONTEXT builtContext; #endif @@ -7602,7 +7602,7 @@ StackWalkAction ProfilerStackWalkCallback(CrawlFrame *pCf, PROFILER_STACK_WALK_D if (pData->infoFlags & COR_PRF_SNAPSHOT_REGISTER_CONTEXT) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // // X86 stack walking does not keep the context up-to-date during the // walk. Instead it keeps the REGDISPLAY up-to-date. Thus, we need to @@ -7636,7 +7636,7 @@ StackWalkAction ProfilerStackWalkCallback(CrawlFrame *pCf, PROFILER_STACK_WALK_D return SWA_ABORT; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //--------------------------------------------------------------------------------------- // Normally, calling GetFunction() on the frame is sufficient to ensure @@ -7970,7 +7970,7 @@ HRESULT ProfToEEInterfaceImpl::ProfilerEbpWalker( } } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 //***************************************************************************** // The profiler stackwalk Wrapper @@ -8139,7 +8139,7 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread, pThreadToSnapshot = (Thread *)thread; } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if ((infoFlags & ~(COR_PRF_SNAPSHOT_REGISTER_CONTEXT | COR_PRF_SNAPSHOT_X86_OPTIMIZED)) != 0) #else if ((infoFlags & ~(COR_PRF_SNAPSHOT_REGISTER_CONTEXT)) != 0) @@ -8557,7 +8557,7 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshotHelper(Thread * pThreadToSnapshot, { if ((pParam->pData->infoFlags & COR_PRF_SNAPSHOT_X86_OPTIMIZED) != 0) { -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // If check in the beginning of DoStackSnapshot (to return E_INVALIDARG) should // make this unreachable _ASSERTE(!"COR_PRF_SNAPSHOT_X86_OPTIMIZED on non-X86 should be unreachable!"); @@ -8568,7 +8568,7 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshotHelper(Thread * pThreadToSnapshot, pParam->pctxSeed, pParam->pData->callback, pParam->pData->clientData); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } else { diff --git a/src/coreclr/src/vm/proftoeeinterfaceimpl.h b/src/coreclr/src/vm/proftoeeinterfaceimpl.h index 33756345d1915..a67bd63345888 100644 --- a/src/coreclr/src/vm/proftoeeinterfaceimpl.h +++ b/src/coreclr/src/vm/proftoeeinterfaceimpl.h @@ -702,9 +702,9 @@ class ProfToEEInterfaceImpl : public ICorProfilerInfo12 HRESULT SetupThreadForReJIT(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 HRESULT ProfilerEbpWalker(Thread * pThreadToSnapshot, LPCONTEXT pctxSeed, StackSnapshotCallback * callback, void * clientData); -#endif //_TARGET_X86_ +#endif //TARGET_X86 }; #endif // PROFILING_SUPPORTED diff --git a/src/coreclr/src/vm/qcall.h b/src/coreclr/src/vm/qcall.h index 975359291f174..380a041ca9215 100644 --- a/src/coreclr/src/vm/qcall.h +++ b/src/coreclr/src/vm/qcall.h @@ -118,11 +118,11 @@ // } -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX #define QCALLTYPE __cdecl -#else // PLATFORM_UNIX +#else // TARGET_UNIX 
#define QCALLTYPE __stdcall -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX #define BEGIN_QCALL \ INSTALL_MANAGED_EXCEPTION_DISPATCHER \ diff --git a/src/coreclr/src/vm/readytoruninfo.cpp b/src/coreclr/src/vm/readytoruninfo.cpp index 60534e575692c..7222cdccc09be 100644 --- a/src/coreclr/src/vm/readytoruninfo.cpp +++ b/src/coreclr/src/vm/readytoruninfo.cpp @@ -53,7 +53,7 @@ MethodDesc * ReadyToRunInfo::GetMethodDescForEntryPoint(PCODE entryPoint) } CONTRACTL_END; -#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && defined(FEATURE_PAL)) +#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(TARGET_UNIX)) // A normal method entry point is always 8 byte aligned, but a funclet can start at an odd address. // Since PtrHashMap can't handle odd pointers, check for this case and return NULL. if ((entryPoint & 0x1) != 0) diff --git a/src/coreclr/src/vm/reflectioninvocation.cpp b/src/coreclr/src/vm/reflectioninvocation.cpp index 8b4bcb1323171..2724de4771b1f 100644 --- a/src/coreclr/src/vm/reflectioninvocation.cpp +++ b/src/coreclr/src/vm/reflectioninvocation.cpp @@ -844,7 +844,7 @@ void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTRE GCPROTECT_BEGIN(targetException); -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) if (IsWatsonEnabled()) { if (!CLRException::IsPreallocatedExceptionObject(targetException)) @@ -880,7 +880,7 @@ void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTRE } } } -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX #ifdef FEATURE_CORRUPTING_EXCEPTIONS // Get the corruption severity of the exception that came in through reflection invocation. @@ -893,7 +893,7 @@ void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTRE OBJECTREF except = InvokeUtil::CreateTargetExcept(&targetException); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (IsWatsonEnabled()) { struct @@ -936,7 +936,7 @@ void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTRE except = gcTIE.oExcept; GCPROTECT_END(); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Since the original exception is inner of target invocation exception, // when TIE is seen to be raised for the first time, we will end up diff --git a/src/coreclr/src/vm/rtlfunctions.cpp b/src/coreclr/src/vm/rtlfunctions.cpp index 9e657d2c28a79..2f86b1e0966be 100644 --- a/src/coreclr/src/vm/rtlfunctions.cpp +++ b/src/coreclr/src/vm/rtlfunctions.cpp @@ -17,7 +17,7 @@ #include "rtlfunctions.h" -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 RtlVirtualUnwindFn* RtlVirtualUnwind_Unsafe = NULL; @@ -46,7 +46,7 @@ HRESULT EnsureRtlFunctions() return S_OK; } -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 HRESULT EnsureRtlFunctions() { @@ -54,7 +54,7 @@ HRESULT EnsureRtlFunctions() return S_OK; } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #if defined(FEATURE_EH_FUNCLETS) diff --git a/src/coreclr/src/vm/rtlfunctions.h b/src/coreclr/src/vm/rtlfunctions.h index 0afb7d66cccbc..9db918718c0f2 100644 --- a/src/coreclr/src/vm/rtlfunctions.h +++ b/src/coreclr/src/vm/rtlfunctions.h @@ -50,7 +50,7 @@ PVOID DecodeDynamicFunctionTableContext (PVOID pvContext) #endif // FEATURE_EH_FUNCLETS -#if defined(FEATURE_EH_FUNCLETS) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL) +#if defined(FEATURE_EH_FUNCLETS) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) && !defined(TARGET_UNIX) // Wrapper for RtlInstallFunctionTableCallback. 
VOID InstallEEFunctionTable( @@ -68,12 +68,12 @@ VOID DeleteEEFunctionTable( RtlDeleteFunctionTable((PT_RUNTIME_FUNCTION)((ULONG64)pvTableID | 3)); } -#else // FEATURE_EH_FUNCLETS && !DACCESS_COMPILE && !CROSSGEN_COMPILE && !FEATURE_PAL +#else // FEATURE_EH_FUNCLETS && !DACCESS_COMPILE && !CROSSGEN_COMPILE && !TARGET_UNIX #define InstallEEFunctionTable(pvTableID, pvStartRange, cbRange, pfnGetRuntimeFunctionCallback, pvContext, TableType) do { } while (0) #define DeleteEEFunctionTable(pvTableID) do { } while (0) -#endif // FEATURE_EH_FUNCLETS && !DACCESS_COMPILE && !CROSSGEN_COMPILE && !FEATURE_PAL +#endif // FEATURE_EH_FUNCLETS && !DACCESS_COMPILE && !CROSSGEN_COMPILE && !TARGET_UNIX #endif // !__RTLFUNCTIONS_H__ diff --git a/src/coreclr/src/vm/runtimecallablewrapper.h b/src/coreclr/src/vm/runtimecallablewrapper.h index 52dad25f66d07..ed1e95b2aeaba 100644 --- a/src/coreclr/src/vm/runtimecallablewrapper.h +++ b/src/coreclr/src/vm/runtimecallablewrapper.h @@ -78,17 +78,17 @@ class Thread; #define GC_PRESSURE_MACHINE_LOCAL 4004 #define GC_PRESSURE_REMOTE 4824 -#ifdef BIT64 +#ifdef HOST_64BIT #define GC_PRESSURE_WINRT_BASE 1000 #define GC_PRESSURE_WINRT_LOW 12000 #define GC_PRESSURE_WINRT_MEDIUM 120000 #define GC_PRESSURE_WINRT_HIGH 1200000 -#else // BIT64 +#else // HOST_64BIT #define GC_PRESSURE_WINRT_BASE 750 #define GC_PRESSURE_WINRT_LOW 8000 #define GC_PRESSURE_WINRT_MEDIUM 80000 #define GC_PRESSURE_WINRT_HIGH 800000 -#endif // BIT64 +#endif // HOST_64BIT extern bool g_fShutDownCOM; diff --git a/src/coreclr/src/vm/sampleprofiler.cpp b/src/coreclr/src/vm/sampleprofiler.cpp index 563139897addf..851b56f48c1f2 100644 --- a/src/coreclr/src/vm/sampleprofiler.cpp +++ b/src/coreclr/src/vm/sampleprofiler.cpp @@ -11,9 +11,9 @@ #ifdef FEATURE_PERFTRACING -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #include -#endif //FEATURE_PAL +#endif //TARGET_UNIX const unsigned long NUM_NANOSECONDS_IN_1_MS = 1000000; @@ -29,13 +29,13 @@ unsigned long SampleProfiler::s_samplingRateInNs = NUM_NANOSECONDS_IN_1_MS; // 1 bool SampleProfiler::s_timePeriodIsSet = FALSE; int32_t SampleProfiler::s_RefCount = 0; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PVOID SampleProfiler::s_timeBeginPeriodFn = NULL; PVOID SampleProfiler::s_timeEndPeriodFn = NULL; HINSTANCE SampleProfiler::s_hMultimediaLib = NULL; typedef MMRESULT(WINAPI *TimePeriodFnPtr)(UINT uPeriod); -#endif //FEATURE_PAL +#endif //TARGET_UNIX void SampleProfiler::Initialize(EventPipeProviderCallbackDataQueue* pEventPipeProviderCallbackDataQueue) { @@ -77,7 +77,7 @@ void SampleProfiler::Enable(EventPipeProviderCallbackDataQueue *pEventPipeProvid const bool fSuccess = LoadDependencies(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX _ASSERTE(fSuccess); // TODO: Stress log on failure? 
#else @@ -269,11 +269,11 @@ void SampleProfiler::PlatformSleep(unsigned long nanoseconds) } CONTRACTL_END; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_nanosleep(nanoseconds); -#else //FEATURE_PAL +#else //TARGET_UNIX ClrSleepEx(s_samplingRateInNs / NUM_NANOSECONDS_IN_1_MS, FALSE); -#endif //FEATURE_PAL +#endif //TARGET_UNIX } void SampleProfiler::SetTimeGranularity() @@ -286,7 +286,7 @@ void SampleProfiler::SetTimeGranularity() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Attempt to set the systems minimum timer period to the sampling rate // If the sampling rate is lower than the current system setting (16ms by default), // this will cause the OS to wake more often for scheduling descsion, allowing us to take samples @@ -299,7 +299,7 @@ void SampleProfiler::SetTimeGranularity() s_timePeriodIsSet = TRUE; } } -#endif //FEATURE_PAL +#endif //TARGET_UNIX } void SampleProfiler::ResetTimeGranularity() @@ -312,7 +312,7 @@ void SampleProfiler::ResetTimeGranularity() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // End the modifications we had to the timer period in Enable if (s_timeEndPeriodFn != NULL) { @@ -321,7 +321,7 @@ void SampleProfiler::ResetTimeGranularity() s_timePeriodIsSet = FALSE; } } -#endif //FEATURE_PAL +#endif //TARGET_UNIX } bool SampleProfiler::LoadDependencies() @@ -334,7 +334,7 @@ bool SampleProfiler::LoadDependencies() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (s_RefCount > 0) return true; // Already loaded. @@ -349,7 +349,7 @@ bool SampleProfiler::LoadDependencies() return s_hMultimediaLib != NULL && s_timeBeginPeriodFn != NULL && s_timeEndPeriodFn != NULL; #else return FALSE; -#endif //FEATURE_PAL +#endif //TARGET_UNIX } void SampleProfiler::UnloadDependencies() @@ -362,7 +362,7 @@ void SampleProfiler::UnloadDependencies() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (s_hMultimediaLib != NULL) { FreeLibrary(s_hMultimediaLib); @@ -370,7 +370,7 @@ void SampleProfiler::UnloadDependencies() s_timeBeginPeriodFn = NULL; s_timeEndPeriodFn = NULL; } -#endif //FEATURE_PAL +#endif //TARGET_UNIX } #endif // FEATURE_PERFTRACING diff --git a/src/coreclr/src/vm/sampleprofiler.h b/src/coreclr/src/vm/sampleprofiler.h index edf3717017289..d775fe08c68c7 100644 --- a/src/coreclr/src/vm/sampleprofiler.h +++ b/src/coreclr/src/vm/sampleprofiler.h @@ -70,11 +70,11 @@ class SampleProfiler static bool LoadDependencies(); static void UnloadDependencies(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static HINSTANCE s_hMultimediaLib; static PVOID s_timeBeginPeriodFn; static PVOID s_timeEndPeriodFn; -#endif //FEATURE_PAL +#endif //TARGET_UNIX static void SetTimeGranularity(); static void ResetTimeGranularity(); diff --git a/src/coreclr/src/vm/siginfo.hpp b/src/coreclr/src/vm/siginfo.hpp index 715b95327c521..e294239670520 100644 --- a/src/coreclr/src/vm/siginfo.hpp +++ b/src/coreclr/src/vm/siginfo.hpp @@ -27,7 +27,7 @@ // These macros tell us whether the arguments we see as we proceed with the signature walk are mapped // to increasing or decreasing stack addresses. This is valid only for arguments that go on the stack. 
//--------------------------------------------------------------------------------------- -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) #define STACK_GROWS_DOWN_ON_ARGS_WALK #else #define STACK_GROWS_UP_ON_ARGS_WALK diff --git a/src/coreclr/src/vm/simplerwlock.hpp b/src/coreclr/src/vm/simplerwlock.hpp index 60c99aa57f53a..302822d80245d 100644 --- a/src/coreclr/src/vm/simplerwlock.hpp +++ b/src/coreclr/src/vm/simplerwlock.hpp @@ -102,7 +102,7 @@ class SimpleRWLock // Check for dead lock situation. Volatile m_countNoTriggerGC; -#ifdef BIT64 +#ifdef HOST_64BIT // ensures that we are a multiple of 8-bytes UINT32 pad; #endif diff --git a/src/coreclr/src/vm/stackingallocator.cpp b/src/coreclr/src/vm/stackingallocator.cpp index fdb826e5a145f..7b332e0460b7e 100644 --- a/src/coreclr/src/vm/stackingallocator.cpp +++ b/src/coreclr/src/vm/stackingallocator.cpp @@ -384,7 +384,7 @@ void * __cdecl operator new(size_t n, StackingAllocator * alloc) STATIC_CONTRACT_THROWS; STATIC_CONTRACT_FAULT; -#ifdef BIT64 +#ifdef HOST_64BIT // size_t's too big on 64-bit platforms so we check for overflow if(n > (size_t)(1<<31)) ThrowOutOfMemory(); #endif @@ -399,7 +399,7 @@ void * __cdecl operator new[](size_t n, StackingAllocator * alloc) STATIC_CONTRACT_THROWS; STATIC_CONTRACT_FAULT; -#ifdef BIT64 +#ifdef HOST_64BIT // size_t's too big on 64-bit platforms so we check for overflow if(n > (size_t)(1<<31)) ThrowOutOfMemory(); #else @@ -418,7 +418,7 @@ void * __cdecl operator new(size_t n, StackingAllocator * alloc, const NoThrow&) STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FAULT; -#ifdef BIT64 +#ifdef HOST_64BIT // size_t's too big on 64-bit platforms so we check for overflow if(n > (size_t)(1<<31)) return NULL; #endif @@ -431,7 +431,7 @@ void * __cdecl operator new[](size_t n, StackingAllocator * alloc, const NoThrow STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_FAULT; -#ifdef BIT64 +#ifdef HOST_64BIT // size_t's too big on 64-bit platforms so we check for overflow if(n > (size_t)(1<<31)) return NULL; #else diff --git a/src/coreclr/src/vm/stackwalk.cpp b/src/coreclr/src/vm/stackwalk.cpp index a81667157b77e..1cc74c32222fc 100644 --- a/src/coreclr/src/vm/stackwalk.cpp +++ b/src/coreclr/src/vm/stackwalk.cpp @@ -59,13 +59,13 @@ OBJECTREF* CrawlFrame::GetAddrOfSecurityObject() { _ASSERTE(pFunc); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (isCachedMethod) { return pSecurityObject; } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { return (static_cast (GetCodeManager()->GetAddrOfSecurityObject(this))); } @@ -165,7 +165,7 @@ OBJECTREF CrawlFrame::GetThisPointer() // As discussed in the specification comment at the declaration, the precondition, unfortunately, // differs by architecture. @TODO: fix this. -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) _ASSERTE_MSG((pFunc->IsSharedByGenericInstantiations() && pFunc->AcquiresInstMethodTableFromThis()) || pFunc->IsSynchronized(), "Precondition"); @@ -189,13 +189,13 @@ OBJECTREF CrawlFrame::GetThisPointer() //@TODO: What about other calling conventions? 
// _ASSERT(pFunc()->GetCallSig()->CALLING CONVENTION); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // @TODO: PORT: we need to find the this pointer without triggering a GC // or find a way to make this method GC_TRIGGERS return NULL; #else return (dac_cast(pFrame))->GetThis(); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } } @@ -208,7 +208,7 @@ OBJECTREF CrawlFrame::GetThisPointer() TADDR CrawlFrame::GetAmbientSPFromCrawlFrame() { SUPPORTS_DAC; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // we set nesting level to zero because it won't be used for esp-framed methods, // and zero is at least valid for ebp based methods (where we won't use the ambient esp anyways) DWORD nestingLevel = 0; @@ -220,7 +220,7 @@ TADDR CrawlFrame::GetAmbientSPFromCrawlFrame() GetCodeManState() ); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return GetRegisterSet()->pCurrentContext->Sp; #else return NULL; @@ -261,7 +261,7 @@ PTR_VOID CrawlFrame::GetParamTypeArg() return NULL; } -#ifdef BIT64 +#ifdef HOST_64BIT if (!pFunc->IsSharedByGenericInstantiations() || !(pFunc->RequiresInstMethodTableArg() || pFunc->RequiresInstMethodDescArg())) { @@ -269,7 +269,7 @@ PTR_VOID CrawlFrame::GetParamTypeArg() // and actually has a param type arg return NULL; } -#endif // BIT64 +#endif // HOST_64BIT _ASSERTE(pFrame); _ASSERTE(pFunc); @@ -351,7 +351,7 @@ bool CrawlFrame::IsGcSafe() return GetCodeManager()->IsGcSafe(&codeInfo, GetRelOffset()); } -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) bool CrawlFrame::HasTailCalls() { CONTRACTL { @@ -362,7 +362,7 @@ bool CrawlFrame::HasTailCalls() return GetCodeManager()->HasTailCalls(&codeInfo); } -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 inline void CrawlFrame::GotoNextFrame() { @@ -606,17 +606,17 @@ PCODE Thread::VirtualUnwindCallFrame(T_CONTEXT* pContext, if (pCodeInfo == NULL) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pFunctionEntry = RtlLookupFunctionEntry(uControlPc, ARM_ONLY((DWORD*))(&uImageBase), NULL); -#else // !FEATURE_PAL +#else // !TARGET_UNIX EECodeInfo codeInfo; codeInfo.Init(uControlPc); pFunctionEntry = codeInfo.GetFunctionEntry(); uImageBase = (UINT_PTR)codeInfo.GetModuleBase(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } else { @@ -627,7 +627,7 @@ PCODE Thread::VirtualUnwindCallFrame(T_CONTEXT* pContext, // expects this indirection to be resolved, so we use RUNTIME_FUNCTION of the hot code even // if we are in cold code. -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) UINT_PTR uImageBaseFromOS; PT_RUNTIME_FUNCTION pFunctionEntryFromOS; @@ -638,7 +638,7 @@ PCODE Thread::VirtualUnwindCallFrame(T_CONTEXT* pContext, // Note that he address returned from the OS is different from the one we have computed // when unwind info is registered using RtlAddGrowableFunctionTable. Compare RUNTIME_FUNCTION content. 
_ASSERTE( (uImageBase == uImageBaseFromOS) && (memcmp(pFunctionEntry, pFunctionEntryFromOS, sizeof(RUNTIME_FUNCTION)) == 0) ); -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX } if (pFunctionEntry) @@ -672,7 +672,7 @@ PCODE Thread::VirtualUnwindLeafCallFrame(T_CONTEXT* pContext) { PCODE uControlPc; -#if defined(_DEBUG) && !defined(FEATURE_PAL) +#if defined(_DEBUG) && !defined(TARGET_UNIX) UINT_PTR uImageBase; PT_RUNTIME_FUNCTION pFunctionEntry = RtlLookupFunctionEntry((UINT_PTR)GetIP(pContext), @@ -680,14 +680,14 @@ PCODE Thread::VirtualUnwindLeafCallFrame(T_CONTEXT* pContext) NULL); CONSISTENCY_CHECK(NULL == pFunctionEntry); -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) uControlPc = *(ULONGLONG*)pContext->Rsp; pContext->Rsp += sizeof(ULONGLONG); -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) uControlPc = TADDR(pContext->Lr); @@ -717,16 +717,16 @@ PCODE Thread::VirtualUnwindNonLeafCallFrame(T_CONTEXT* pContext, KNONVOLATILE_CO CONTRACTL_END; PCODE uControlPc = GetIP(pContext); -#ifdef BIT64 +#ifdef HOST_64BIT UINT64 EstablisherFrame; -#else // BIT64 +#else // HOST_64BIT DWORD EstablisherFrame; -#endif // BIT64 +#endif // HOST_64BIT PVOID HandlerData; if (NULL == pFunctionEntry) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pFunctionEntry = RtlLookupFunctionEntry(uControlPc, ARM_ONLY((DWORD*))(&uImageBase), NULL); @@ -775,9 +775,9 @@ UINT_PTR Thread::VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext) } #endif // FEATURE_WRITEBARRIER_COPY -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX uControlPc = VirtualUnwindCallFrame(pContext); -#else // !FEATURE_PAL +#else // !TARGET_UNIX #ifdef VSD_STUB_CAN_THROW_AV if (IsIPinVirtualStub(uControlPc)) @@ -801,7 +801,7 @@ UINT_PTR Thread::VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext) { break; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } return uControlPc; @@ -903,7 +903,7 @@ StackWalkAction Thread::MakeStackwalkerCallback( } -#if !defined(DACCESS_COMPILE) && defined(_TARGET_X86_) && !defined(FEATURE_EH_FUNCLETS) +#if !defined(DACCESS_COMPILE) && defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) #define STACKWALKER_MAY_POP_FRAMES #endif @@ -1369,14 +1369,14 @@ BOOL StackFrameIterator::ResetRegDisp(PREGDISPLAY pRegDisp, } #endif // PROCESS_EXPLICIT_FRAME_BEFORE_MANAGED_FRAME -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // special processing on x86; see below for more information TADDR curEBP = GetRegdisplayFP(m_crawl.pRD); CONTEXT tmpCtx; REGDISPLAY tmpRD; CopyRegDisplay(m_crawl.pRD, &tmpRD, &tmpCtx); -#endif // _TARGET_X86_ +#endif // TARGET_X86 // // The basic idea is to loop the frame chain until we find an explicit frame whose address is below @@ -1407,7 +1407,7 @@ BOOL StackFrameIterator::ResetRegDisp(PREGDISPLAY pRegDisp, // this check is sufficient on WIN64 if (dac_cast(m_crawl.pFrame) >= curSP) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // check the IP if (m_crawl.pFrame->GetReturnAddress() != curPc) { @@ -1422,9 +1422,9 @@ BOOL StackFrameIterator::ResetRegDisp(PREGDISPLAY pRegDisp, break; } } -#else // !_TARGET_X86_ +#else // !TARGET_X86 break; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } // if the REGDISPLAY represents the managed stack frame at a M2U transition boundary, @@ -1748,7 +1748,7 @@ StackWalkAction StackFrameIterator::Filter(void) // Check if we are in the mode of enumerating GC references (or not) if (m_flags & 
GC_FUNCLET_REFERENCE_REPORTING) { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // For interleaved exception handling on non-windows systems, we need to find out if the current frame // was a caller of an already executed exception handler based on the previous exception trackers. // The handler funclet frames are already gone from the stack, so the exception trackers are the @@ -1810,7 +1810,7 @@ StackWalkAction StackFrameIterator::Filter(void) fSkippingFunclet = true; } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX fRecheckCurrentFrame = false; // Do we already have a reference to a funclet parent? @@ -2330,7 +2330,7 @@ StackWalkAction StackFrameIterator::NextRaw(void) if (m_frameState == SFITER_SKIPPED_FRAME_FUNCTION) { -#if !defined(_TARGET_X86_) && defined(_DEBUG) +#if !defined(TARGET_X86) && defined(_DEBUG) // make sure we're not skipping a different transition if (m_crawl.pFrame->NeedsUpdateRegDisplay()) { @@ -2348,7 +2348,7 @@ StackWalkAction StackFrameIterator::NextRaw(void) CONSISTENCY_CHECK(GetControlPC(m_crawl.pRD) == m_crawl.pFrame->GetReturnAddress()); } } -#endif // !defined(_TARGET_X86_) && defined(_DEBUG) +#endif // !defined(TARGET_X86) && defined(_DEBUG) #if defined(STACKWALKER_MAY_POP_FRAMES) if (m_flags & POPFRAMES) @@ -2420,7 +2420,7 @@ StackWalkAction StackFrameIterator::NextRaw(void) { // Now find out if we need to leave monitors -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // // For non-x86 platforms, the JIT generates try/finally to leave monitors; for x86, the VM handles the monitor // @@ -2465,7 +2465,7 @@ StackWalkAction StackFrameIterator::NextRaw(void) END_GCX_ASSERT_COOP; } #endif // STACKWALKER_MAY_POP_FRAMES -#endif // _TARGET_X86_ +#endif // TARGET_X86 #if !defined(ELIMINATE_FEF) // FaultingExceptionFrame is special case where it gets @@ -2964,7 +2964,7 @@ void StackFrameIterator::ProcessCurrentFrame(void) _ASSERTE (m_crawl.isCachedMethod != m_crawl.stackWalkCache.IsEmpty()); m_crawl.pSecurityObject = NULL; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (m_crawl.isCachedMethod && m_crawl.stackWalkCache.m_CacheEntry.HasSecurityObject()) { // pCallback will use this to save time on GetAddrOfSecurityObject @@ -2973,7 +2973,7 @@ void StackFrameIterator::ProcessCurrentFrame(void) m_crawl.pRD, &stackwalkCacheUnwindInfo); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } #endif // DACCESS_COMPILE @@ -3081,10 +3081,10 @@ BOOL StackFrameIterator::CheckForSkippedFrames(void) m_crawl.pFunc->AsDynamicMethodDesc()->HasMDContextArg(); if (fHandleSkippedFrames -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 || // On x86 we have already reported the InlinedCallFrame, don't report it again. 
(InlinedCallFrame::FrameHasActiveCall(m_crawl.pFrame) && !fReportInteropMD) -#endif // _TARGET_X86_ +#endif // TARGET_X86 ) { m_crawl.GotoNextFrame(); @@ -3293,10 +3293,10 @@ void StackFrameIterator::PostProcessingForNoFrameTransition() } // StackFrameIterator::PostProcessingForNoFrameTransition() -#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) +#if defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) static CrstStatic g_StackwalkCacheLock; // Global StackwalkCache lock; only used on AMD64 EXTERN_C void moveOWord(LPVOID src, LPVOID target); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 /* copies 64-bit *src to *target, atomically accessing the data @@ -3306,7 +3306,7 @@ inline static void atomicMoveCacheEntry(UINT64* src, UINT64* target) { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // the most negative value is used a sort of integer infinity // value, so it have to be avoided _ASSERTE(*src != 0x8000000000000000); @@ -3317,7 +3317,7 @@ inline static void atomicMoveCacheEntry(UINT64* src, UINT64* target) mov eax, target fistp qword ptr [eax] } -#elif defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) +#elif defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) // On AMD64 there's no way to move 16 bytes atomically, so we need to take a lock before calling moveOWord(). CrstHolder ch(&g_StackwalkCacheLock); moveOWord(src, target); @@ -3381,9 +3381,9 @@ StackwalkCache::StackwalkCache() // static void StackwalkCache::Init() { -#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) +#if defined(TARGET_AMD64) && !defined(DACCESS_COMPILE) g_StackwalkCacheLock.Init(CrstSecurityStackwalkCache, CRST_UNSAFE_ANYMODE); -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 } /* @@ -3407,7 +3407,7 @@ BOOL StackwalkCache::Lookup(UINT_PTR IP) GC_NOTRIGGER; } CONTRACTL_END; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) _ASSERTE(Enabled()); _ASSERTE(IP); @@ -3425,9 +3425,9 @@ BOOL StackwalkCache::Lookup(UINT_PTR IP) #endif return (IP == m_CacheEntry.IP); -#else // _TARGET_X86_ +#else // TARGET_X86 return FALSE; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } /* diff --git a/src/coreclr/src/vm/stackwalk.h b/src/coreclr/src/vm/stackwalk.h index e9a28f509d8d8..8f74db8a01664 100644 --- a/src/coreclr/src/vm/stackwalk.h +++ b/src/coreclr/src/vm/stackwalk.h @@ -36,14 +36,14 @@ class AppDomain; // on the stack. The FEF is used for unwinding. If not defined, the unwinding // uses the exception context. #define USE_FEF // to mark where code needs to be changed to eliminate the FEF -#if defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if defined(TARGET_X86) && !defined(TARGET_UNIX) #undef USE_FEF // Turn off the FEF use on x86. 
#define ELIMINATE_FEF #else #if defined(ELIMINATE_FEF) #undef ELIMINATE_FEF #endif -#endif // _TARGET_X86_ && !FEATURE_PAL +#endif // TARGET_X86 && !TARGET_UNIX #if defined(FEATURE_EH_FUNCLETS) #define RECORD_RESUMABLE_FRAME_SP @@ -69,9 +69,9 @@ class CrawlFrame { public: -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 friend StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data); -#endif // _TARGET_X86_ +#endif // TARGET_X86 //************************************************************************ // Functions available for the callbacks (using the current pCrawlFrame) @@ -323,9 +323,9 @@ class CrawlFrame */ bool IsGcSafe(); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) bool HasTailCalls(); -#endif // _TARGET_ARM_ || _TARGET_ARM64_ +#endif // TARGET_ARM || TARGET_ARM64 PREGDISPLAY GetRegisterSet() { diff --git a/src/coreclr/src/vm/stackwalktypes.h b/src/coreclr/src/vm/stackwalktypes.h index 8b3048fed6819..f013d3de81630 100644 --- a/src/coreclr/src/vm/stackwalktypes.h +++ b/src/coreclr/src/vm/stackwalktypes.h @@ -83,14 +83,14 @@ typedef StackWalkAction (*PSTACKWALKFRAMESCALLBACK)( struct StackwalkCacheUnwindInfo { -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) ULONG RBPOffset; ULONG RSPOffsetFromUnwindInfo; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 size_t securityObjectOffset; // offset of SecurityObject. 0 if there is no security object BOOL fUseEbp; // Is EBP modified by the method - either for a frame-pointer or for a scratch-register? BOOL fUseEbpAsFrameReg; // use EBP as the frame pointer? -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 inline StackwalkCacheUnwindInfo() { SUPPORTS_DAC; ZeroMemory(this, sizeof(StackwalkCacheUnwindInfo)); } StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry); @@ -98,11 +98,11 @@ struct StackwalkCacheUnwindInfo //************************************************************************ -#if defined(BIT64) +#if defined(HOST_64BIT) #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x10 -#else // !BIT64 +#else // !HOST_64BIT #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x8 -#endif // !BIT64 +#endif // !HOST_64BIT struct DECLSPEC_ALIGN(STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY) @@ -113,16 +113,16 @@ StackwalkCacheEntry // as StackwalkCacheEntry, it's required for atomicMOVQ using FILD/FISTP instructions // UINT_PTR IP; -#if !defined(_TARGET_AMD64_) +#if !defined(TARGET_AMD64) WORD ESPOffset:15; // stack offset (frame size + pending arguments + etc) WORD securityObjectOffset:3;// offset of SecurityObject. 0 if there is no security object WORD fUseEbp:1; // For ESP methods, is EBP touched at all? WORD fUseEbpAsFrameReg:1; // use EBP as the frame register? WORD argSize:11; // size of args pushed on stack -#else // _TARGET_AMD64_ +#else // TARGET_AMD64 DWORD RSPOffset; DWORD RBPOffset; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 inline BOOL Init(UINT_PTR IP, UINT_PTR SPOffset, @@ -133,7 +133,7 @@ StackwalkCacheEntry this->IP = IP; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) this->ESPOffset = SPOffset; this->argSize = argSize; @@ -147,7 +147,7 @@ StackwalkCacheEntry // return success if we fit SPOffset and argSize into return ((this->ESPOffset == SPOffset) && (this->argSize == argSize)); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // The size of a stack frame is guaranteed to fit in 4 bytes, so we don't need to check RSPOffset and RBPOffset. // The actual SP offset may be bigger than the offset we get from the unwind info because of stack allocations. 
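The BIT64 to HOST_64BIT renames in the stack-walk headers above also show why the old name was ambiguous: StackwalkCacheEntry lives in the memory of the running runtime, so its size and alignment follow the host's pointer width, which can differ from the target's when cross-compiling (TARGET_64BIT, used elsewhere in this diff, covers the code-generation side). Below is a self-contained sketch of the alignment choice; HOST_64BIT is derived from the pointer size here only so the assertion compiles on its own, whereas in the real build the macro comes from the build configuration.

#include <cstdint>

// Sketch only: stand-in definition of HOST_64BIT so this snippet is self-contained.
#if UINTPTR_MAX > 0xFFFFFFFFu
  #define HOST_64BIT 1
#endif

#ifdef HOST_64BIT
  #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x10
#else
  #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x8
#endif

// The boundary is two host pointers wide, in line with stackwalktypes.h, which
// asserts that a StackwalkCacheEntry is 2 * sizeof(UINT_PTR) on x86 and AMD64.
static_assert(STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY == 2 * sizeof(void*),
              "alignment boundary should be two host pointers");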
@@ -158,42 +158,42 @@ StackwalkCacheEntry _ASSERTE(FitsIn(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo))); this->RBPOffset = static_cast(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo)); return TRUE; -#else // !_TARGET_X86_ && !_TARGET_AMD64_ +#else // !TARGET_X86 && !TARGET_AMD64 return FALSE; -#endif // !_TARGET_X86_ && !_TARGET_AMD64_ +#endif // !TARGET_X86 && !TARGET_AMD64 } inline BOOL HasSecurityObject() { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return securityObjectOffset != 0; -#else // !_TARGET_X86_ +#else // !TARGET_X86 // On AMD64 we don't save anything by grabbing the security object before it is needed. This is because // we need to crack the GC info in order to find the security object, and to unwind we only need to // crack the unwind info. return FALSE; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 } inline BOOL IsSafeToUseCache() { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return (!fUseEbp || fUseEbpAsFrameReg); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return TRUE; -#else // !_TARGET_X86_ && !_TARGET_AMD64_ +#else // !TARGET_X86 && !TARGET_AMD64 return FALSE; -#endif // !_TARGET_X86_ && !_TARGET_AMD64_ +#endif // !TARGET_X86 && !TARGET_AMD64 } }; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) static_assert_no_msg(sizeof(StackwalkCacheEntry) == 2 * sizeof(UINT_PTR)); -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 //************************************************************************ @@ -234,13 +234,13 @@ inline StackwalkCacheUnwindInfo::StackwalkCacheUnwindInfo(StackwalkCacheEntry * { LIMITED_METHOD_CONTRACT; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) RBPOffset = pCacheEntry->RBPOffset; -#else // !_TARGET_AMD64_ +#else // !TARGET_AMD64 securityObjectOffset = pCacheEntry->securityObjectOffset; fUseEbp = pCacheEntry->fUseEbp; fUseEbpAsFrameReg = pCacheEntry->fUseEbpAsFrameReg; -#endif // !_TARGET_AMD64_ +#endif // !TARGET_AMD64 } #endif // __STACKWALKTYPES_H__ diff --git a/src/coreclr/src/vm/stubcache.cpp b/src/coreclr/src/vm/stubcache.cpp index 9add4b230a06e..2766537f05a8c 100644 --- a/src/coreclr/src/vm/stubcache.cpp +++ b/src/coreclr/src/vm/stubcache.cpp @@ -35,7 +35,7 @@ StubCacheBase::StubCacheBase(LoaderHeap *pHeap) : { WRAPPER_NO_CONTRACT; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (m_heap == NULL) m_heap = SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap(); #endif diff --git a/src/coreclr/src/vm/stubgen.cpp b/src/coreclr/src/vm/stubgen.cpp index 5c37846870dc8..111a8a1770bd3 100644 --- a/src/coreclr/src/vm/stubgen.cpp +++ b/src/coreclr/src/vm/stubgen.cpp @@ -909,11 +909,11 @@ BYTE* ILStubLinker::GenerateCodeWorker(BYTE* pbBuffer, ILInstruction* pInstrBuff case 8: { UINT64 uVal = pInstrBuffer[i].uArg; -#ifndef BIT64 // We don't have room on 32-bit platforms to store the CLR_NAN_64 value, so +#ifndef HOST_64BIT // We don't have room on 32-bit platforms to store the CLR_NAN_64 value, so // we use a special value to represent CLR_NAN_64 and then recreate it here. 
if ((instr == ILCodeStream::CEE_LDC_R8) && (((UINT32)uVal) == ILCodeStream::SPECIAL_VALUE_NAN_64_ON_32)) uVal = CLR_NAN_64; -#endif // BIT64 +#endif // HOST_64BIT SET_UNALIGNED_VAL64(pbBuffer, uVal); } break; @@ -1289,7 +1289,7 @@ void ILCodeStream::EmitLDC(DWORD_PTR uConst) { WRAPPER_NO_CONTRACT; Emit( -#ifdef BIT64 +#ifdef HOST_64BIT CEE_LDC_I8 #else CEE_LDC_I4 @@ -1304,14 +1304,14 @@ void ILCodeStream::EmitLDC_R4(UINT32 uConst) void ILCodeStream::EmitLDC_R8(UINT64 uConst) { STANDARD_VM_CONTRACT; -#ifndef BIT64 // We don't have room on 32-bit platforms to stor the CLR_NAN_64 value, so +#ifndef HOST_64BIT // We don't have room on 32-bit platforms to stor the CLR_NAN_64 value, so // we use a special value to represent CLR_NAN_64 and then recreate it later. CONSISTENCY_CHECK(((UINT32)uConst) != SPECIAL_VALUE_NAN_64_ON_32); if (uConst == CLR_NAN_64) uConst = SPECIAL_VALUE_NAN_64_ON_32; else CONSISTENCY_CHECK(FitsInU4(uConst)); -#endif // BIT64 +#endif // HOST_64BIT Emit(CEE_LDC_R8, 1, (UINT_PTR)uConst); } void ILCodeStream::EmitLDELEMA(int token) @@ -2432,7 +2432,7 @@ void ILStubLinker::TransformArgForJIT(LocalDesc *pLoc) case ELEMENT_TYPE_PTR: { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (pLoc->bIsCopyConstructed) { // The only pointers that we don't transform to ELEMENT_TYPE_I are those that are @@ -2443,7 +2443,7 @@ void ILStubLinker::TransformArgForJIT(LocalDesc *pLoc) // because we are not supposed to make a copy. } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 { pLoc->ElementType[0] = ELEMENT_TYPE_I; pLoc->cbType = 1; diff --git a/src/coreclr/src/vm/stubgen.h b/src/coreclr/src/vm/stubgen.h index 3b2e4d1936ab3..7476115df5561 100644 --- a/src/coreclr/src/vm/stubgen.h +++ b/src/coreclr/src/vm/stubgen.h @@ -787,9 +787,9 @@ class ILCodeStream UINT m_uCurInstrIdx; ILStubLinker::CodeStreamType m_codeStreamType; // Type of the ILCodeStream -#ifndef BIT64 +#ifndef HOST_64BIT const static UINT32 SPECIAL_VALUE_NAN_64_ON_32 = 0xFFFFFFFF; -#endif // BIT64 +#endif // HOST_64BIT }; #define TOKEN_ILSTUB_TARGET_SIG (TokenFromRid(0xFFFFFF, mdtSignature)) diff --git a/src/coreclr/src/vm/stubhelpers.cpp b/src/coreclr/src/vm/stubhelpers.cpp index 2408c421db86d..ddba024b80862 100644 --- a/src/coreclr/src/vm/stubhelpers.cpp +++ b/src/coreclr/src/vm/stubhelpers.cpp @@ -189,7 +189,7 @@ FORCEINLINE static void GetCOMIPFromRCW_ClearFP() { LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // As per ASURT 146699 we need to clear FP state before calling to COM // the following sequence was previously generated to compiled ML stubs // and is faster than _clearfp(). @@ -201,7 +201,7 @@ FORCEINLINE static void GetCOMIPFromRCW_ClearFP() fnclex NoNeedToClear: } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } FORCEINLINE static SOleTlsData *GetOrCreateOleTlsData() @@ -209,14 +209,14 @@ FORCEINLINE static SOleTlsData *GetOrCreateOleTlsData() LIMITED_METHOD_CONTRACT; SOleTlsData *pOleTlsData; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // This saves 1 memory instruction over NtCurretTeb()->ReservedForOle because // NtCurrentTeb() reads _TEB.NtTib.Self which is the same as what FS:0 already // points to. 
pOleTlsData = (SOleTlsData *)(ULONG_PTR)__readfsdword(offsetof(TEB, ReservedForOle)); -#else // _TARGET_X86_ +#else // TARGET_X86 pOleTlsData = (SOleTlsData *)NtCurrentTeb()->ReservedForOle; -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (pOleTlsData == NULL) { pOleTlsData = (SOleTlsData *)SetupOleContext(); @@ -1018,7 +1018,7 @@ FCIMPL2(void*, StubHelpers::GetDelegateTarget, DelegateObject *pThisUNSAFE, UINT DELEGATEREF orefThis = (DELEGATEREF)ObjectToOBJECTREF(pThisUNSAFE); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // On x86 we wrap the call with a thunk that handles host notifications. SyncBlock *pSyncBlock = orefThis->PassiveGetSyncBlock(); if (pSyncBlock != NULL) @@ -1034,9 +1034,9 @@ FCIMPL2(void*, StubHelpers::GetDelegateTarget, DelegateObject *pThisUNSAFE, UINT } } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(BIT64) +#if defined(HOST_64BIT) UINT_PTR target = (UINT_PTR)orefThis->GetMethodPtrAux(); // See code:GenericPInvokeCalliHelper @@ -1047,9 +1047,9 @@ FCIMPL2(void*, StubHelpers::GetDelegateTarget, DelegateObject *pThisUNSAFE, UINT // see IL code gen in NDirectStubLinker::DoNDirect for details. *ppStubArg = target; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // @ARMTODO: Nothing to do for ARM yet since we don't support the hosted path. -#endif // BIT64, _TARGET_ARM_ +#endif // HOST_64BIT, TARGET_ARM if (pEntryPoint == NULL) { @@ -1695,7 +1695,7 @@ FCIMPL2(void, StubHelpers::LogPinnedArgument, MethodDesc *target, Object *pinned } FCIMPLEND -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT FCIMPL0(void*, StubHelpers::GetStubContextAddr) { FCALL_CONTRACT; @@ -1704,7 +1704,7 @@ FCIMPL0(void*, StubHelpers::GetStubContextAddr) UNREACHABLE_MSG("This is a JIT intrinsic!"); } FCIMPLEND -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT FCIMPL1(DWORD, StubHelpers::CalcVaListSize, VARARGS *varargs) { diff --git a/src/coreclr/src/vm/stubhelpers.h b/src/coreclr/src/vm/stubhelpers.h index 00b6a16e029fc..51de39efde061 100644 --- a/src/coreclr/src/vm/stubhelpers.h +++ b/src/coreclr/src/vm/stubhelpers.h @@ -97,9 +97,9 @@ class StubHelpers static FCDECL2(void, MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator); static FCDECL0(void*, GetStubContext); static FCDECL2(void, LogPinnedArgument, MethodDesc *localDesc, Object *nativeArg); -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT static FCDECL0(void*, GetStubContextAddr); -#endif // _TARGET_64BIT_ +#endif // TARGET_64BIT static FCDECL1(DWORD, CalcVaListSize, VARARGS *varargs); static FCDECL3(void, ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE); static FCDECL3(void, ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE); diff --git a/src/coreclr/src/vm/stublink.cpp b/src/coreclr/src/vm/stublink.cpp index 0f979dfdfd880..f13fcebabb9e2 100644 --- a/src/coreclr/src/vm/stublink.cpp +++ b/src/coreclr/src/vm/stublink.cpp @@ -95,7 +95,7 @@ struct LabelRef : public CodeElement #ifdef STUBLINKER_GENERATES_UNWIND_INFO -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // List of unwind operations, queued in StubLinker::m_pUnwindInfoList. 
struct IntermediateUnwindInfo { @@ -104,7 +104,7 @@ struct IntermediateUnwindInfo UINT LocalOffset; UNWIND_CODE rgUnwindCode[1]; // variable length, depends on first entry's UnwindOp }; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 StubUnwindInfoHeapSegment *g_StubHeapSegments; @@ -236,7 +236,7 @@ bool UnregisterUnwindInfoInLoaderHeapCallback (PVOID pvArgs, PVOID pvAllocationB _ASSERTE((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment); DeleteEEFunctionTable(pStubHeapSegment); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (pStubHeapSegment->pUnwindInfoTable != 0) delete pStubHeapSegment->pUnwindInfoTable; #endif @@ -352,7 +352,7 @@ StubLinker::StubLinker() m_pPatchLabel = NULL; m_stackSize = 0; m_fDataOnly = FALSE; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM m_fProlog = FALSE; m_cCalleeSavedRegs = 0; m_cbStackFrame = 0; @@ -362,12 +362,12 @@ StubLinker::StubLinker() #ifdef _DEBUG m_pUnwindInfoCheckLabel = NULL; #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_pUnwindInfoList = NULL; m_nUnwindSlots = 0; m_fHaveFramePointer = FALSE; #endif -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 m_fProlog = FALSE; m_cIntRegArgs = 0; m_cVecRegArgs = 0; @@ -1158,7 +1158,7 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap) #ifdef STUBLINKER_GENERATES_UNWIND_INFO -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) // See RtlVirtualUnwind in base\ntos\rtl\amd64\exdsptch.c @@ -1288,7 +1288,7 @@ UNWIND_CODE *StubLinker::AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots /*= 0*/) return pUnwindCode; } -#endif // defined(_TARGET_AMD64_) +#endif // defined(TARGET_AMD64) struct FindBlockArgs { @@ -1390,7 +1390,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap) StubUnwindInfoHeader * pUnwindInfoHeader = pStub->GetUnwindInfoHeader(); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 UNWIND_CODE *pDestUnwindCode = &pUnwindInfoHeader->UnwindInfo.UnwindCode[0]; #ifdef _DEBUG @@ -1506,7 +1506,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap) if (sTemp.IsOverflow()) COMPlusThrowArithmetic(); RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value()); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // // Fill in the RUNTIME_FUNCTION struct for this prologue. // @@ -1652,7 +1652,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap) ((int)epilogUnwindCodeIndex << 23)| ((int)codeWordsCount << 28); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (!m_fProlog) { // If EmitProlog isn't called. 
This is a leaf function which doesn't need any unwindInfo @@ -1832,7 +1832,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap) pNewStubHeapSegment->pbBaseAddress = pbBaseAddress; pNewStubHeapSegment->cbSegment = cbSegment; pNewStubHeapSegment->pUnwindHeaderList = NULL; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 pNewStubHeapSegment->pUnwindInfoTable = NULL; #endif @@ -1860,7 +1860,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap) pHeader->pNext = pStubHeapSegment->pUnwindHeaderList; pStubHeapSegment->pUnwindHeaderList = pHeader; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Publish Unwind info to ETW stack crawler UnwindInfoTable::AddToUnwindInfoTable( &pStubHeapSegment->pUnwindInfoTable, pCurFunction, @@ -1878,7 +1878,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap) } #endif // STUBLINKER_GENERATES_UNWIND_INFO -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void StubLinker::DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs) { m_fProlog = TRUE; @@ -1886,7 +1886,7 @@ void StubLinker::DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL f m_cbStackFrame = cbStackFrame; m_fPushArgRegs = fPushArgRegs; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) void StubLinker::DescribeProlog(UINT cIntRegArgs, UINT cVecRegArgs, UINT cCalleeSavedRegs, UINT cbStackSpace) { m_fProlog = TRUE; @@ -1911,7 +1911,7 @@ UINT StubLinker::GetStackFrameSize() } -#endif // ifdef _TARGET_ARM_, elif defined(_TARGET_ARM64_) +#endif // ifdef TARGET_ARM, elif defined(TARGET_ARM64) #endif // #ifndef DACCESS_COMPILE @@ -1989,7 +1989,7 @@ VOID Stub::DeleteStub() if (pSegment) { PBYTE pbCode = (PBYTE)GetEntryPointInternal(); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 UnwindInfoTable::RemoveFromUnwindInfoTable(&pSegment->pUnwindInfoTable, (TADDR) pSegment->pbBaseAddress, (TADDR) pbCode); #endif @@ -2038,7 +2038,7 @@ VOID Stub::DeleteStub() if (!pSegment->pUnwindHeaderList) { DeleteEEFunctionTable(pSegment); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (pSegment->pUnwindInfoTable != 0) delete pSegment->pUnwindInfoTable; #endif @@ -2061,7 +2061,7 @@ VOID Stub::DeleteStub() FillMemory(this+1, m_numCodeBytes, 0xcc); #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX DeleteExecutable((BYTE*)GetAllocationBase()); #else delete [] (BYTE*)GetAllocationBase(); @@ -2179,7 +2179,7 @@ Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags) BYTE *pBlock; if (pHeap == NULL) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pBlock = new (executable) BYTE[totalSize]; #else pBlock = new BYTE[totalSize]; @@ -2224,7 +2224,7 @@ void Stub::SetupStub(int numCodeBytes, DWORD flags #ifdef _DEBUG m_signature = kUsedStub; #else -#ifdef BIT64 +#ifdef HOST_64BIT m_pad_code_bytes = 0; #endif #endif diff --git a/src/coreclr/src/vm/stublink.h b/src/coreclr/src/vm/stublink.h index 0d2c7b33e8fcf..9c51988d6419b 100644 --- a/src/coreclr/src/vm/stublink.h +++ b/src/coreclr/src/vm/stublink.h @@ -70,9 +70,9 @@ struct LabelRef; struct CodeElement; struct IntermediateUnwindInfo; -#if !defined(_TARGET_X86_) && !defined(FEATURE_PAL) +#if !defined(TARGET_X86) && !defined(TARGET_UNIX) #define STUBLINKER_GENERATES_UNWIND_INFO -#endif // !_TARGET_X86_ && !FEATURE_PAL +#endif // !TARGET_X86 && !TARGET_UNIX #ifdef STUBLINKER_GENERATES_UNWIND_INFO @@ -108,7 +108,7 @@ struct StubUnwindInfoHeapSegment StubUnwindInfoHeader *pUnwindHeaderList; StubUnwindInfoHeapSegment *pNext; -#ifdef BIT64 +#ifdef HOST_64BIT class UnwindInfoTable* pUnwindInfoTable; // 
Used to publish unwind info to ETW stack crawler #endif }; @@ -234,9 +234,9 @@ class StubLinker void SetDataOnly(BOOL fDataOnly = TRUE) { LIMITED_METHOD_CONTRACT; m_fDataOnly = fDataOnly; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM void DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) void DescribeProlog(UINT cIntRegArgs, UINT cVecRegArgs, UINT cCalleeSavedRegs, UINT cbStackFrame); UINT GetSavedRegArgsOffset(); UINT GetStackFrameSize(); @@ -309,23 +309,23 @@ class StubLinker // internals. BOOL m_fDataOnly; // the stub contains only data - does not need FlushInstructionCache -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM protected: BOOL m_fProlog; // True if DescribeProlog has been called UINT m_cCalleeSavedRegs; // Count of callee saved registers (0 == none, 1 == r4, 2 == // r4-r5 etc. up to 8 == r4-r11) UINT m_cbStackFrame; // Count of bytes in the stack frame (excl of saved regs) BOOL m_fPushArgRegs; // If true, r0-r3 are saved before callee saved regs -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 protected: BOOL m_fProlog; // True if DescribeProlog has been called UINT m_cIntRegArgs; // Count of int register arguments (x0 - x7) UINT m_cVecRegArgs; // Count of FP register arguments (v0 - v7) UINT m_cCalleeSavedRegs; // Count of callee saved registers (x19 - x28) UINT m_cbStackSpace; // Additional stack space for return buffer and stack alignment -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #ifdef STUBLINKER_GENERATES_UNWIND_INFO @@ -336,7 +336,7 @@ class StubLinker // code from 14 to 5 bytes. #endif -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 IntermediateUnwindInfo *m_pUnwindInfoList; UINT m_nUnwindSlots; // number of slots to allocate at end, == UNWIND_INFO::CountOfCodes BOOL m_fHaveFramePointer; // indicates stack operations no longer need to be recorded @@ -350,9 +350,9 @@ class StubLinker return sizeof(T_RUNTIME_FUNCTION) + offsetof(UNWIND_INFO, UnwindCode) + m_nUnwindSlots * sizeof(UNWIND_CODE); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #define MAX_UNWIND_CODE_WORDS 5 /* maximum number of 32-bit words to store unwind codes */ // Cache information about the stack frame set up in the prolog and use it in the generation of the // epilog. 
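One more pattern worth calling out, since it recurs in several files above (qcall.h, sampleprofiler.cpp, stublink.cpp): two older spellings of the same OS test, FEATURE_PAL in most VM code and PLATFORM_UNIX in qcall.h, both become TARGET_UNIX, so a single macro now answers whether the target operating system is Unix-like. The snippet below is a minimal illustration of that single switch and is not CoreCLR code; PlatformSleepMs is a hypothetical stand-in patterned on the SampleProfiler::PlatformSleep hunk, with plain nanosleep and Sleep in place of the PAL_nanosleep and ClrSleepEx calls used there.

#ifdef TARGET_UNIX
#include <time.h>

// Unix-like target: sleep via POSIX nanosleep (stand-in for the PAL path).
static void PlatformSleepMs(unsigned ms)
{
    struct timespec ts = { (time_t)(ms / 1000), (long)(ms % 1000) * 1000000L };
    nanosleep(&ts, nullptr);
}
#else
#include <windows.h>

// Windows target: sleep via the Win32 API (stand-in for the ClrSleepEx path).
static void PlatformSleepMs(unsigned ms)
{
    Sleep(ms);
}
#endif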
@@ -369,9 +369,9 @@ class StubLinker return c_nUnwindInfoSize; } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 #define MAX_UNWIND_CODE_WORDS 5 /* maximum number of 32-bit words to store unwind codes */ private: @@ -384,7 +384,7 @@ class StubLinker return c_nUnwindInfoSize; } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 #endif // STUBLINKER_GENERATES_UNWIND_INFO @@ -416,9 +416,9 @@ class StubLinker bool EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap); -#if defined(_TARGET_AMD64_) && defined(STUBLINKER_GENERATES_UNWIND_INFO) +#if defined(TARGET_AMD64) && defined(STUBLINKER_GENERATES_UNWIND_INFO) UNWIND_CODE *AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots = 0); -#endif // defined(_TARGET_AMD64_) && defined(STUBLINKER_GENERATES_UNWIND_INFO) +#endif // defined(TARGET_AMD64) && defined(STUBLINKER_GENERATES_UNWIND_INFO) }; //************************************************************************ @@ -636,7 +636,7 @@ class Stub TADDR pEntryPoint = dac_cast(GetEntryPointInternal()); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #ifndef THUMB_CODE #define THUMB_CODE 1 @@ -813,11 +813,11 @@ class Stub UINT32 m_signature; #else -#ifdef BIT64 +#ifdef HOST_64BIT //README ALIGNEMENT: in retail mode UINT m_numCodeBytes does not align to 16byte for the code // after the Stub struct. This is to pad properly UINT m_pad_code_bytes; -#endif // BIT64 +#endif // HOST_64BIT #endif // _DEBUG #ifdef _DEBUG diff --git a/src/coreclr/src/vm/stubmgr.cpp b/src/coreclr/src/vm/stubmgr.cpp index 36bef263003a3..23dee8851819a 100644 --- a/src/coreclr/src/vm/stubmgr.cpp +++ b/src/coreclr/src/vm/stubmgr.cpp @@ -1532,9 +1532,9 @@ PCODE RangeSectionStubManager::GetMethodThunkTarget(PCODE stubStartAddress) { WRAPPER_NO_CONTRACT; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) return rel32Decode(stubStartAddress+1); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) TADDR pInstr = PCODEToPINSTR(stubStartAddress); return *dac_cast(pInstr + 2 * sizeof(DWORD)); #else @@ -1824,9 +1824,9 @@ BOOL ILStubManager::TraceManager(Thread *thread, target = (PCODE)arg; // The value is mangled on 64-bit -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 target = target >> 1; // call target is encoded as (addr << 1) | 1 -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Unmanaged CALLI case 0x%x\n", target)); trace->InitForUnmanaged(target); @@ -1928,7 +1928,7 @@ static BOOL IsVarargPInvokeStub(PCODE stubStartAddress) if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub)) return TRUE; -#if !defined(_TARGET_X86_) && !defined(_TARGET_ARM64_) +#if !defined(TARGET_X86) && !defined(TARGET_ARM64) if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg)) return TRUE; #endif @@ -2115,7 +2115,7 @@ BOOL DelegateInvokeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) bool fIsStub = false; #ifndef DACCESS_COMPILE -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 fIsStub = fIsStub || (stubStartAddress == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub)); #endif #endif // !DACCESS_COMPILE @@ -2163,14 +2163,14 @@ BOOL DelegateInvokeStubManager::TraceManager(Thread *thread, TraceDestination *t pThis = NULL; // Retrieve the this pointer from the context. 
-#if defined(_TARGET_X86_) +#if defined(TARGET_X86) (*pRetAddr) = *(BYTE **)(size_t)(pContext->Esp); pThis = (BYTE*)(size_t)(pContext->Ecx); destAddr = *(PCODE*)(pThis + DelegateObject::GetOffsetOfMethodPtrAux()); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // // We need to check whether the following is the correct return address. @@ -2217,7 +2217,7 @@ BOOL DelegateInvokeStubManager::TraceManager(Thread *thread, TraceDestination *t } destAddr = orDelegate->GetMethodPtrAux(); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr); pThis = (BYTE*)(size_t)(pContext->R0); @@ -2229,7 +2229,7 @@ BOOL DelegateInvokeStubManager::TraceManager(Thread *thread, TraceDestination *t else offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtrAux(); destAddr = *(PCODE*)(pThis + offsetOfNextDest); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr); pThis = (BYTE*)(size_t)(pContext->X0); @@ -2389,10 +2389,10 @@ BOOL TailCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress) #if !defined(DACCESS_COMPILE) -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) EXTERN_C void STDCALL JIT_TailCallLeave(); EXTERN_C void STDCALL JIT_TailCallVSDLeave(); -#endif // _TARGET_X86_ +#endif // TARGET_X86 BOOL TailCallStubManager::TraceManager(Thread * pThread, TraceDestination * pTrace, @@ -2400,7 +2400,7 @@ BOOL TailCallStubManager::TraceManager(Thread * pThread, BYTE ** ppRetAddr) { WRAPPER_NO_CONTRACT; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) TADDR esp = GetSP(pContext); TADDR ebp = GetFP(pContext); @@ -2450,12 +2450,12 @@ BOOL TailCallStubManager::TraceManager(Thread * pThread, return TRUE; } -#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM) _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCall)); // The target address is the second argument -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 PCODE target = (PCODE)pContext->Rdx; #else PCODE target = (PCODE)pContext->R1; @@ -2464,12 +2464,12 @@ BOOL TailCallStubManager::TraceManager(Thread * pThread, pTrace->InitForStub(target); return TRUE; -#else // !_TARGET_X86_ && !_TARGET_AMD64_ && !_TARGET_ARM_ +#else // !TARGET_X86 && !TARGET_AMD64 && !TARGET_ARM _ASSERTE(!"TCSM::TM - TailCallStubManager should not be necessary on this platform"); return FALSE; -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 } #endif // !DACCESS_COMPILE diff --git a/src/coreclr/src/vm/stubmgr.h b/src/coreclr/src/vm/stubmgr.h index 96d8716ef1cea..6958f677da47c 100644 --- a/src/coreclr/src/vm/stubmgr.h +++ b/src/coreclr/src/vm/stubmgr.h @@ -887,13 +887,13 @@ class StubManagerHelpers public: static PCODE GetReturnAddress(T_CONTEXT * pContext) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return *dac_cast(pContext->Esp); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return *dac_cast(pContext->Rsp); -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return pContext->Lr; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return pContext->Lr; #else PORTABILITY_ASSERT("StubManagerHelpers::GetReturnAddress"); @@ -903,17 +903,17 @@ class StubManagerHelpers static PTR_Object GetThisPtr(T_CONTEXT * pContext) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return dac_cast(pContext->Ecx); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI return dac_cast(pContext->Rdi); #else return dac_cast(pContext->Rcx); #endif 
-#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return dac_cast((TADDR)pContext->R0); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return dac_cast(pContext->X0); #else PORTABILITY_ASSERT("StubManagerHelpers::GetThisPtr"); @@ -923,13 +923,13 @@ class StubManagerHelpers static PCODE GetTailCallTarget(T_CONTEXT * pContext) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return pContext->Eax; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return pContext->Rax; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return pContext->R12; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return pContext->X12; #else PORTABILITY_ASSERT("StubManagerHelpers::GetTailCallTarget"); @@ -939,13 +939,13 @@ class StubManagerHelpers static TADDR GetHiddenArg(T_CONTEXT * pContext) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return pContext->Eax; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) return pContext->R10; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return pContext->R12; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return pContext->X12; #else PORTABILITY_ASSERT("StubManagerHelpers::GetHiddenArg"); @@ -966,17 +966,17 @@ class StubManagerHelpers UserCode which invokes multicast delegate <--- */ -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return *((PCODE *)pContext->Ebp + 1); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) T_CONTEXT context(*pContext); Thread::VirtualUnwindCallFrame(&context); Thread::VirtualUnwindCallFrame(&context); return context.Rip; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return *((PCODE *)((TADDR)pContext->R11) + 1); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return *((PCODE *)pContext->Fp + 1); #else PORTABILITY_ASSERT("StubManagerHelpers::GetRetAddrFromMulticastILStubFrame"); @@ -987,17 +987,17 @@ class StubManagerHelpers static TADDR GetSecondArg(T_CONTEXT * pContext) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) return pContext->Edx; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI return pContext->Rsi; #else return pContext->Rdx; #endif -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) return pContext->R1; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return pContext->X1; #else PORTABILITY_ASSERT("StubManagerHelpers::GetSecondArg"); diff --git a/src/coreclr/src/vm/syncblk.cpp b/src/coreclr/src/vm/syncblk.cpp index 9f83fe4d77162..d24b285ef4206 100644 --- a/src/coreclr/src/vm/syncblk.cpp +++ b/src/coreclr/src/vm/syncblk.cpp @@ -56,10 +56,10 @@ SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // static SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX InteropSyncBlockInfo::~InteropSyncBlockInfo() { @@ -75,7 +75,7 @@ InteropSyncBlockInfo::~InteropSyncBlockInfo() FreeUMEntryThunkOrInterceptStub(); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Deletes all items in code:s_InteropInfoStandbyList. 
void InteropSyncBlockInfo::FlushStandbyList() { @@ -97,7 +97,7 @@ void InteropSyncBlockInfo::FlushStandbyList() pEntry = pNextEntry; } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub() { @@ -120,17 +120,17 @@ void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub() } else { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) Stub *pInterceptStub = GetInterceptStub(); if (pInterceptStub != NULL) { // There may be multiple chained stubs pInterceptStub->DecRef(); } -#else // _TARGET_X86_ +#else // TARGET_X86 // Intercept stubs are currently not used on other platforms. _ASSERTE(GetInterceptStub() == NULL); -#endif // _TARGET_X86_ +#endif // TARGET_X86 } } m_pUMEntryThunkOrInterceptStub = NULL; @@ -674,9 +674,9 @@ void SyncBlockCache::Start() SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } @@ -993,7 +993,7 @@ void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb) CleanupSyncBlockComData(psb->m_pInteropInfo); #endif // FEATURE_COMINTEROP -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (g_fEEShutDown) { delete psb->m_pInteropInfo; @@ -1003,9 +1003,9 @@ void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb) psb->m_pInteropInfo->~InteropSyncBlockInfo(); InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo); } -#else // !FEATURE_PAL +#else // !TARGET_UNIX delete psb->m_pInteropInfo; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #ifdef EnC_SUPPORTED @@ -1903,13 +1903,13 @@ DEBUG_NOINLINE void ObjHeader::EnterSpinLock() while (TRUE) { #ifdef _DEBUG -#ifdef BIT64 +#ifdef HOST_64BIT // Give 64bit more time because there isn't a remoting fast path now, and we've hit this assert // needlessly in CLRSTRESS. if (i++ > 30000) #else if (i++ > 10000) -#endif // BIT64 +#endif // HOST_64BIT _ASSERTE(!"ObjHeader::EnterLock timed out"); #endif // get the value so that it doesn't get changed under us. @@ -2968,7 +2968,7 @@ void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo) #endif // EnC_SUPPORTED #endif // !DACCESS_COMPILE -#if defined(BIT64) && defined(_DEBUG) +#if defined(HOST_64BIT) && defined(_DEBUG) void ObjHeader::IllegalAlignPad() { WRAPPER_NO_CONTRACT; @@ -2979,6 +2979,6 @@ void ObjHeader::IllegalAlignPad() #endif _ASSERTE(m_alignpad == 0); } -#endif // BIT64 && _DEBUG +#endif // HOST_64BIT && _DEBUG diff --git a/src/coreclr/src/vm/syncblk.h b/src/coreclr/src/vm/syncblk.h index d08c73af923c2..f30b056d88762 100644 --- a/src/coreclr/src/vm/syncblk.h +++ b/src/coreclr/src/vm/syncblk.h @@ -59,9 +59,9 @@ // // Incidentally, there's a better write-up of all this stuff in the archives. -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #include -#endif // _TARGET_X86_ +#endif // TARGET_X86 // forwards: class SyncBlock; @@ -131,11 +131,11 @@ typedef DPTR(EnCSyncBlockInfo) PTR_EnCSyncBlockInfo; // The GC is highly dependent on SIZE_OF_OBJHEADER being exactly the sizeof(ObjHeader) // We define this macro so that the preprocessor can calculate padding structures. 
-#ifdef BIT64 +#ifdef HOST_64BIT #define SIZEOF_OBJHEADER 8 -#else // !BIT64 +#else // !HOST_64BIT #define SIZEOF_OBJHEADER 4 -#endif // !BIT64 +#endif // !HOST_64BIT inline void InitializeSpinConstants() @@ -609,10 +609,10 @@ class InteropSyncBlockInfo friend class RCWHolder; public: -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // List of InteropSyncBlockInfo instances that have been freed since the last syncblock cleanup. static SLIST_HEADER s_InteropInfoStandbyList; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX InteropSyncBlockInfo() { @@ -623,10 +623,10 @@ class InteropSyncBlockInfo ~InteropSyncBlockInfo(); #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Deletes all items in code:s_InteropInfoStandbyList. static void FlushStandbyList(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_COMINTEROP @@ -913,7 +913,7 @@ class SyncBlock if (!m_pInteropInfo) { NewHolder pInteropInfo; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX pInteropInfo = (InteropSyncBlockInfo *)InterlockedPopEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList); if (pInteropInfo != NULL) @@ -922,7 +922,7 @@ class SyncBlock new (pInteropInfo) InteropSyncBlockInfo(); } else -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX { pInteropInfo = new InteropSyncBlockInfo(); } @@ -1273,15 +1273,15 @@ class ObjHeader private: // !!! Notice: m_SyncBlockValue *MUST* be the last field in ObjHeader. -#ifdef BIT64 +#ifdef HOST_64BIT DWORD m_alignpad; -#endif // BIT64 +#endif // HOST_64BIT Volatile m_SyncBlockValue; // the Index and the Bits -#if defined(BIT64) && defined(_DEBUG) +#if defined(HOST_64BIT) && defined(_DEBUG) void IllegalAlignPad(); -#endif // BIT64 && _DEBUG +#endif // HOST_64BIT && _DEBUG INCONTRACT(void * GetPtrForLockContract()); @@ -1291,11 +1291,11 @@ class ObjHeader FORCEINLINE DWORD GetHeaderSyncBlockIndex() { LIMITED_METHOD_DAC_CONTRACT; -#if defined(BIT64) && defined(_DEBUG) && !defined(DACCESS_COMPILE) +#if defined(HOST_64BIT) && defined(_DEBUG) && !defined(DACCESS_COMPILE) // On WIN64 this field is never modified, but was initialized to 0 if (m_alignpad != 0) IllegalAlignPad(); -#endif // BIT64 && _DEBUG && !DACCESS_COMPILE +#endif // HOST_64BIT && _DEBUG && !DACCESS_COMPILE // pull the value out before checking it to avoid race condition DWORD value = m_SyncBlockValue.LoadWithoutBarrier(); @@ -1396,11 +1396,11 @@ class ObjHeader LIMITED_METHOD_CONTRACT; SUPPORTS_DAC; -#if defined(BIT64) && defined(_DEBUG) && !defined(DACCESS_COMPILE) +#if defined(HOST_64BIT) && defined(_DEBUG) && !defined(DACCESS_COMPILE) // On WIN64 this field is never modified, but was initialized to 0 if (m_alignpad != 0) IllegalAlignPad(); -#endif // BIT64 && _DEBUG && !DACCESS_COMPILE +#endif // HOST_64BIT && _DEBUG && !DACCESS_COMPILE return m_SyncBlockValue.LoadWithoutBarrier(); } @@ -1560,9 +1560,9 @@ inline DWORD AwareLock::GetSyncBlockIndex() return (m_dwSyncIndex & ~SyncBlock::SyncBlockPrecious); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #include -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // _SYNCBLK_H_ diff --git a/src/coreclr/src/vm/synch.cpp b/src/coreclr/src/vm/synch.cpp index ffc3368c80624..6da9b841f8afa 100644 --- a/src/coreclr/src/vm/synch.cpp +++ b/src/coreclr/src/vm/synch.cpp @@ -588,11 +588,11 @@ void CLRLifoSemaphore::Create(INT32 initialSignalCount, INT32 maximumSignalCount _ASSERTE(initialSignalCount <= maximumSignalCount); _ASSERTE(m_handle == nullptr); -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX HANDLE h = WszCreateSemaphore(nullptr, 0, maximumSignalCount, nullptr); -#else // !FEATURE_PAL +#else 
// !TARGET_UNIX HANDLE h = CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr, 0, maximumSignalCount); -#endif // FEATURE_PAL +#endif // TARGET_UNIX if (h == nullptr) { ThrowOutOfMemory(); @@ -634,12 +634,12 @@ bool CLRLifoSemaphore::WaitForSignal(DWORD timeoutMs) // Wait for a signal BOOL waitSuccessful; { -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // Do a prioritized wait to get LIFO waiter release order DWORD waitResult = PAL_WaitForSingleObjectPrioritized(m_handle, timeoutMs); _ASSERTE(waitResult == WAIT_OBJECT_0 || waitResult == WAIT_TIMEOUT); waitSuccessful = waitResult == WAIT_OBJECT_0; -#else // !FEATURE_PAL +#else // !TARGET_UNIX // I/O completion ports release waiters in LIFO order, see // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx DWORD numberOfBytes; @@ -648,7 +648,7 @@ bool CLRLifoSemaphore::WaitForSignal(DWORD timeoutMs) waitSuccessful = GetQueuedCompletionStatus(m_handle, &numberOfBytes, &completionKey, &overlapped, timeoutMs); _ASSERTE(waitSuccessful || GetLastError() == WAIT_TIMEOUT); _ASSERTE(overlapped == nullptr); -#endif // FEATURE_PAL +#endif // TARGET_UNIX } if (!waitSuccessful) @@ -781,7 +781,7 @@ bool CLRLifoSemaphore::Wait(DWORD timeoutMs, UINT32 spinCount, UINT32 processorC counts = countsBeforeUpdate; } -#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 // For now, the spinning changes are disabled on ARM64. The spin loop below replicates how UnfairSemaphore used to spin. // Once more tuning is done on ARM64, it should be possible to come up with a spinning scheme that works well everywhere. int spinCountPerProcessor = spinCount; @@ -821,13 +821,13 @@ bool CLRLifoSemaphore::Wait(DWORD timeoutMs, UINT32 spinCount, UINT32 processorC break; } } -#else // !_TARGET_ARM64_ +#else // !TARGET_ARM64 const UINT32 Sleep0Threshold = 10; YieldProcessorNormalizationInfo normalizationInfo; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX // The PAL's wait subsystem is quite slow, spin more to compensate for the more expensive wait spinCount *= 2; -#endif // FEATURE_PAL +#endif // TARGET_UNIX for (UINT32 i = 0; i < spinCount; ++i) { // Wait @@ -872,7 +872,7 @@ bool CLRLifoSemaphore::Wait(DWORD timeoutMs, UINT32 spinCount, UINT32 processorC counts = countsBeforeUpdate; } } -#endif // _TARGET_ARM64_ +#endif // TARGET_ARM64 // Unregister as a spinner, and acquire the semaphore or register as a waiter counts = m_counts.VolatileLoadWithoutBarrier(); @@ -965,10 +965,10 @@ void CLRLifoSemaphore::Release(INT32 releaseCount) } // Wake waiters -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX BOOL released = ReleaseSemaphore(m_handle, countOfWaitersToWake, nullptr); _ASSERTE(released); -#else // !FEATURE_PAL +#else // !TARGET_UNIX while (--countOfWaitersToWake >= 0) { while (!PostQueuedCompletionStatus(m_handle, 0, 0, nullptr)) @@ -977,7 +977,7 @@ void CLRLifoSemaphore::Release(INT32 releaseCount) ClrSleepEx(1, false); } } -#endif // FEATURE_PAL +#endif // TARGET_UNIX } void CLRMutex::Create(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCTSTR lpName) diff --git a/src/coreclr/src/vm/synch.h b/src/coreclr/src/vm/synch.h index 9c11a9f1cb961..db63e293ecf10 100644 --- a/src/coreclr/src/vm/synch.h +++ b/src/coreclr/src/vm/synch.h @@ -278,9 +278,9 @@ class CLRLifoSemaphore #if defined(DEBUG) UINT32 m_maximumSignalCount; -#endif // _DEBUG && !FEATURE_PAL +#endif // _DEBUG && !TARGET_UNIX - // When FEATURE_PAL is defined, this is a handle to an instance of the PAL's LIFO semaphore. 
When FEATURE_PAL is not + // When TARGET_UNIX is defined, this is a handle to an instance of the PAL's LIFO semaphore. When TARGET_UNIX is not // defined, this is a handle to an I/O completion port. HANDLE m_handle; }; diff --git a/src/coreclr/src/vm/threads.cpp b/src/coreclr/src/vm/threads.cpp index 4624156330fe9..cd8cb13c8badd 100644 --- a/src/coreclr/src/vm/threads.cpp +++ b/src/coreclr/src/vm/threads.cpp @@ -92,7 +92,7 @@ CrstStatic g_DeadlockAwareCrst; #if defined(_DEBUG) BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId ) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX LIMITED_METHOD_CONTRACT; DWORD id = GetThreadId(h); @@ -100,9 +100,9 @@ BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId ) // OS call GetThreadId may fail, and return 0. In this case we can not // make a decision if the two match or not. Instead, we ignore this check. return id == 0 || id == osId; -#else // !FEATURE_PAL +#else // !TARGET_UNIX return TRUE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } #endif // _DEBUG @@ -448,7 +448,7 @@ void Thread::ChooseThreadCPUGroupAffinity() GC_TRIGGERS; } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) return; @@ -468,7 +468,7 @@ void Thread::ChooseThreadCPUGroupAffinity() CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL); m_wCPUGroup = groupAffinity.Group; m_pAffinityMask = groupAffinity.Mask; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } void Thread::ClearThreadCPUGroupAffinity() @@ -479,7 +479,7 @@ void Thread::ClearThreadCPUGroupAffinity() GC_NOTRIGGER; } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) return; @@ -497,7 +497,7 @@ void Thread::ClearThreadCPUGroupAffinity() m_wCPUGroup = 0; m_pAffinityMask = 0; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } DWORD Thread::StartThread() @@ -861,16 +861,16 @@ void DestroyThread(Thread *th) #ifdef FEATURE_EH_FUNCLETS ExceptionTracker::PopTrackers((void*)-1); #else // !FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PTR_ThreadExceptionState pExState = th->GetExceptionState(); if (pExState->IsExceptionInProgress()) { GCX_COOP(); pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 #error Unsupported platform -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS if (g_fEEShutDown == 0) @@ -897,16 +897,16 @@ HRESULT Thread::DetachThread(BOOL fDLLThreadDetach) #ifdef FEATURE_EH_FUNCLETS ExceptionTracker::PopTrackers((void*)-1); #else // !FEATURE_EH_FUNCLETS -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PTR_ThreadExceptionState pExState = GetExceptionState(); if (pExState->IsExceptionInProgress()) { GCX_COOP(); pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1); } -#else // !_TARGET_X86_ +#else // !TARGET_X86 #error Unsupported platform -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_EH_FUNCLETS #ifdef FEATURE_COMINTEROP @@ -1110,7 +1110,7 @@ void InitThreadManager() } #endif // FEATURE_WRITEBARRIER_COPY -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX _ASSERTE(GetThread() == NULL); PTEB Teb = NtCurrentTeb(); @@ -1126,7 +1126,7 @@ void InitThreadManager() g_TlsIndex = (DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000); _ASSERTE(g_TrapReturningThreads == 0); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_INTEROP_DEBUGGING g_debuggerWordTLSIndex = 
TlsAlloc(); @@ -1418,10 +1418,10 @@ Thread::Thread() m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC; m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC; -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX X86_ONLY(m_LastRedirectIP = 0); X86_ONLY(m_SpinCount = 0); -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX #endif // FEATURE_HIJACK #if defined(_DEBUG) && defined(TRACK_SYNC) @@ -1489,7 +1489,7 @@ Thread::Thread() m_dwAVInRuntimeImplOkayCount = 0; -#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(PLATFORM_UNIX) // GCCOVER +#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // GCCOVER m_fPreemptiveGCDisabledForGCStress = false; #endif @@ -1553,10 +1553,10 @@ Thread::Thread() m_fGCSpecial = FALSE; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX m_wCPUGroup = 0; m_pAffinityMask = 0; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX m_pAllLoggedTypes = NULL; @@ -1591,11 +1591,11 @@ BOOL Thread::InitThread(BOOL fInternal) // log will not allocate memory at these critical times an avoid deadlock. STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId()); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // workaround: Remove this when we flow impersonation token to host. BOOL reverted = FALSE; HANDLE threadToken = INVALID_HANDLE_VALUE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (m_ThreadHandle == INVALID_HANDLE_VALUE) { @@ -1613,7 +1613,7 @@ BOOL Thread::InitThread(BOOL fInternal) // Thread is created when or after the physical thread started running HANDLE curProcess = ::GetCurrentProcess(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include @@ -1648,7 +1648,7 @@ BOOL Thread::InitThread(BOOL fInternal) EnsureResetThreadToken resetToken(threadToken, reverted); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup, 0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS)) @@ -1677,12 +1677,12 @@ BOOL Thread::InitThread(BOOL fInternal) m_random.Init(); // Set floating point mode to round to nearest -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR ); m_pTEB = (struct _NT_TIB*)NtCurrentTeb(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (m_CacheStackBase == 0) { @@ -1988,7 +1988,7 @@ void Thread::HandleThreadStartupFailure() RaiseTheExceptionInternalOnly(args.pThrowable, FALSE); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken) { WRAPPER_NO_CONTRACT; @@ -2019,7 +2019,7 @@ void UndoRevert(BOOL bReverted, HANDLE hToken) } return; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // We don't want ::CreateThread() calls scattered throughout the source. 
So gather @@ -2040,19 +2040,19 @@ BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, voi // _ASSERTE(stackSize <= 0xFFFFFFFF); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HandleHolder token; BOOL bReverted = FALSE; bRet = RevertIfImpersonated(&bReverted, &token); if (bRet != TRUE) return bRet; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread); bRet = CreateNewOSThread(stackSize, start, args); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX UndoRevert(bReverted, token); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (pName != NULL) SetThreadName(m_ThreadHandle, pName); @@ -2171,7 +2171,7 @@ BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize) } } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (!fSizesGot) { HINSTANCE hInst = WszGetModuleHandle(NULL); @@ -2188,7 +2188,7 @@ BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize) } EX_END_CATCH(SwallowAllExceptions); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (!fSizesGot) { //return some somewhat-reasonable numbers @@ -2210,7 +2210,7 @@ BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUT } CONTRACTL_END; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX SIZE_T ourId = 0; #else DWORD ourId = 0; @@ -2225,14 +2225,14 @@ BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUT sizeToCommitOrReserve = GetDefaultStackSizeSetting(); } -#ifndef FEATURE_PAL // the PAL does its own adjustments as necessary +#ifndef TARGET_UNIX // the PAL does its own adjustments as necessary if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize()) { // On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of // a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB). sizeToCommitOrReserve = GetOsPageSize() + 1; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Make sure we have all our handles, in case someone tries to suspend us // as we are starting up. @@ -2242,7 +2242,7 @@ BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUT return FALSE; } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX h = ::PAL_CreateThread64(NULL /*=SECURITY_ATTRIBUTES*/, #else h = ::CreateThread( NULL /*=SECURITY_ATTRIBUTES*/, @@ -3475,7 +3475,7 @@ DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL wa { ThrowOutOfMemory(); } -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX else if (errorCode == ERROR_NOT_SUPPORTED) { // "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync @@ -4683,7 +4683,7 @@ BOOL Thread::PrepareApartmentAndContext() } CONTRACTL_END; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX m_OSThreadId = ::PAL_GetCurrentOSThreadId(); #else m_OSThreadId = ::GetCurrentThreadId(); @@ -4929,7 +4929,7 @@ Thread::ApartmentState Thread::SetApartment(ApartmentState state, BOOL fFireMDAO { // We should never be attempting to CoUninitialize another thread than // the currently running thread. -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX _ASSERTE(m_OSThreadId == ::PAL_GetCurrentOSThreadId()); #else _ASSERTE(m_OSThreadId == ::GetCurrentThreadId()); @@ -4983,7 +4983,7 @@ Thread::ApartmentState Thread::SetApartment(ApartmentState state, BOOL fFireMDAO // Don't use the TS_Unstarted state bit to check for this, it's cleared far // too late in the day for us. 
Instead check whether we're in the correct // thread context. -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX if (m_OSThreadId != ::PAL_GetCurrentOSThreadId()) #else if (m_OSThreadId != ::GetCurrentThreadId()) @@ -6036,7 +6036,7 @@ BOOL StartUniqueStackMap () return StartUniqueStackMapHelper(); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX size_t UpdateStackHash(size_t hash, size_t retAddr) { @@ -6058,7 +6058,7 @@ size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, siz size_t hash = 0; int idx = 0; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 static size_t moduleBase = (size_t) -1; static size_t moduleTop = (size_t) -1; @@ -6103,7 +6103,7 @@ size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, siz stackTop++; } -#else // _TARGET_X86_ +#else // TARGET_X86 CONTEXT ctx; ClrCaptureContext(&ctx); @@ -6154,7 +6154,7 @@ size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, siz stackTrace [idx] = uRetAddrForHash; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 stackTrace [0] = idx; @@ -6227,7 +6227,7 @@ BOOL Thread::UniqueStack(void* stackStart) void* stopPoint = pThread->m_CacheStackBase; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Find the stop point (most jitted function) Frame* pFrame = pThread->GetFrame(); for(;;) @@ -6245,7 +6245,7 @@ BOOL Thread::UniqueStack(void* stackStart) } pFrame = pFrame->Next(); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Get hash of all return addresses between here an the top most jitted function stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint, @@ -6295,14 +6295,14 @@ BOOL Thread::UniqueStack(void* stackStart) return fUnique; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX BOOL Thread::UniqueStack(void* stackStart) { return FALSE; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // STRESS_HEAP @@ -6325,7 +6325,7 @@ void * Thread::GetStackLowerBound() STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; - #ifndef FEATURE_PAL + #ifndef TARGET_UNIX MEMORY_BASIC_INFORMATION lowerBoundMemInfo; SIZE_T dwRes; @@ -6339,9 +6339,9 @@ void * Thread::GetStackLowerBound() { return NULL; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX return PAL_GetStackLimit(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } /* @@ -6387,11 +6387,11 @@ BOOL Thread::SetStackLimits(SetStackLimitScope scope) // be sufficient to allow a typical non-recursive call chain to execute, including potential exception handling and // garbage collection. Used for probing for available stack space through RuntimeImports.EnsureSufficientExecutionStack, // among other things. 
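// As a rough, self-contained sketch (illustration only -- the helper name and the
// plain-pointer parameters below are hypothetical, not the runtime's API), the probe
// backing RuntimeImports.EnsureSufficientExecutionStack amounts to checking that the
// current stack pointer is still at least MinExecutionStackSize above the cached
// stack limit:
static bool HasSufficientExecutionStack(const char* cachedStackLimit, size_t minExecutionStackSize)
{
    char probe;                                                  // address of a local approximates the current SP
    return &probe > cachedStackLimit + minExecutionStackSize;    // stack grows downward on these targets
}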
-#ifdef BIT64 +#ifdef HOST_64BIT const UINT_PTR MinExecutionStackSize = 128 * 1024; -#else // !BIT64 +#else // !HOST_64BIT const UINT_PTR MinExecutionStackSize = 64 * 1024; -#endif // BIT64 +#endif // HOST_64BIT _ASSERTE(m_CacheStackBase >= m_CacheStackLimit); if ((reinterpret_cast(m_CacheStackBase) - reinterpret_cast(m_CacheStackLimit)) > MinExecutionStackSize) @@ -6457,7 +6457,7 @@ HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope) } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks if (Thread::IsSetThreadStackGuaranteeInUse(fScope)) @@ -6465,7 +6465,7 @@ HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope) // Tune this as needed ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE; int EXTRA_PAGES = 0; -#if defined(BIT64) +#if defined(HOST_64BIT) // Free Build EH Stack Stats: // -------------------------------- // currently the maximum stack usage we'll face while handling a SO includes: @@ -6493,11 +6493,11 @@ HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope) uGuardSize += (ThreadGuardPages * GetOsPageSize()); } -#else // BIT64 +#else // HOST_64BIT #ifdef _DEBUG uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure #endif // _DEBUG -#endif // BIT64 +#endif // HOST_64BIT LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize)); @@ -6507,7 +6507,7 @@ HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope) } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return S_OK; } @@ -6544,9 +6544,9 @@ UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit) // allowed to fault at the very end of that page. So, as a result, the last normal stack address is // one page sooner. 
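// Worked example of the sum computed just below (the concrete numbers are assumptions
// for illustration only -- 4 KB pages, a 4 KB HARD_GUARD_REGION_SIZE and a 16 KB
// cbStackGuarantee; the real values come from GetOsPageSize(), HARD_GUARD_REGION_SIZE
// and the guarantee passed in at run time):
//   last normal address = StackLimit + 16 KB (cbStackGuarantee)
//                                    + 4 KB  (extra page, Windows only, see above)
//                                    + 4 KB  (hard guard region)
//                       = StackLimit + 24 KB
// Everything below that address is reserved for guard-page handling and must not be
// touched by ordinary code.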
return StackLimit + (cbStackGuarantee -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX + GetOsPageSize() -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX + HARD_GUARD_REGION_SIZE); } @@ -6562,7 +6562,7 @@ static void DebugLogMBIFlags(UINT uState, UINT uProtect) } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define LOG_FLAG(flags, name) \ if (flags & name) \ @@ -6607,7 +6607,7 @@ static void DebugLogMBIFlags(UINT uState, UINT uProtect) } #undef LOG_FLAG -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } @@ -6723,11 +6723,11 @@ BOOL Thread::IsSPBeyondLimit() NOINLINE void AllocateSomeStack(){ LIMITED_METHOD_CONTRACT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 const size_t size = 0x200; -#else //_TARGET_X86_ +#else //TARGET_X86 const size_t size = 0x400; -#endif //_TARGET_X86_ +#endif //TARGET_X86 INT8* mem = (INT8*)_alloca(size); // Actually touch the memory we just allocated so the compiler can't @@ -6736,7 +6736,7 @@ NOINLINE void AllocateSomeStack(){ VolatileStore(mem, 0); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // static // private BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress) @@ -6778,7 +6778,7 @@ BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddr return FALSE; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX /* * DetermineIfGuardPagePresent @@ -6809,7 +6809,7 @@ BOOL Thread::DetermineIfGuardPagePresent() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL bStackGuarded = FALSE; UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase(); UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit(); @@ -6823,9 +6823,9 @@ BOOL Thread::DetermineIfGuardPagePresent() LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING")); return bStackGuarded; -#else // !FEATURE_PAL +#else // !TARGET_UNIX return TRUE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } /* @@ -6864,7 +6864,7 @@ UINT_PTR Thread::GetStackGuarantee() { WRAPPER_NO_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // There is a new API available on new OS's called SetThreadStackGuarantee. It allows you to change the size of // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query // it to see if someone has changed the size of the guard region for this thread. @@ -6881,12 +6881,12 @@ UINT_PTR Thread::GetStackGuarantee() { return cbNewStackGuarantee; } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return SIZEOF_DEFAULT_STACK_GUARANTEE; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // // MarkPageAsGuard @@ -7023,7 +7023,7 @@ VOID Thread::RestoreGuardPage() EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // #ifndef DACCESS_COMPILE @@ -7060,12 +7060,12 @@ bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validCo if (!ret) { SetIP(pctx, 0); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pRD->ControlPC = pctx->Eip; pRD->PCTAddr = (TADDR)&(pctx->Eip); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) // nothing more to do here, on Win64 setting the IP to 0 is enough. -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // nothing more to do here, on Win64 setting the IP to 0 is enough. 
#else PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay"); @@ -7090,12 +7090,12 @@ void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx) ::FillRegDisplay(pRD, pctx); -#if defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_) +#if defined(DEBUG_REGDISPLAY) && !defined(TARGET_X86) CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this); pRD->_pThread = this; CheckRegDisplaySP(pRD); -#endif // defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_) +#endif // defined(DEBUG_REGDISPLAY) && !defined(TARGET_X86) } @@ -7267,9 +7267,9 @@ void Thread::DoExtraWorkForFinalizer() if (RequireSyncBlockCleanup()) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX InteropSyncBlockInfo::FlushStandbyList(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef FEATURE_COMINTEROP RCW::FlushStandbyList(); diff --git a/src/coreclr/src/vm/threads.h b/src/coreclr/src/vm/threads.h index 11db798f93292..1810c3ee0d878 100644 --- a/src/coreclr/src/vm/threads.h +++ b/src/coreclr/src/vm/threads.h @@ -460,10 +460,10 @@ typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder; #else // CROSSGEN_COMPILE -#if (defined(_TARGET_ARM_) && defined(FEATURE_EMULATE_SINGLESTEP)) +#if (defined(TARGET_ARM) && defined(FEATURE_EMULATE_SINGLESTEP)) #include "armsinglestepper.h" #endif -#if (defined(_TARGET_ARM64_) && defined(FEATURE_EMULATE_SINGLESTEP)) +#if (defined(TARGET_ARM64) && defined(FEATURE_EMULATE_SINGLESTEP)) #include "arm64singlestepper.h" #endif @@ -510,14 +510,14 @@ EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock); // Used to capture information about the state of execution of a *SUSPENDED* thread. struct ExecutionState; -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // This is the type of the start function of a redirected thread pulled from // a HandledJITCase during runtime suspension typedef void (__stdcall *PFN_REDIRECTTARGET)(); // Describes the weird argument sets during hijacking struct HijackArgs; -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX #endif // FEATURE_HIJACK @@ -621,9 +621,9 @@ void InitThreadManager(); #ifdef FEATURE_HIJACK EXTERN_C void WINAPI OnHijackTripThread(); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 EXTERN_C void WINAPI OnHijackFPTripThread(); // hijacked JIT code is returning an FP value -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // FEATURE_HIJACK @@ -998,9 +998,9 @@ class Thread // MapWin32FaultToCOMPlusException needs access to Thread::IsAddrOfRedirectFunc() friend DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord); friend void STDCALL OnHijackWorker(HijackArgs * pArgs); -#ifdef PLATFORM_UNIX +#ifdef TARGET_UNIX friend void HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext); -#endif // PLATFORM_UNIX +#endif // TARGET_UNIX #endif // FEATURE_HIJACK @@ -1591,7 +1591,7 @@ class Thread // we fire the AllocationTick event. It's only for tooling purpose. 
TypeHandle m_thAllocContextObj; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX private: _NT_TIB *m_pTEB; public: @@ -1603,7 +1603,7 @@ class Thread WRAPPER_NO_CONTRACT; return &GetTEB()->ExceptionList; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX inline void SetTHAllocContextObj(TypeHandle th) {LIMITED_METHOD_CONTRACT; m_thAllocContextObj = th; } @@ -2426,9 +2426,9 @@ class Thread STR_SwitchedOut, }; -#if defined(FEATURE_HIJACK) && defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && defined(TARGET_UNIX) bool InjectGcSuspension(); -#endif // FEATURE_HIJACK && PLATFORM_UNIX +#endif // FEATURE_HIJACK && TARGET_UNIX #ifndef DISABLE_THREADSUSPEND // SuspendThread @@ -2675,9 +2675,9 @@ class Thread BOOL IsRudeAbort(); BOOL IsFuncEvalAbort(); -#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK) +#if defined(TARGET_AMD64) && defined(FEATURE_HIJACK) BOOL IsSafeToInjectThreadAbort(PTR_CONTEXT pContextToCheck); -#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK) +#endif // defined(TARGET_AMD64) && defined(FEATURE_HIJACK) inline BOOL IsAbortRequested() { @@ -2773,10 +2773,10 @@ class Thread return s_NextSelfAbortEndTime; } -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // Tricks for resuming threads from fully interruptible code with a ThreadStop. BOOL ResumeUnderControl(T_CONTEXT *pCtx); -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX enum InducedThrowReason { InducedThreadStop = 1, @@ -2839,7 +2839,7 @@ class Thread // ARM64 unix doesn't currently support any reliable hardware mechanism for single-stepping. // For each we emulate single step in software. This support is used only by the debugger. private: -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) ArmSingleStepper m_singleStepper; #else Arm64SingleStepper m_singleStepper; @@ -2858,7 +2858,7 @@ class Thread void BypassWithSingleStep(const void* ip ARM_ARG(WORD opcode1) ARM_ARG(WORD opcode2) ARM64_ARG(uint32_t opcode)) { -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) m_singleStepper.Bypass((DWORD)ip, opcode1, opcode2); #else m_singleStepper.Bypass((uint64_t)ip, opcode); @@ -3166,7 +3166,7 @@ class Thread static void SetCulture(OBJECTREF *CultureObj, BOOL bUICulture); private: -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // Used in suspension code to redirect a thread at a HandledJITCase BOOL RedirectThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt); BOOL RedirectCurrentThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt, T_CONTEXT *pCurrentThreadCtx); @@ -3187,7 +3187,7 @@ class Thread private: bool m_fPreemptiveGCDisabledForGCStress; #endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX public: @@ -3333,7 +3333,7 @@ class Thread // space to restore the guard page, so make sure you know what you're doing when you decide to call this. 
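// For orientation, re-arming a Windows guard page ultimately comes down to a
// VirtualProtect call like the sketch below (illustration only, assuming the usual
// <windows.h> types that this header already relies on; the real RestoreGuardPage and
// MarkPageAsGuard code adds stack probing and failure handling on top of this):
//
//     DWORD oldProtect;
//     BOOL armed = VirtualProtect(pPageBase, GetOsPageSize(),
//                                 PAGE_READWRITE | PAGE_GUARD, &oldProtect);
//     // The next touch of pPageBase now raises STATUS_GUARD_PAGE_VIOLATION and the
//     // system clears the guard bit again, which is what makes stack growth work.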
VOID RestoreGuardPage(); -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) private: // Redirecting of threads in managed code at suspension @@ -3354,7 +3354,7 @@ class Thread #endif // defined(HAVE_GCCOVER) && USE_REDIRECT_FOR_GCSTRESS friend void CPFH_AdjustContextForThreadSuspensionRace(T_CONTEXT *pContext, Thread *pThread); -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX private: //------------------------------------------------------------- @@ -3477,15 +3477,15 @@ class Thread VOID **m_ppvHJRetAddrPtr; // place we bashed a new return address MethodDesc *m_HijackedFunction; // remember what we hijacked -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX BOOL HandledJITCase(BOOL ForTaskSwitchIn = FALSE); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 PCODE m_LastRedirectIP; ULONG m_SpinCount; -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX #endif // FEATURE_HIJACK @@ -4038,11 +4038,11 @@ class Thread this fast, the table is not perfect (there can be collisions), but this should not cause false positives, but it may allow errors to go undetected */ -#ifdef BIT64 +#ifdef HOST_64BIT #define OBJREF_HASH_SHIFT_AMOUNT 3 -#else // BIT64 +#else // HOST_64BIT #define OBJREF_HASH_SHIFT_AMOUNT 2 -#endif // BIT64 +#endif // HOST_64BIT // For debugging, you may want to make this number very large, (8K) // should basically insure that no collisions happen @@ -4643,10 +4643,10 @@ class Thread PTR_GCFrame m_pGCFrame; // The topmost GC Frame -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX WORD m_wCPUGroup; DWORD_PTR m_pAffinityMask; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX public: void ChooseThreadCPUGroupAffinity(); void ClearThreadCPUGroupAffinity(); diff --git a/src/coreclr/src/vm/threadsuspend.cpp b/src/coreclr/src/vm/threadsuspend.cpp index 0e063a4abbdfa..2348a72f686b5 100644 --- a/src/coreclr/src/vm/threadsuspend.cpp +++ b/src/coreclr/src/vm/threadsuspend.cpp @@ -45,13 +45,13 @@ extern "C" void RedirectedHandledJITCaseForUserSuspend_Stub(void); #define GetRedirectHandlerForUserSuspend() \ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForUserSuspend_Stub)) -#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER extern "C" void RedirectedHandledJITCaseForGCStress_Stub(void); #define GetRedirectHandlerForGCStress() \ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForGCStress_Stub)) #endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS -#endif // _TARGET_AMD64_ || _TARGET_ARM_ +#endif // TARGET_AMD64 || TARGET_ARM // Every PING_JIT_TIMEOUT ms, check to see if a thread in JITted code has wandered @@ -368,9 +368,9 @@ Thread::SuspendThreadResult Thread::SuspendThread(BOOL fOneTryOnly, DWORD *pdwSu else { // Our callers generally expect that STR_Failure means that // the thread has exited. 
-#ifndef FEATURE_PAL +#ifndef TARGET_UNIX _ASSERTE(NtCurrentTeb()->LastStatusValue != STATUS_SUSPEND_COUNT_EXCEEDED); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX str = STR_Failure; break; } @@ -569,7 +569,7 @@ BOOL EESetThreadContext(Thread *pThread, const CONTEXT *pContext) } CONTRACTL_END; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 _ASSERTE(CheckSuspended(pThread)); #endif @@ -787,7 +787,7 @@ StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data) } frameAction = DiscardLatchedFrame; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // On X86 the IL stub method is reported to us before the frame with the actual interop method. We need to // swap the order because if the worker saw the IL stub - which is a CER root - first, it would terminate the // stack walk and wouldn't allow the thread to be aborted, regardless of how the interop method is annotated. @@ -823,7 +823,7 @@ StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data) frameAction = LatchCurrentFrame; } } -#else // _TARGET_X86_ +#else // TARGET_X86 // On 64-bit the IL stub method is reported after the actual interop method so we don't have to swap them. // However, we still want to discard the interop method frame if the call is unbreakable by convention. if (pData->fHaveLatchedCF) @@ -853,7 +853,7 @@ StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data) frameAction = LatchCurrentFrame; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // Execute the "frame action". StackWalkAction action; @@ -937,7 +937,7 @@ BOOL Thread::IsExecutingWithinCer() return sContext.fWithinCer; } -#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK) +#if defined(TARGET_AMD64) && defined(FEATURE_HIJACK) BOOL Thread::IsSafeToInjectThreadAbort(PTR_CONTEXT pContextToCheck) { CONTRACTL @@ -971,9 +971,9 @@ BOOL Thread::IsSafeToInjectThreadAbort(PTR_CONTEXT pContextToCheck) return TRUE; } } -#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK) +#endif // defined(TARGET_AMD64) && defined(FEATURE_HIJACK) -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // CONTEXT_CONTROL does not include any nonvolatile registers that might be the frame pointer. #define CONTEXT_MIN_STACKWALK (CONTEXT_CONTROL | CONTEXT_INTEGER) #else @@ -1034,7 +1034,7 @@ BOOL Thread::ReadyForAsyncException() pStartFrame = pFrameAddr; } } -#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK) +#if defined(TARGET_AMD64) && defined(FEATURE_HIJACK) else if (ThrewControlForThread() == Thread::InducedThreadRedirect) { if (!IsSafeToInjectThreadAbort(m_OSContext)) @@ -1043,7 +1043,7 @@ BOOL Thread::ReadyForAsyncException() return FALSE; } } -#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK) +#endif // defined(TARGET_AMD64) && defined(FEATURE_HIJACK) } else { @@ -1176,9 +1176,9 @@ BOOL Thread::IsContextSafeToRedirect(CONTEXT* pContext) BOOL isSafeToRedirect = TRUE; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // In some cases (x86 WOW64, ARM32 on ARM64) Windows will not set the CONTEXT_EXCEPTION_REPORTING flag // if the thread is executing in kernel mode (i.e. in the middle of a syscall or exception handling). // Therefore, we should treat the absence of the CONTEXT_EXCEPTION_REPORTING flag as an indication that @@ -1186,7 +1186,7 @@ BOOL Thread::IsContextSafeToRedirect(CONTEXT* pContext) // Note: the x86 WOW64 case is already handled in GetSafelyRedirectableThreadContext; in addition, this // flag is never set on Windows7 x86 WOW64. So this check is valid for non-x86 architectures only. 
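// Condensed sketch of the overall decision (illustration only; the hypothetical helper
// below is not the runtime's code, which spreads the same checks across the #if blocks
// in this function):
static bool IsRedirectSafeSketch(const CONTEXT* pCtx)
{
    if ((pCtx->ContextFlags & CONTEXT_EXCEPTION_REPORTING) == 0)
        return false;   // no trap-frame report: assume the thread may be in kernel mode
    // Redirect only if the kernel says the thread is neither dispatching an exception
    // nor executing inside a system service.
    return (pCtx->ContextFlags & (CONTEXT_EXCEPTION_ACTIVE | CONTEXT_SERVICE_ACTIVE)) == 0;
}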
isSafeToRedirect = (pContext->ContextFlags & CONTEXT_EXCEPTION_REPORTING) != 0; -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) if (pContext->ContextFlags & CONTEXT_EXCEPTION_REPORTING) { @@ -1198,7 +1198,7 @@ BOOL Thread::IsContextSafeToRedirect(CONTEXT* pContext) } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return isSafeToRedirect; } @@ -1707,7 +1707,7 @@ Thread::UserAbort(ThreadAbortRequester requester, | TS_Detached | TS_Unstarted))); -#if defined(_TARGET_X86_) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) // TODO WIN64: consider this if there is a way to detect of managed code on stack. if ((m_pFrame == FRAME_TOP) && (GetFirstCOMPlusSEHRecord(this) == EXCEPTION_CHAIN_END) @@ -1724,15 +1724,15 @@ Thread::UserAbort(ThreadAbortRequester requester, SetAborted(); return S_OK; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 if (!m_fPreemptiveGCDisabled) { if ((m_pFrame != FRAME_TOP) && m_pFrame->IsTransitionToNativeFrame() -#if defined(_TARGET_X86_) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) && ((size_t) GetFirstCOMPlusSEHRecord(this) > ((size_t) m_pFrame) - 20) -#endif // _TARGET_X86_ +#endif // TARGET_X86 ) { fOutOfRuntime = TRUE; @@ -1744,12 +1744,12 @@ Thread::UserAbort(ThreadAbortRequester requester, { fNeedStackCrawl = TRUE; } -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) else { HandleJITCaseForAbort(); } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX #ifndef DISABLE_THREADSUSPEND // The thread is not suspended now. @@ -3002,9 +3002,9 @@ void RedirectedThreadFrame::ExceptionUnwind() m_Regs = NULL; } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 //**************************************************************************************** // This will check who caused the exception. If it was caused by the the redirect function, // the reason is to resume the thread back at the point it was redirected in the first @@ -3114,7 +3114,7 @@ int RedirectedHandledJITCaseExceptionFilter( // Resume execution at point where thread was originally redirected return (EXCEPTION_CONTINUE_EXECUTION); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 void NotifyHostOnGCSuspension() { @@ -3167,7 +3167,7 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) STRESS_LOG5(LF_SYNC, LL_INFO1000, "In RedirectedHandledJITcase reason 0x%x pFrame = %p pc = %p sp = %p fp = %p", reason, &frame, GetIP(pCtx), GetSP(pCtx), GetFP(pCtx)); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // This will indicate to the exception filter whether or not the exception is caused // by us or the client. BOOL fDone = FALSE; @@ -3177,7 +3177,7 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) // some unrelated exception. __try -#endif // _TARGET_X86_ +#endif // TARGET_X86 { // Make sure this thread doesn't reuse the context memory in re-entrancy cases _ASSERTE(pThread->GetSavedRedirectContext() != NULL); @@ -3216,7 +3216,7 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) GCX_PREEMP_NO_DTOR_END(); } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pThread->HandleThreadAbort(); // Might throw an exception. 
// Indicate that the call to the service went without an exception, and that @@ -3232,7 +3232,7 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) RaiseException(EXCEPTION_HIJACK, 0, 0, NULL); -#else // _TARGET_X86_ +#else // TARGET_X86 #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER // @@ -3258,13 +3258,13 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) SetIP(pThread->m_OSContext, uResumePC); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // Save the original resume PC in Lr pCtx->Lr = uResumePC; // Since we have set a new IP, we have to clear conditional execution flags too. ClearITState(pThread->m_OSContext); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM SetIP(pCtx, uAbortAddr); } @@ -3302,9 +3302,9 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) RtlRestoreContext(pCtx, NULL); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 __except (++filter_count == 1 ? RedirectedHandledJITCaseExceptionFilter(GetExceptionInformation(), &frame, fDone, pCtx) : EXCEPTION_CONTINUE_SEARCH) @@ -3312,7 +3312,7 @@ void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason) _ASSERTE(!"Reached body of __except in Thread::RedirectedHandledJITCase"); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 } //**************************************************************************************** @@ -3389,7 +3389,7 @@ void __stdcall Thread::RedirectedHandledJITCaseForGCStress() // own stack. // -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #define CONTEXT_COMPLETE (CONTEXT_FULL | CONTEXT_FLOATING_POINT | \ CONTEXT_DEBUG_REGISTERS | CONTEXT_EXTENDED_REGISTERS | CONTEXT_EXCEPTION_REQUEST) #else @@ -3470,7 +3470,7 @@ BOOL Thread::RedirectThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt) // Temporarily set the IP of the context to the target for SetThreadContext PCODE dwOrigEip = GetIP(pCtx); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Redirection can be required when in IT Block. // In that case must reset the IT state before redirection. 
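// At the bit level, "resetting the IT state" means clearing the Thumb-2 ITSTATE field,
// which the architecture splits across CPSR bits [26:25] and [15:10] (mask 0x0600FC00).
// A helper can therefore do something like the assumed sketch
//     pCtx->Cpsr &= ~0x0600FC00;   // i.e. Cpsr &= 0xF9FF03FF
// before the new IP is set, so execution does not resume under stale IT condition flags.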
DWORD dwOrigCpsr = pCtx->Cpsr; @@ -3488,7 +3488,7 @@ BOOL Thread::RedirectThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt) // Restore original IP SetIP(pCtx, dwOrigEip); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // restore IT State in the context pCtx->Cpsr = dwOrigCpsr; #endif @@ -3577,7 +3577,7 @@ BOOL Thread::RedirectCurrentThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt, CONT SetIP(pCurrentThreadCtx, (PCODE)pTgt); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Redirection can be required when in IT Block // Clear the IT State before redirecting ClearITState(pCurrentThreadCtx); @@ -3681,7 +3681,7 @@ BOOL Thread::CheckForAndDoRedirectForGCStress (CONTEXT *pCurrentThreadCtx) } #endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX #endif // FEATURE_HIJACK @@ -3894,18 +3894,18 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_GCSuspendPending); countThreads++; -#if defined(FEATURE_HIJACK) && defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && defined(TARGET_UNIX) bool gcSuspensionSignalSuccess = thread->InjectGcSuspension(); if (!gcSuspensionSignalSuccess) { STRESS_LOG1(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime() - Failed to raise GC suspension signal for thread %p.\n", thread); } -#endif // FEATURE_HIJACK && PLATFORM_UNIX +#endif // FEATURE_HIJACK && TARGET_UNIX } #else // DISABLE_THREADSUSPEND -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) DWORD dwSwitchCount = 0; RetrySuspension: #endif @@ -3947,7 +3947,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) // is where we try to hijack/redirect the thread. If it's in VM code, we have to just let the VM // finish what it's doing. -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // Only check for HandledJITCase if we actually suspended the thread. if (str == Thread::STR_Success) { @@ -3984,7 +3984,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) STRESS_LOG1(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime() - Thread %p redirected().\n", thread); } } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_GCSuspendPending); @@ -4192,7 +4192,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) } #endif -#if defined(FEATURE_HIJACK) && defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && defined(TARGET_UNIX) _ASSERTE (thread == NULL); while ((thread = ThreadStore::GetThreadList(thread)) != NULL) { @@ -4233,7 +4233,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) if (!thread->m_fPreemptiveGCDisabled) continue; -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) RetrySuspension2: #endif // We can not allocate memory after we suspend a thread. @@ -4257,7 +4257,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) g_SuspendStatistics.cntFailedSuspends++; #endif -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // Only check HandledJITCase if we actually suspended the thread, and // the thread is in cooperative mode. 
// See comment at the previous invocation of HandledJITCase - it does @@ -4284,7 +4284,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason) #endif } } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX if (str == Thread::STR_Success) thread->ResumeThread(); @@ -4364,18 +4364,18 @@ void Thread::CommitGCStressInstructionUpdate() assert(pbDestCode != NULL); assert(pbSrcCode != NULL); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) *pbDestCode = *pbSrcCode; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (GetARMInstructionLength(pbDestCode) == 2) *(WORD*)pbDestCode = *(WORD*)pbSrcCode; else *(DWORD*)pbDestCode = *(DWORD*)pbSrcCode; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) *(DWORD*)pbDestCode = *(DWORD*)pbSrcCode; @@ -4478,8 +4478,8 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded) STRESS_LOG0(LF_SYNC, LL_INFO1000, "Thread::ResumeRuntime() - End\n"); } -#ifndef FEATURE_PAL -#ifdef _TARGET_X86_ +#ifndef TARGET_UNIX +#ifdef TARGET_X86 //**************************************************************************************** // This will resume the thread at the location of redirection. // @@ -4544,7 +4544,7 @@ int RedirectedThrowControlExceptionFilter( return (EXCEPTION_CONTINUE_EXECUTION); } #endif -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Resume a thread at this location, to persuade it to throw a ThreadStop. The // exception handler needs a reasonable idea of how large this method is, so don't @@ -4613,7 +4613,7 @@ ThrowControlForThread( RaiseComPlusException(); } -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // This function is called by UserAbort. // It forces a thread to abort if allowed and the thread is running managed code. BOOL Thread::HandleJITCaseForAbort() @@ -4703,7 +4703,7 @@ BOOL Thread::ResumeUnderControl(CONTEXT *pCtx) SetThrowControlForThread(InducedThreadRedirect); STRESS_LOG1(LF_SYNC, LL_INFO100, "ResumeUnderControl for Thread %p\n", this); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // We need to establish the return value on the stack in the redirection stub, to // achieve crawlability. We use 'rcx' as the way to communicate the return value. // However, we are going to crawl in ReadyForAbort and we are going to resume in @@ -4713,9 +4713,9 @@ BOOL Thread::ResumeUnderControl(CONTEXT *pCtx) UINT_PTR keepRcx = m_OSContext->Rcx; m_OSContext->Rcx = (UINT_PTR)resumePC; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // We save the original ControlPC in LR on ARM. UINT_PTR originalLR = m_OSContext->Lr; m_OSContext->Lr = (UINT_PTR)resumePC; @@ -4723,20 +4723,20 @@ BOOL Thread::ResumeUnderControl(CONTEXT *pCtx) // Since we have set a new IP, we have to clear conditional execution flags too. UINT_PTR originalCpsr = m_OSContext->Cpsr; ClearITState(m_OSContext); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM EESetThreadContext(this, m_OSContext); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Restore the original LR now that the OS context has been updated to resume @ redirection function. m_OSContext->Lr = originalLR; m_OSContext->Cpsr = originalCpsr; -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // and restore. 
m_OSContext->Rcx = keepRcx; -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 SetIP(m_OSContext, resumePC); @@ -4749,7 +4749,7 @@ BOOL Thread::ResumeUnderControl(CONTEXT *pCtx) return fSuccess; } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX PCONTEXT Thread::GetAbortContext () @@ -4842,10 +4842,10 @@ bool Thread::SysStartSuspendForDebug(AppDomain *pAppDomain) // switch back and forth during a debug suspension -- until we // can get their Pending bit set. -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) DWORD dwSwitchCount = 0; RetrySuspension: -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX // We can not allocate memory after we suspend a thread. // Otherwise, we may deadlock the process when CLR is hosted. @@ -4870,7 +4870,7 @@ bool Thread::SysStartSuspendForDebug(AppDomain *pAppDomain) if (thread->m_fPreemptiveGCDisabled && str == STR_Success) { -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) WorkingOnThreadContextHolder workingOnThreadContext(thread); if (workingOnThreadContext.Acquired() && thread->HandledJITCase()) { @@ -4886,7 +4886,7 @@ bool Thread::SysStartSuspendForDebug(AppDomain *pAppDomain) goto RetrySuspension; } } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX // Remember that this thread will be running to a safe point FastInterlockIncrement(&m_DebugWillSyncCount); @@ -5027,7 +5027,7 @@ bool Thread::SysSweepThreadsForDebug(bool forceSync) #else // DISABLE_THREADSUSPEND // Suspend the thread -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) DWORD dwSwitchCount = 0; #endif @@ -5075,7 +5075,7 @@ bool Thread::SysSweepThreadsForDebug(bool forceSync) goto Label_MarkThreadAsSynced; } -#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX) // If the thread is in jitted code, HandledJitCase will try to hijack it; and the hijack // will toggle the GC. else @@ -5103,7 +5103,7 @@ bool Thread::SysSweepThreadsForDebug(bool forceSync) goto Label_MarkThreadAsSynced; } } -#endif // FEATURE_HIJACK && !PLATFORM_UNIX +#endif // FEATURE_HIJACK && !TARGET_UNIX // If we didn't take the thread out of the set, then resume it and give it another chance to reach a safe // point. @@ -5491,7 +5491,7 @@ StackWalkAction SWCB_GetExecutionState(CrawlFrame *pCF, VOID *pData) { // We already have the caller context available at this point _ASSERTE(pRDT->IsCallerContextValid); -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) // Why do we use CallerContextPointers below? // @@ -5547,9 +5547,9 @@ StackWalkAction SWCB_GetExecutionState(CrawlFrame *pCF, VOID *pData) // in the caller of the current non-interruptible frame. 
pES->m_ppvRetAddrPtr = (void **) pRDT->pCallerContextPointers->Lr; } -#elif defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#elif defined(TARGET_X86) || defined(TARGET_AMD64) pES->m_ppvRetAddrPtr = (void **) (EECodeManager::GetCallerSp(pRDT) - sizeof(void*)); -#else // _TARGET_X86_ || _TARGET_AMD64_ +#else // TARGET_X86 || TARGET_AMD64 PORTABILITY_ASSERT("Platform NYI"); #endif // _TARGET_???_ } @@ -5565,7 +5565,7 @@ StackWalkAction SWCB_GetExecutionState(CrawlFrame *pCF, VOID *pData) } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 STRESS_LOG2(LF_SYNC, LL_INFO1000, "Not in Jitted code at EIP = %p, &EIP = %p\n", GetControlPC(pCF->GetRegisterSet()), pCF->GetRegisterSet()->PCTAddr); #else STRESS_LOG1(LF_SYNC, LL_INFO1000, "Not in Jitted code at pc = %p\n", GetControlPC(pCF->GetRegisterSet())); @@ -5586,7 +5586,7 @@ StackWalkAction SWCB_GetExecutionState(CrawlFrame *pCF, VOID *pData) } else { -#if defined(_TARGET_X86_) && !defined(FEATURE_EH_FUNCLETS) +#if defined(TARGET_X86) && !defined(FEATURE_EH_FUNCLETS) // Second pass, looking for the address of the return address so we can // hijack: @@ -5687,17 +5687,17 @@ VOID * GetHijackAddr(Thread *pThread, EECodeInfo *codeInfo) ReturnKind returnKind = GetReturnKind(pThread, codeInfo); pThread->SetHijackReturnKind(returnKind); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (returnKind == RT_Float) { return reinterpret_cast(OnHijackFPTripThread); } -#endif // _TARGET_X86_ +#endif // TARGET_X86 return reinterpret_cast(OnHijackTripThread); } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // Get the ExecutionState for the specified SwitchIn thread. Note that this is // a 'StackWalk' call back (PSTACKWALKFRAMESCALLBACK). @@ -5714,7 +5714,7 @@ StackWalkAction SWCB_GetExecutionStateForSwitchIn(CrawlFrame *pCF, VOID *pData) if (pES->m_FirstPass) { if (pCF->IsFrameless()) { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 pES->m_FirstPass = FALSE; #else _ASSERTE(!"Platform NYI"); @@ -5730,7 +5730,7 @@ StackWalkAction SWCB_GetExecutionStateForSwitchIn(CrawlFrame *pCF, VOID *pData) } } else { -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (pCF->IsFrameless()) { PREGDISPLAY pRDT = pCF->GetRegisterSet(); if (pRDT) { @@ -5839,11 +5839,11 @@ StackWalkAction SWCB_GetExecutionStateForSwitchIn(CrawlFrame *pCF, VOID *pData) // the IP is an "int 3". // -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #define WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING BOOL ThreadCaughtInKernelModeExceptionHandling(Thread *pThread, CONTEXT *ctx) @@ -5947,7 +5947,7 @@ BOOL ThreadCaughtInKernelModeExceptionHandling(Thread *pThread, CONTEXT *ctx) return TRUE; } #endif //WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING -#endif //_TARGET_X86_ +#endif //TARGET_X86 //--------------------------------------------------------------------------------------- // @@ -6007,7 +6007,7 @@ BOOL Thread::GetSafelyRedirectableThreadContext(DWORD dwOptions, CONTEXT * pCtx, // If the OS *does* support trap frame reporting, then the call to IsContextSafeToRedirect below will return FALSE if we run // into this race. 
// -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (!(pCtx->ContextFlags & CONTEXT_EXCEPTION_REPORTING) && ((dwOptions & kPerfomLastRedirectIPCheck) != 0)) { @@ -6178,7 +6178,7 @@ BOOL Thread::HandledJITCase(BOOL ForTaskSwitchIn) return ret; } -#endif // !PLATFORM_UNIX +#endif // !TARGET_UNIX #endif // FEATURE_HIJACK @@ -6591,7 +6591,7 @@ void ThreadSuspend::SuspendEE(SUSPEND_REASON reason) #endif //TIME_SUSPEND } -#if defined(FEATURE_HIJACK) && defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && defined(TARGET_UNIX) // This function is called by PAL to check if the specified instruction pointer // is in a function where we can safely inject activation. @@ -6728,12 +6728,12 @@ bool Thread::InjectGcSuspension() return false; } -#endif // FEATURE_HIJACK && PLATFORM_UNIX +#endif // FEATURE_HIJACK && TARGET_UNIX // Initialize thread suspension support void ThreadSuspend::Initialize() { -#if defined(FEATURE_HIJACK) && defined(PLATFORM_UNIX) +#if defined(FEATURE_HIJACK) && defined(TARGET_UNIX) ::PAL_SetActivationFunction(HandleGCSuspensionForInterruptedThread, CheckActivationSafePoint); #endif } diff --git a/src/coreclr/src/vm/util.cpp b/src/coreclr/src/vm/util.cpp index 1309614331d99..dd3857827d169 100644 --- a/src/coreclr/src/vm/util.cpp +++ b/src/coreclr/src/vm/util.cpp @@ -547,7 +547,7 @@ SIZE_T GetRegOffsInCONTEXT(ICorDebugInfo::RegNum regNum) STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 switch(regNum) { case ICorDebugInfo::REGNUM_EAX: return offsetof(T_CONTEXT,Eax); @@ -567,7 +567,7 @@ SIZE_T GetRegOffsInCONTEXT(ICorDebugInfo::RegNum regNum) case ICorDebugInfo::REGNUM_EDI: return offsetof(T_CONTEXT,Edi); default: _ASSERTE(!"Bad regNum"); return (SIZE_T) -1; } -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) switch(regNum) { case ICorDebugInfo::REGNUM_RAX: return offsetof(CONTEXT, Rax); @@ -588,7 +588,7 @@ SIZE_T GetRegOffsInCONTEXT(ICorDebugInfo::RegNum regNum) case ICorDebugInfo::REGNUM_R15: return offsetof(CONTEXT, R15); default: _ASSERTE(!"Bad regNum"); return (SIZE_T)(-1); } -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) switch(regNum) { @@ -611,7 +611,7 @@ SIZE_T GetRegOffsInCONTEXT(ICorDebugInfo::RegNum regNum) case ICorDebugInfo::REGNUM_AMBIENT_SP: return offsetof(T_CONTEXT, Sp); default: _ASSERTE(!"Bad regNum"); return (SIZE_T)(-1); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) switch(regNum) { @@ -654,7 +654,7 @@ SIZE_T GetRegOffsInCONTEXT(ICorDebugInfo::RegNum regNum) #else PORTABILITY_ASSERT("GetRegOffsInCONTEXT is not implemented on this platform."); return (SIZE_T) -1; -#endif // _TARGET_X86_ +#endif // TARGET_X86 } SIZE_T DereferenceByRefVar(SIZE_T addr) @@ -877,7 +877,7 @@ SIZE_T *NativeVarStackAddr(const ICorDebugInfo::VarLoc & varLoc, } -#if defined(BIT64) +#if defined(HOST_64BIT) void GetNativeVarValHelper(SIZE_T* dstAddrLow, SIZE_T* dstAddrHigh, SIZE_T* srcAddr, SIZE_T size) { if (size == 1) @@ -899,7 +899,7 @@ void GetNativeVarValHelper(SIZE_T* dstAddrLow, SIZE_T* dstAddrHigh, SIZE_T* srcA UNREACHABLE(); } } -#endif // BIT64 +#endif // HOST_64BIT bool GetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, @@ -915,7 +915,7 @@ bool GetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, switch(varLoc.vlType) { -#if !defined(BIT64) +#if !defined(HOST_64BIT) SIZE_T regOffs; case ICorDebugInfo::VLT_REG: @@ -962,7 +962,7 @@ bool GetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, case ICorDebugInfo::VLT_FPSTK: _ASSERTE(!"NYI"); break; -#else // BIT64 +#else // HOST_64BIT case 
ICorDebugInfo::VLT_REG: case ICorDebugInfo::VLT_REG_FP: case ICorDebugInfo::VLT_STK: @@ -974,7 +974,7 @@ bool GetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, _ASSERTE(!"GNVV: This function should not be called for value types"); break; -#endif // BIT64 +#endif // HOST_64BIT default: _ASSERTE(!"Bad locType"); break; @@ -984,7 +984,7 @@ bool GetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, } -#if defined(BIT64) +#if defined(HOST_64BIT) void SetNativeVarValHelper(SIZE_T* dstAddr, SIZE_T valueLow, SIZE_T valueHigh, SIZE_T size) { if (size == 1) @@ -1006,7 +1006,7 @@ void SetNativeVarValHelper(SIZE_T* dstAddr, SIZE_T valueLow, SIZE_T valueHigh, S UNREACHABLE(); } } -#endif // BIT64 +#endif // HOST_64BIT bool SetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, @@ -1021,7 +1021,7 @@ bool SetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, switch(varLoc.vlType) { -#if !defined(BIT64) +#if !defined(HOST_64BIT) SIZE_T regOffs; case ICorDebugInfo::VLT_REG: @@ -1068,7 +1068,7 @@ bool SetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, case ICorDebugInfo::VLT_FPSTK: _ASSERTE(!"NYI"); break; -#else // BIT64 +#else // HOST_64BIT case ICorDebugInfo::VLT_REG: case ICorDebugInfo::VLT_REG_FP: case ICorDebugInfo::VLT_STK: @@ -1080,7 +1080,7 @@ bool SetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, _ASSERTE(!"GNVV: This function should not be called for value types"); break; -#endif // BIT64 +#endif // HOST_64BIT default: _ASSERTE(!"Bad locType"); break; @@ -1092,7 +1092,7 @@ bool SetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc, #ifndef CROSSGEN_COMPILE //----------------------------------------------------------------------------- -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // This function checks to see if GetLogicalProcessorInformation API is supported. // On success, this function allocates a SLPI array, sets nEntries to number @@ -1191,7 +1191,7 @@ size_t GetLogicalProcessorCacheSizeFromOS() return cache_size; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // This function returns the number of logical processors on a given physical chip. If it cannot // determine the number of logical cpus, or the machine is not populated uniformly with the same @@ -1207,9 +1207,9 @@ DWORD GetLogicalCpuCountFromOS() static DWORD val = 0; DWORD retVal = 0; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX retVal = PAL_GetLogicalCpuCountFromOS(); -#else // FEATURE_PAL +#else // TARGET_UNIX DWORD nEntries = 0; @@ -1241,7 +1241,7 @@ DWORD GetLogicalCpuCountFromOS() // (which would be best), but there are variants faster than these: // See http://en.wikipedia.org/wiki/Hamming_weight. // This is the naive implementation. 
-#if !BIT64 +#if !HOST_64BIT count = (pmask & 0x55555555) + ((pmask >> 1) & 0x55555555); count = (count & 0x33333333) + ((count >> 2) & 0x33333333); count = (count & 0x0F0F0F0F) + ((count >> 4) & 0x0F0F0F0F); @@ -1255,7 +1255,7 @@ DWORD GetLogicalCpuCountFromOS() pmask = (pmask & 0x0000ffff0000ffffull) + ((pmask >> 16) & 0x0000ffff0000ffffull); pmask = (pmask & 0x00000000ffffffffull) + ((pmask >> 32) & 0x00000000ffffffffull); count = static_cast(pmask); -#endif // !BIT64 else +#endif // !HOST_64BIT else assert (count > 0); if (prevcount) @@ -1280,12 +1280,12 @@ DWORD GetLogicalCpuCountFromOS() { delete[] pslpi; // release the memory allocated for the SLPI array } -#endif // FEATURE_PAL +#endif // TARGET_UNIX return retVal; } -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) #define CACHE_WAY_BITS 0xFFC00000 // number of cache WAYS-Associativity is returned in EBX[31:22] (10 bits) using cpuid function 4 #define CACHE_PARTITION_BITS 0x003FF000 // number of cache Physical Partitions is returned in EBX[21:12] (10 bits) using cpuid function 4 @@ -1543,9 +1543,9 @@ DWORD GetLogicalCpuCountFallback() return retVal; } -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 -#if defined (_TARGET_X86_) || defined (_TARGET_AMD64_) +#if defined (TARGET_X86) || defined (TARGET_AMD64) static size_t GetCacheSizeFromCpuId() { STATIC_CONTRACT_NOTHROW; @@ -1675,7 +1675,7 @@ static size_t GetCacheSizeFromCpuId() return param.maxSize; } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // fix this if/when AMD does multicore or SMT size_t GetCacheSizePerLogicalCpu(BOOL bTrueSize) @@ -1696,16 +1696,16 @@ size_t GetCacheSizePerLogicalCpu(BOOL bTrueSize) size_t maxTrueSize = 0; // For x86, always get from cpuid. 
-#if !defined (_TARGET_X86_) +#if !defined (TARGET_X86) maxSize = maxTrueSize = GetLogicalProcessorCacheSizeFromOS() ; // Returns the size of the highest level processor cache #endif -#if defined (_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined (TARGET_X86) || defined(TARGET_AMD64) if (maxSize == 0) { maxSize = maxTrueSize = GetCacheSizeFromCpuId(); } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // Bigger gen0 size helps arm64 targets maxSize = maxTrueSize * 3; #endif @@ -1729,7 +1729,7 @@ CLRMapViewOfFile( ) { #ifdef _DEBUG -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 char *tmp = new (nothrow) char; if (!tmp) @@ -1739,7 +1739,7 @@ CLRMapViewOfFile( } delete tmp; -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // _DEBUG LPVOID pv = MapViewOfFileEx(hFileMappingObject,dwDesiredAccess,dwFileOffsetHigh,dwFileOffsetLow,dwNumberOfBytesToMap,lpBaseAddress); @@ -1753,7 +1753,7 @@ CLRMapViewOfFile( } #ifdef _DEBUG -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (pv && g_pConfig && g_pConfig->ShouldInjectFault(INJECTFAULT_MAPVIEWOFFILE)) { MEMORY_BASIC_INFORMATION mbi; @@ -1768,7 +1768,7 @@ CLRMapViewOfFile( pv = ClrVirtualAlloc(lpBaseAddress, mbi.RegionSize, MEM_RESERVE, PAGE_NOACCESS); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // _DEBUG { } @@ -1787,13 +1787,13 @@ CLRUnmapViewOfFile( STATIC_CONTRACT_ENTRY_POINT; #ifdef _DEBUG -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (g_pConfig && g_pConfig->ShouldInjectFault(INJECTFAULT_MAPVIEWOFFILE)) { return ClrVirtualFree((LPVOID)lpBaseAddress, 0, MEM_RELEASE); } else -#endif // _TARGET_X86_ +#endif // TARGET_X86 #endif // _DEBUG { BOOL result = UnmapViewOfFile(lpBaseAddress); @@ -1841,7 +1841,7 @@ HMODULE CLRLoadLibrary(LPCWSTR lpLibFileName) return hmod; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static HMODULE CLRLoadLibraryExWorker(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags, DWORD *pLastError) @@ -1880,7 +1880,7 @@ HMODULE CLRLoadLibraryEx(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags) return hmod; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX BOOL CLRFreeLibrary(HMODULE hModule) { @@ -2457,7 +2457,7 @@ void DACNotify::DoJITPitchingNotification(MethodDesc *MethodDescPtr) } CONTRACTL_END; -#if defined(FEATURE_GDBJIT) && defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE) +#if defined(FEATURE_GDBJIT) && defined(TARGET_UNIX) && !defined(CROSSGEN_COMPILE) NotifyGdb::MethodPitched(MethodDescPtr); #endif TADDR Args[2] = { JIT_PITCHING_NOTIFICATION, (TADDR) MethodDescPtr }; @@ -2732,7 +2732,7 @@ int __cdecl stricmpUTF8(const char* szStr1, const char* szStr2) // // -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX /*============================GetCharacterInfoHelper============================ **Determines character type info (digit, whitespace, etc) for the given char. **Args: c is the character on which to operate. @@ -2752,7 +2752,7 @@ INT32 GetCharacterInfoHelper(WCHAR c, INT32 CharInfoType) } return(INT32)result; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX /*==============================nativeIsWhiteSpace============================== **The locally available version of IsWhiteSpace. 
Designed to be called by other @@ -2765,7 +2765,7 @@ BOOL COMCharacter::nativeIsWhiteSpace(WCHAR c) { WRAPPER_NO_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (c <= (WCHAR) 0x7F) // common case { BOOL result = (c == ' ') || (c == '\r') || (c == '\n') || (c == '\t') || (c == '\f') || (c == (WCHAR) 0x0B); @@ -2777,9 +2777,9 @@ BOOL COMCharacter::nativeIsWhiteSpace(WCHAR c) // GetCharacterInfoHelper costs around 160 instructions return((GetCharacterInfoHelper(c, CT_CTYPE1) & C1_SPACE)!=0); -#else // !FEATURE_PAL +#else // !TARGET_UNIX return iswspace(c); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } /*================================nativeIsDigit================================= @@ -2792,11 +2792,11 @@ BOOL COMCharacter::nativeIsWhiteSpace(WCHAR c) BOOL COMCharacter::nativeIsDigit(WCHAR c) { WRAPPER_NO_CONTRACT; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX return((GetCharacterInfoHelper(c, CT_CTYPE1) & C1_DIGIT)!=0); -#else // !FEATURE_PAL +#else // !TARGET_UNIX return iswdigit(c); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } BOOL RuntimeFileNotFound(HRESULT hr) @@ -2805,7 +2805,7 @@ BOOL RuntimeFileNotFound(HRESULT hr) return Assembly::FileNotFound(hr); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HRESULT GetFileVersion( // S_OK or error LPCWSTR wszFilePath, // Path to the executable. ULARGE_INTEGER* pFileVersion) // Put file version here. @@ -2859,6 +2859,6 @@ HRESULT GetFileVersion( // S_OK or error return S_OK; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif // !DACCESS_COMPILE diff --git a/src/coreclr/src/vm/util.hpp b/src/coreclr/src/vm/util.hpp index 05e9b45960672..d2b5639ab286c 100644 --- a/src/coreclr/src/vm/util.hpp +++ b/src/coreclr/src/vm/util.hpp @@ -42,7 +42,7 @@ #define WszMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE") // Hot cache lines need to be aligned to cache line size to improve performance -#if defined(_ARM64_) +#if defined(HOST_ARM64) #define MAX_CACHE_LINE_SIZE 128 #else #define MAX_CACHE_LINE_SIZE 64 @@ -99,10 +99,10 @@ FORCEINLINE void FastInterlockAnd(DWORD RAW_KEYWORD(volatile) *p, const int msk) InterlockedAnd((LONG *)p, msk); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Copied from malloc.h: don't want to bring in the whole header file. 
void * __cdecl _alloca(size_t); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifdef _PREFAST_ // Suppress prefast warning #6255: alloca indicates failure by raising a stack overflow exception @@ -597,9 +597,9 @@ inline BOOL CLRHosted() return g_fHostConfig; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HMODULE CLRLoadLibraryEx(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX HMODULE CLRLoadLibrary(LPCWSTR lpLibFileName); @@ -626,14 +626,14 @@ typedef Wrapper CLRMapViewHolder; typedef Wrapper CLRMapViewHolder; #endif -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX #ifndef DACCESS_COMPILE FORCEINLINE void VoidPALUnloadPEFile(void *ptr) { PAL_LOADUnloadPEFile(ptr); } typedef Wrapper PALPEFileHolder; #else typedef Wrapper PALPEFileHolder; #endif -#endif // FEATURE_PAL +#endif // TARGET_UNIX #define SetupThreadForComCall(OOMRetVal) \ MAKE_CURRENT_THREAD_AVAILABLE_EX(GetThreadNULLOk()); \ @@ -655,7 +655,7 @@ FORCEINLINE void VoidFreeNativeLibrary(NATIVE_LIBRARY_HANDLE h) if (h == NULL) return; -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX PAL_FreeLibraryDirect(h); #else FreeLibrary(h); @@ -664,7 +664,7 @@ FORCEINLINE void VoidFreeNativeLibrary(NATIVE_LIBRARY_HANDLE h) typedef Wrapper, VoidFreeNativeLibrary, NULL> NativeLibraryHandleHolder; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // A holder for memory blocks allocated by Windows. This holder (and any OS APIs you call // that allocate objects on your behalf) should not be used when the CLR is memory-hosted. @@ -685,7 +685,7 @@ FORCEINLINE void VoidFreeWinAllocatedBlock(LPVOID pv) typedef Wrapper, VoidFreeWinAllocatedBlock, NULL> WinAllocatedBlockHolder; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // For debugging, we can track arbitrary Can't-Stop regions. // In V1.0, this was on the Thread object, but we need to track this for threads w/o a Thread object. @@ -746,7 +746,7 @@ extern void InitializeClrNotifications(); GPTR_DECL(JITNotification, g_pNotificationTable); GVAL_DECL(ULONG32, g_dacNotificationFlags); -#if defined(FEATURE_PAL) && !defined(DACCESS_COMPILE) +#if defined(TARGET_UNIX) && !defined(DACCESS_COMPILE) inline void InitializeJITNotificationTable() @@ -754,7 +754,7 @@ InitializeJITNotificationTable() g_pNotificationTable = new (nothrow) JITNotification[1001]; } -#endif // FEATURE_PAL && !DACCESS_COMPILE +#endif // TARGET_UNIX && !DACCESS_COMPILE class JITNotifications { @@ -958,10 +958,10 @@ class COMCharacter { #define FORCEINLINE_NONDEBUG FORCEINLINE #endif -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // Extract the file version from an executable. 
HRESULT GetFileVersion(LPCWSTR wszFilePath, ULARGE_INTEGER* pFileVersion); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #endif /* _H_UTIL */ diff --git a/src/coreclr/src/vm/vars.cpp b/src/coreclr/src/vm/vars.cpp index d4d8af3453fee..2e3e8e9c89b64 100644 --- a/src/coreclr/src/vm/vars.cpp +++ b/src/coreclr/src/vm/vars.cpp @@ -197,10 +197,10 @@ GVAL_IMPL(bool, g_fProcessDetach); GVAL_IMPL_INIT(DWORD, g_fEEShutDown, 0); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX GVAL_IMPL(SIZE_T, g_runtimeLoadedBaseAddress); GVAL_IMPL(SIZE_T, g_runtimeVirtualSize); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifndef DACCESS_COMPILE diff --git a/src/coreclr/src/vm/vars.hpp b/src/coreclr/src/vm/vars.hpp index 72cd3add7b4b7..7ea536b52c972 100644 --- a/src/coreclr/src/vm/vars.hpp +++ b/src/coreclr/src/vm/vars.hpp @@ -20,7 +20,7 @@ typedef LPVOID DictionaryEntry; /* Define the implementation dependent size types */ #ifndef _INTPTR_T_DEFINED -#ifdef BIT64 +#ifdef HOST_64BIT typedef __int64 intptr_t; #else typedef int intptr_t; @@ -29,7 +29,7 @@ typedef int intptr_t; #endif #ifndef _UINTPTR_T_DEFINED -#ifdef BIT64 +#ifdef HOST_64BIT typedef unsigned __int64 uintptr_t; #else typedef unsigned int uintptr_t; @@ -38,7 +38,7 @@ typedef unsigned int uintptr_t; #endif #ifndef _PTRDIFF_T_DEFINED -#ifdef BIT64 +#ifdef HOST_64BIT typedef __int64 ptrdiff_t; #else typedef int ptrdiff_t; @@ -48,7 +48,7 @@ typedef int ptrdiff_t; #ifndef _SIZE_T_DEFINED -#ifdef BIT64 +#ifdef HOST_64BIT typedef unsigned __int64 size_t; #else typedef unsigned int size_t; @@ -523,9 +523,9 @@ enum FWStatus EXTERN DWORD g_FinalizerWaiterStatus; -#if defined(FEATURE_PAL) && defined(FEATURE_EVENT_TRACE) +#if defined(TARGET_UNIX) && defined(FEATURE_EVENT_TRACE) extern Volatile g_TriggerHeapDump; -#endif // FEATURE_PAL +#endif // TARGET_UNIX #ifndef DACCESS_COMPILE // @@ -638,10 +638,10 @@ inline bool CORDebuggerAttached() // EXTERN HINSTANCE g_hInstShim; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX GVAL_DECL(SIZE_T, g_runtimeLoadedBaseAddress); GVAL_DECL(SIZE_T, g_runtimeVirtualSize); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #ifndef MAXULONG diff --git a/src/coreclr/src/vm/virtualcallstub.cpp b/src/coreclr/src/vm/virtualcallstub.cpp index c8114e9a8d4bf..8b32443f4b777 100644 --- a/src/coreclr/src/vm/virtualcallstub.cpp +++ b/src/coreclr/src/vm/virtualcallstub.cpp @@ -84,7 +84,7 @@ UINT32 g_bucket_space_dead = 0; //# of bytes of abandoned buckets not ye // This is the number of times a successful chain lookup will occur before the // entry is promoted to the front of the chain. This is declared as extern because // the default value (CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT) is defined in the header. 
-#ifdef _TARGET_ARM64_ +#ifdef TARGET_ARM64 extern "C" size_t g_dispatch_cache_chain_success_counter; #else extern size_t g_dispatch_cache_chain_success_counter; @@ -557,7 +557,7 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA vtable_heap_commit_size = 8; vtable_heap_reserve_size = 8; } -#ifdef BIT64 +#ifdef HOST_64BIT // If we're on 64-bit, there's a ton of address space, so reserve more space to // try to avoid getting into the situation where the resolve heap is more than // a rel32 jump away from the dispatch heap, since this will cause us to produce @@ -579,7 +579,7 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA lookup_heap_commit_size *= sizeof(LookupHolder); DWORD dispatchHolderSize = sizeof(DispatchHolder); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 dispatchHolderSize = static_cast(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT)); #endif @@ -1041,12 +1041,12 @@ BOOL VirtualCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress) BOOL fIsOwner = isStub(stubStartAddress); -#if defined(_TARGET_X86_) && defined(FEATURE_PREJIT) +#if defined(TARGET_X86) && defined(FEATURE_PREJIT) if (!fIsOwner && parentDomain->IsDefaultDomain()) { fIsOwner = (stubStartAddress == GetEEFuncEntryPoint(StubDispatchFixupStub)); } -#endif // defined(_TARGET_X86_) && defined(FEATURE_PREJIT) +#endif // defined(TARGET_X86) && defined(FEATURE_PREJIT) return fIsOwner; } @@ -1563,7 +1563,7 @@ ResolveCacheElem* __fastcall VirtualCallStubManager::PromoteChainEntry(ResolveCa PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 , UINT_PTR flags #endif ) @@ -1606,7 +1606,7 @@ PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock, _ASSERTE(!"Throw returned"); } -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 if (flags & SDF_ResolvePromoteChain) { ResolveCacheElem * pElem = (ResolveCacheElem *)token; @@ -1654,7 +1654,7 @@ PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock, VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget, &stubKind); PREFIX_ASSUME(pMgr != NULL); -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 // Have we failed the dispatch stub too many times? if (flags & SDF_ResolveBackPatch) { @@ -1698,7 +1698,7 @@ void VirtualCallStubManager::BackPatchWorkerStatic(PCODE returnAddress, TADDR si END_ENTRYPOINT_VOIDRET; } -#if defined(_TARGET_X86_) && defined(FEATURE_PAL) +#if defined(TARGET_X86) && defined(TARGET_UNIX) void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect) { VirtualCallStubManager::BackPatchWorkerStatic(returnAddr, siteAddrForRegisterIndirect); @@ -1903,12 +1903,12 @@ PCODE VirtualCallStubManager::ResolveWorker(StubCallSite* pCallSite, PCODE pBackPatchFcn; PCODE pResolverFcn; -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // Only X86 implementation needs a BackPatch function pBackPatchFcn = (PCODE) GetEEFuncEntryPoint(BackPatchWorkerAsmStub); -#else // !_TARGET_X86_ +#else // !TARGET_X86 pBackPatchFcn = NULL; -#endif // !_TARGET_X86_ +#endif // !TARGET_X86 #ifdef CHAIN_LOOKUP pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerChainLookupAsmStub); @@ -2725,7 +2725,7 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad size_t dispatchHolderSize = sizeof(DispatchHolder); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // See comment around m_fShouldAllocateLongJumpDispatchStubs for explanation. 
if (m_fShouldAllocateLongJumpDispatchStubs INDEBUG(|| g_pConfig->ShouldGenerateLongJumpDispatchStub())) @@ -2744,7 +2744,7 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad DispatchHolder * holder = (DispatchHolder*) (void*) dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (!DispatchHolder::CanShortJumpDispatchStubReachFailTarget(addrOfFail, (LPCBYTE)holder)) { m_fShouldAllocateLongJumpDispatchStubs = TRUE; @@ -2755,7 +2755,7 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad holder->Initialize(addrOfCode, addrOfFail, (size_t)pMTExpected -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 , DispatchStub::e_TYPE_SHORT #endif ); @@ -2790,7 +2790,7 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad RETURN (holder); } -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 //---------------------------------------------------------------------------- /* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's are the addresses the stub is to transfer to depending on the test with pMTExpected @@ -2867,7 +2867,7 @@ ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE addr GC_TRIGGERS; INJECT_FAULT(COMPlusThrowOM();); PRECONDITION(addrOfResolver != NULL); -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) PRECONDITION(addrOfPatcher != NULL); #endif POSTCONDITION(CheckPointer(RETVAL)); @@ -3843,11 +3843,11 @@ void DispatchCache::LogStats() 2. For every bit we try to have half one bits and half zero bits 3. Adjacent entries when xor-ed should have 5,6 or 7 bits that are different */ -#ifdef BIT64 +#ifdef HOST_64BIT static const UINT16 tokenHashBits[64] = -#else // !BIT64 +#else // !HOST_64BIT static const UINT16 tokenHashBits[32] = -#endif // !BIT64 +#endif // !HOST_64BIT { 0xcd5, 0x8b9, 0x875, 0x439, 0xbf0, 0x38d, 0xa5b, 0x6a7, @@ -3858,7 +3858,7 @@ static const UINT16 tokenHashBits[32] = 0xf05, 0x994, 0x472, 0x626, 0x15c, 0x3a8, 0x56e, 0xe2d, -#ifdef BIT64 +#ifdef HOST_64BIT 0xe3c, 0xbe2, 0x58e, 0x0f3, 0x54d, 0x70f, 0xf88, 0xe2b, 0x353, 0x153, 0x4a5, 0x943, @@ -3867,7 +3867,7 @@ static const UINT16 tokenHashBits[32] = 0x0f7, 0x49a, 0xdd0, 0x366, 0xd84, 0xba5, 0x4c5, 0x6bc, 0x8ec, 0x0b9, 0x617, 0x85c, -#endif // BIT64 +#endif // HOST_64BIT }; /*static*/ UINT16 DispatchCache::HashToken(size_t token) diff --git a/src/coreclr/src/vm/virtualcallstub.h b/src/coreclr/src/vm/virtualcallstub.h index e2ceb6bd0628d..cca8df3b19d3c 100644 --- a/src/coreclr/src/vm/virtualcallstub.h +++ b/src/coreclr/src/vm/virtualcallstub.h @@ -19,7 +19,7 @@ #define CHAIN_LOOKUP -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // If this is uncommented, leaves a file "StubLog_.log" with statistics on the behavior // of stub-based interface dispatch. 
//#define STUB_LOGGING @@ -54,14 +54,14 @@ extern "C" PCODE STDCALL StubDispatchFixupWorker(TransitionBlock * pTransitionBl extern "C" PCODE STDCALL VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 , UINT_PTR flags #endif ); ///////////////////////////////////////////////////////////////////////////////////// -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) typedef INT32 DISPL; #endif @@ -133,11 +133,11 @@ struct StubCallSite public: -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr); PCODE GetCallerAddress(); -#else // !defined(_TARGET_X86_) +#else // !defined(TARGET_X86) // On platforms where we always use an indirection cell things // are much simpler - the siteAddr always stores a pointer to a // value that in turn points to the indirection cell. @@ -146,7 +146,7 @@ struct StubCallSite { LIMITED_METHOD_CONTRACT; m_siteAddr = dac_cast(siteAddr); m_returnAddr = returnAddr; } PCODE GetCallerAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; } -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) PCODE GetSiteTarget() { WRAPPER_NO_CONTRACT; return *(GetIndirectCell()); } void SetSiteTarget(PCODE newTarget); @@ -165,12 +165,12 @@ extern "C" void StubDispatchFixupStub(); // for lazy fixup of ngen extern "C" void ResolveWorkerAsmStub(); // resolve a token and transfer control to that method extern "C" void ResolveWorkerChainLookupAsmStub(); // for chaining of entries in the cache -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 extern "C" void BackPatchWorkerAsmStub(); // backpatch a call site to point to a different stub -#ifdef FEATURE_PAL +#ifdef TARGET_UNIX extern "C" void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); -#endif // FEATURE_PAL -#endif // _TARGET_X86_ +#endif // TARGET_UNIX +#endif // TARGET_X86 typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager; @@ -289,7 +289,7 @@ class VirtualCallStubManager : public StubManager lookup_heap(NULL), dispatch_heap(NULL), resolve_heap(NULL), -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_fShouldAllocateLongJumpDispatchStubs(FALSE), #endif lookups(NULL), @@ -498,7 +498,7 @@ class VirtualCallStubManager : public StubManager size_t dispatchToken, bool *pMayHaveReenteredCooperativeGCMode); -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // Used to allocate a long jump dispatch stub. See comment around // m_fShouldAllocateLongJumpDispatchStubs for explaination. DispatchHolder *GenerateDispatchStubLong(PCODE addrOfCode, @@ -590,12 +590,12 @@ class VirtualCallStubManager : public StubManager friend PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock, TADDR siteAddrForRegisterIndirect, size_t token -#ifndef _TARGET_X86_ +#ifndef TARGET_X86 , UINT_PTR flags #endif ); -#if defined(_TARGET_X86_) && defined(FEATURE_PAL) +#if defined(TARGET_X86) && defined(TARGET_UNIX) friend void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect); #endif @@ -734,7 +734,7 @@ class VirtualCallStubManager : public StubManager PTR_LoaderHeap resolve_heap; // resolve stubs go here PTR_LoaderHeap vtable_heap; // vtable-based jump stubs go here -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // When we layout the stub heaps, we put them close together in a sequential order // so that we maximize performance with respect to branch predictions. 
On AMD64, // dispatch stubs use a rel32 jump on failure to the resolve stub. This works for diff --git a/src/coreclr/src/vm/win32threadpool.cpp b/src/coreclr/src/vm/win32threadpool.cpp index b4bc6231e7d0c..b50cf6b4566df 100644 --- a/src/coreclr/src/vm/win32threadpool.cpp +++ b/src/coreclr/src/vm/win32threadpool.cpp @@ -35,7 +35,7 @@ Revision History: #include "configuration.h" -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX #ifndef DACCESS_COMPILE // APIs that must be accessed through dynamic linking. @@ -72,7 +72,7 @@ typedef BOOL (WINAPI * SetWaitableTimerExProc) ( SetWaitableTimerExProc g_pufnSetWaitableTimerEx = NULL; #endif // !DACCESS_COMPILE -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX BOOL ThreadpoolMgr::InitCompletionPortThreadpool = FALSE; HANDLE ThreadpoolMgr::GlobalCompletionPort; // used for binding io completions on file handles @@ -342,16 +342,16 @@ BOOL ThreadpoolMgr::Initialize() UnManagedPerAppDomainTPCount* pADTPCount; pADTPCount = PerAppDomainTPCountList::GetUnmanagedTPCount(); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //ThreadPool_CPUGroup CPUGroupInfo::EnsureInitialized(); if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) NumberOfProcessors = CPUGroupInfo::GetNumActiveProcessors(); else NumberOfProcessors = GetCurrentProcessCpuCount(); -#else // !FEATURE_PAL +#else // !TARGET_UNIX NumberOfProcessors = GetCurrentProcessCpuCount(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX InitPlatformVariables(); EX_TRY @@ -381,15 +381,15 @@ BOOL ThreadpoolMgr::Initialize() RetiredWorkerSemaphore = new CLRLifoSemaphore(); RetiredWorkerSemaphore->Create(0, ThreadCounter::MaxPossibleCount); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //ThreadPool_CPUGroup if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) RecycledLists.Initialize( CPUGroupInfo::GetNumActiveProcessors() ); else RecycledLists.Initialize( g_SystemInfo.dwNumberOfProcessors ); -#else // !FEATURE_PAL +#else // !TARGET_UNIX RecycledLists.Initialize( PAL_GetTotalCpuCount() ); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } EX_CATCH { @@ -447,14 +447,14 @@ BOOL ThreadpoolMgr::Initialize() counts.MaxWorking = MinLimitTotalCPThreads; CPThreadCounter.counts.AsLongLong = counts.AsLongLong; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX { GlobalCompletionPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, /*ignored for invalid handle value*/ NumberOfProcessors); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX HillClimbingInstance.Initialize(); @@ -473,7 +473,7 @@ void ThreadpoolMgr::InitPlatformVariables() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HINSTANCE hNtDll; HINSTANCE hCoreSynch; { @@ -1091,7 +1091,7 @@ BOOL ThreadpoolMgr::PostQueuedCompletionStatus(LPOVERLAPPED lpOverlapped, } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX EnsureInitialized(); _ASSERTE(GlobalCompletionPort != NULL); @@ -1128,7 +1128,7 @@ BOOL ThreadpoolMgr::PostQueuedCompletionStatus(LPOVERLAPPED lpOverlapped, #else SetLastError(ERROR_CALL_NOT_IMPLEMENTED); return FALSE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } @@ -1148,7 +1148,7 @@ void ThreadpoolMgr::WaitIOCompletionCallback( DWORD ret = AsyncCallbackCompletion((PVOID)lpOverlapped); } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // We need to make sure that the next jobs picked up by a completion port thread // is inserted into the queue after we start cleanup. The cleanup starts when a completion // port thread processes a special overlapped (overlappedForInitiateCleanup). 
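The comment above describes initiating completion-port cleanup by posting a special OVERLAPPED that worker threads recognize. A minimal sketch of that general sentinel-OVERLAPPED pattern (not the CoreCLR implementation; the names s_drainSentinel, RequestDrain and WorkerLoop are hypothetical, and only documented Win32 calls are used):

    // Sketch of the sentinel-OVERLAPPED drain pattern; error handling is
    // intentionally minimal. Not CoreCLR code.
    #include <windows.h>
    #include <stdio.h>

    static OVERLAPPED s_drainSentinel = {};   // hypothetical sentinel instance

    void RequestDrain(HANDLE completionPort)
    {
        // Workers identify the request by comparing the OVERLAPPED pointer.
        PostQueuedCompletionStatus(completionPort, 0, 0, &s_drainSentinel);
    }

    void WorkerLoop(HANDLE completionPort)
    {
        for (;;)
        {
            DWORD bytes = 0;
            ULONG_PTR key = 0;
            LPOVERLAPPED overlapped = NULL;
            BOOL ok = GetQueuedCompletionStatus(completionPort, &bytes, &key, &overlapped, INFINITE);

            if (overlapped == &s_drainSentinel)
            {
                printf("drain requested\n"); // begin cleanup before taking more work
                break;
            }
            if (!ok)
                continue;                    // dequeue or I/O failure; skipped in this sketch
            // ... handle a normal completion (bytes, key, overlapped) here ...
        }
    }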
@@ -1160,7 +1160,7 @@ void ThreadpoolMgr::WaitIOCompletionCallback( // job. OVERLAPPED overlappedForInitiateCleanup; OVERLAPPED overlappedForContinueCleanup; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX Volatile g_fCompletionPortDrainNeeded = FALSE; @@ -1178,7 +1178,7 @@ VOID ThreadpoolMgr::CallbackForContinueDrainageOfCompletionPortQueue( } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CounterHolder hldNumCPIT(&NumCPInfrastructureThreads); // It is OK if this overlapped is from a previous round. @@ -1201,7 +1201,7 @@ VOID ThreadpoolMgr::CallbackForContinueDrainageOfCompletionPortQueue( __SwitchToThread(100, CALLER_LIMITS_SPINNING); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } @@ -1212,7 +1212,7 @@ ThreadpoolMgr::CallbackForInitiateDrainageOfCompletionPortQueue( LPOVERLAPPED lpOverlapped ) { - #ifndef FEATURE_PAL + #ifndef TARGET_UNIX CONTRACTL { NOTHROW; @@ -1292,7 +1292,7 @@ ThreadpoolMgr::CallbackForInitiateDrainageOfCompletionPortQueue( } FastInterlockAnd(&g_fCompletionPortDrainNeeded, 0); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } extern void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode, @@ -1304,7 +1304,7 @@ void HostIOCompletionCallback( DWORD numBytesTransferred, LPOVERLAPPED lpOverlapped) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (lpOverlapped == &overlappedForInitiateCleanup) { ThreadpoolMgr::CallbackForInitiateDrainageOfCompletionPortQueue ( @@ -1326,12 +1326,12 @@ void HostIOCompletionCallback( numBytesTransferred, lpOverlapped); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } BOOL ThreadpoolMgr::DrainCompletionPortQueue() { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX CONTRACTL { NOTHROW; @@ -1351,7 +1351,7 @@ BOOL ThreadpoolMgr::DrainCompletionPortQueue() &overlappedForInitiateCleanup); #else return FALSE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } @@ -1764,13 +1764,13 @@ Thread* ThreadpoolMgr::CreateUnimpersonatedThread(LPTHREAD_START_ROUTINE lpStart W(".NET ThreadPool Worker")); } else { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HandleHolder token; BOOL bReverted = FALSE; bOK = RevertIfImpersonated(&bReverted, &token); if (bOK != TRUE) return NULL; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX threadHandle = CreateThread(NULL, // security descriptor 0, // default stack size lpStartAddress, @@ -1779,9 +1779,9 @@ Thread* ThreadpoolMgr::CreateUnimpersonatedThread(LPTHREAD_START_ROUTINE lpStart &threadId); SetThreadName(threadHandle, W(".NET ThreadPool Worker")); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX UndoRevert(bReverted, token); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } if (*pIsCLRThread && !bOK) @@ -2509,7 +2509,7 @@ DWORD ThreadpoolMgr::MinimumRemainingWait(LIST_ENTRY* waitInfo, unsigned int num } #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #pragma warning (disable : 4716) #else #pragma warning (disable : 4715) @@ -2716,7 +2716,7 @@ DWORD WINAPI ThreadpoolMgr::WaitThreadStart(LPVOID lpArgs) #endif #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #pragma warning (default : 4716) #else #pragma warning (default : 4715) @@ -2759,11 +2759,11 @@ void ThreadpoolMgr::ProcessWaitCompletion(WaitInfo* waitInfo, InterlockedIncrement(&waitInfo->refCount); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (FALSE == PostQueuedCompletionStatus((LPOVERLAPPED)asyncCallback, (LPOVERLAPPED_COMPLETION_ROUTINE)WaitIOCompletionCallback)) -#else // FEATURE_PAL +#else // TARGET_UNIX if (FALSE == QueueUserWorkItem(AsyncCallbackCompletion, asyncCallback, QUEUE_ONLY)) -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX 
ReleaseAsyncCallback(asyncCallback); } } @@ -2829,7 +2829,7 @@ DWORD WINAPI ThreadpoolMgr::AsyncCallbackCompletion(PVOID pArgs) ((WAITORTIMERCALLBACKFUNC) waitInfo->Callback) ( waitInfo->Context, asyncCallback->waitTimedOut != FALSE); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX Thread::IncrementIOThreadPoolCompletionCount(pThread); #endif } @@ -3120,7 +3120,7 @@ BOOL ThreadpoolMgr::BindIoCompletionCallback(HANDLE FileHandle, } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX errCode = S_OK; @@ -3147,13 +3147,13 @@ BOOL ThreadpoolMgr::BindIoCompletionCallback(HANDLE FileHandle, _ASSERTE(h == GlobalCompletionPort); return TRUE; -#else // FEATURE_PAL +#else // TARGET_UNIX SetLastError(ERROR_CALL_NOT_IMPLEMENTED); return FALSE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX BOOL ThreadpoolMgr::CreateCompletionPortThread(LPVOID lpArgs) { CONTRACTL @@ -3454,7 +3454,7 @@ DWORD WINAPI ThreadpoolMgr::CompletionPortThreadStart(LPVOID lpArgs) for (;;) { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (g_fCompletionPortDrainNeeded && pThread) { // The thread is not going to process IO job now. @@ -3463,7 +3463,7 @@ DWORD WINAPI ThreadpoolMgr::CompletionPortThreadStart(LPVOID lpArgs) pThread->MarkCompletionPortDrained(); } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX DWORD status = SafeWait(RetiredCPWakeupEvent,CP_THREAD_PENDINGIO_WAIT,FALSE); _ASSERTE(status == WAIT_TIMEOUT || status == WAIT_OBJECT_0); @@ -3833,7 +3833,7 @@ void ThreadpoolMgr::GrowCompletionPortThreadpoolIfNeeded() } } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Returns true if there is pending io on the thread. BOOL ThreadpoolMgr::IsIoPending() @@ -3846,7 +3846,7 @@ BOOL ThreadpoolMgr::IsIoPending() } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX int Status; ULONG IsIoPending; @@ -3867,12 +3867,12 @@ BOOL ThreadpoolMgr::IsIoPending() return TRUE; #else return FALSE; -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX -#ifdef BIT64 +#ifdef HOST_64BIT #pragma warning (disable : 4716) #else #pragma warning (disable : 4715) @@ -3889,7 +3889,7 @@ int ThreadpoolMgr::GetCPUBusyTime_NT(PROCESS_CPU_INFORMATION* pOldInfo) if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) { -#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) +#if !defined(FEATURE_REDHAWK) && !defined(TARGET_UNIX) FILETIME newIdleTime, newKernelTime, newUserTime; CPUGroupInfo::GetSystemTimes(&newIdleTime, &newKernelTime, &newUserTime); @@ -3947,14 +3947,14 @@ int ThreadpoolMgr::GetCPUBusyTime_NT(PROCESS_CPU_INFORMATION* pOldInfo) return (int)reading; } -#else // !FEATURE_PAL +#else // !TARGET_UNIX int ThreadpoolMgr::GetCPUBusyTime_NT(PAL_IOCP_CPU_INFORMATION* pOldInfo) { return PAL_GetCPUBusyTime(pOldInfo); } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // // A timer that ticks every GATE_THREAD_DELAY milliseconds. 
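The comment above introduces a timer that ticks every GATE_THREAD_DELAY milliseconds; the GateThreadTimer class that follows implements it with a waitable timer on Windows and a plain delay when TARGET_UNIX is defined. A rough sketch of that same periodic-wait split, using made-up names (PeriodicWait, GATE_DELAY_MS) rather than the runtime's own:

    // Sketch of a periodic wait split on TARGET_UNIX, in the spirit of the
    // GateThreadTimer below. Illustrative only; not CoreCLR code.
    const unsigned GATE_DELAY_MS = 500;        // assumed tick period for the sketch

    #ifndef TARGET_UNIX
    #include <windows.h>

    class PeriodicWait
    {
        HANDLE m_hTimer;
    public:
        PeriodicWait() : m_hTimer(CreateWaitableTimerW(NULL, FALSE, NULL))
        {
            if (m_hTimer != NULL)
            {
                LARGE_INTEGER dueTime;
                dueTime.QuadPart = -(LONGLONG)GATE_DELAY_MS * 10000; // relative, 100ns units
                // Auto-reset timer that re-fires every GATE_DELAY_MS milliseconds.
                SetWaitableTimer(m_hTimer, &dueTime, GATE_DELAY_MS, NULL, NULL, FALSE);
            }
        }
        ~PeriodicWait() { if (m_hTimer != NULL) CloseHandle(m_hTimer); }

        void Wait()
        {
            if (m_hTimer != NULL)
                WaitForSingleObject(m_hTimer, INFINITE);
            else
                Sleep(GATE_DELAY_MS);          // no timer available: plain delay
        }
    };
    #else  // TARGET_UNIX
    #include <thread>
    #include <chrono>

    class PeriodicWait
    {
    public:
        void Wait() { std::this_thread::sleep_for(std::chrono::milliseconds(GATE_DELAY_MS)); }
    };
    #endif // TARGET_UNIX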
@@ -3963,7 +3963,7 @@ int ThreadpoolMgr::GetCPUBusyTime_NT(PAL_IOCP_CPU_INFORMATION* pOldInfo) // class GateThreadTimer { -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX HANDLE m_hTimer; public: @@ -4013,7 +4013,7 @@ class GateThreadTimer } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX public: void Wait() @@ -4025,11 +4025,11 @@ class GateThreadTimer } CONTRACTL_END; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (m_hTimer) WaitForSingleObject(m_hTimer, INFINITE); else -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX __SwitchToThread(GATE_THREAD_DELAY, CALLER_LIMITS_SPINNING); } }; @@ -4054,7 +4054,7 @@ DWORD WINAPI ThreadpoolMgr::GateThreadStart(LPVOID lpArgs) // TODO: do we need to do this? timer.Wait(); // delay getting initial CPU reading -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX PROCESS_CPU_INFORMATION prevCPUInfo; if (!g_pufnNtQuerySystemInformation) @@ -4063,10 +4063,10 @@ DWORD WINAPI ThreadpoolMgr::GateThreadStart(LPVOID lpArgs) return 0; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX //GateThread can start before EESetup, so ensure CPU group information is initialized; CPUGroupInfo::EnsureInitialized(); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // initialize CPU usage information structure; prevCPUInfo.idleTime.QuadPart = 0; prevCPUInfo.kernelTime.QuadPart = 0; @@ -4082,7 +4082,7 @@ DWORD WINAPI ThreadpoolMgr::GateThreadStart(LPVOID lpArgs) * 2. GCCpuGroups=1, CLR creates GC threads for all processors in all CPU groups * thus, the threadpool thread would use a whole CPU group (if Thread_UseAllCpuGroups is not set). * ==> use g_SystemInfo.dwNumberOfProcessors. - * 3. !defined(FEATURE_PAL) but defined(FEATURE_CORESYSTEM), GetCurrentProcessCpuCount() + * 3. !defined(TARGET_UNIX) but defined(FEATURE_CORESYSTEM), GetCurrentProcessCpuCount() * returns g_SystemInfo.dwNumberOfProcessors ==> use g_SystemInfo.dwNumberOfProcessors; * Other cases: * 1. 
Normal case: the mask is all or a subset of all processors in a CPU group; @@ -4117,10 +4117,10 @@ DWORD WINAPI ThreadpoolMgr::GateThreadStart(LPVOID lpArgs) memset((void *)prevCPUInfo.usageBuffer, 0, prevCPUInfo.usageBufferSize); //must clear it with 0s GetCPUBusyTime_NT(&prevCPUInfo); -#else // !FEATURE_PAL +#else // !TARGET_UNIX PAL_IOCP_CPU_INFORMATION prevCPUInfo; GetCPUBusyTime_NT(&prevCPUInfo); // ignore return value the first time -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX BOOL IgnoreNextSample = FALSE; @@ -4165,7 +4165,7 @@ DWORD WINAPI ThreadpoolMgr::GateThreadStart(LPVOID lpArgs) IgnoreNextSample = TRUE; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // don't mess with CP thread pool settings if not initialized yet if (InitCompletionPortThreadpool) { @@ -4257,7 +4257,7 @@ DWORD WINAPI ThreadpoolMgr::GateThreadStart(LPVOID lpArgs) } } } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX if (0 == CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_DisableStarvationDetection)) { @@ -4359,7 +4359,7 @@ BOOL ThreadpoolMgr::SufficientDelaySinceLastDequeue() #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #pragma warning (default : 4716) #else #pragma warning (default : 4715) @@ -4467,7 +4467,7 @@ BOOL ThreadpoolMgr::CreateTimerQueueTimer(PHANDLE phNewTimer, } #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #pragma warning (disable : 4716) #else #pragma warning (disable : 4715) @@ -4574,7 +4574,7 @@ void ThreadpoolMgr::TimerThreadFire() } #ifdef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #pragma warning (default : 4716) #else #pragma warning (default : 4715) diff --git a/src/coreclr/src/vm/win32threadpool.h b/src/coreclr/src/vm/win32threadpool.h index a9f489c42c458..b30fcd60257e5 100644 --- a/src/coreclr/src/vm/win32threadpool.h +++ b/src/coreclr/src/vm/win32threadpool.h @@ -51,7 +51,7 @@ const int MaxFreeCPThreadsPerCPU=2; // upper limit on number of const int CpuUtilizationHigh=95; // remove threads when above this const int CpuUtilizationLow =80; // inject more threads if below this -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX extern HANDLE (WINAPI *g_pufnCreateIoCompletionPort)(HANDLE FileHandle, HANDLE ExistingCompletionPort, ULONG_PTR CompletionKey, @@ -67,7 +67,7 @@ extern int (WINAPI * g_pufnNtQuerySystemInformation) (SYSTEM_INFORMATION_CLASS S PVOID SystemInformation, ULONG SystemInformationLength, PULONG ReturnLength OPTIONAL); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX #define FILETIME_TO_INT64(t) (*(__int64*)&(t)) #define MILLI_TO_100NANO(x) ((x) * 10000) // convert from milliseond to 100 nanosecond unit @@ -141,10 +141,10 @@ class ThreadpoolMgr Counts GetCleanCounts() { LIMITED_METHOD_CONTRACT; -#ifdef BIT64 +#ifdef HOST_64BIT // VolatileLoad x64 bit read is atomic return DangerousGetDirtyCounts(); -#else // !BIT64 +#else // !HOST_64BIT // VolatileLoad may result in torn read Counts result; #ifndef DACCESS_COMPILE @@ -154,7 +154,7 @@ class ThreadpoolMgr result.AsLongLong = 0; //prevents prefast warning for DAC builds #endif return result; -#endif // !BIT64 +#endif // !HOST_64BIT } // @@ -321,10 +321,10 @@ class ThreadpoolMgr static BOOL HaveTimerInfosToFlush() { return TimerInfosToBeRecycled != NULL; } -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX static LPOVERLAPPED CompletionPortDispatchWorkWithinAppDomain(Thread* pThread, DWORD* pErrorCode, DWORD* pNumBytes, size_t* pKey); static void StoreOverlappedInfoInThread(Thread* pThread, DWORD dwErrorCode, DWORD dwNumBytes, size_t key, LPOVERLAPPED lpOverlapped); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX // Enable 
filtering of correlation ETW events for cases handled at a higher abstraction level @@ -617,7 +617,7 @@ class ThreadpoolMgr Volatile lock; // this is the spin lock DWORD count; // count of number of elements in the list Entry* root; // ptr to first element of recycled list -#ifndef BIT64 +#ifndef HOST_64BIT DWORD filler; // Pad the structure to a multiple of the 16. #endif @@ -739,21 +739,21 @@ class ThreadpoolMgr DWORD processorNumber = 0; -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups()) processorNumber = CPUGroupInfo::CalculateCurrentProcessorNumber(); else // Turns out GetCurrentProcessorNumber can return a value greater than the number of processors reported by // GetSystemInfo, if we're running in WOW64 on a machine with >32 processors. processorNumber = GetCurrentProcessorNumber()%NumberOfProcessors; -#else // !FEATURE_PAL +#else // !TARGET_UNIX if (PAL_HasGetCurrentProcessorNumber()) { // On linux, GetCurrentProcessorNumber which uses sched_getcpu() can return a value greater than the number // of processors reported by sysconf(_SC_NPROCESSORS_ONLN) when using OpenVZ kernel. processorNumber = GetCurrentProcessorNumber()%NumberOfProcessors; } -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX return pRecycledListPerProcessor[processorNumber][memType]; } }; @@ -912,7 +912,7 @@ class ThreadpoolMgr static void WINAPI DeregisterWait(WaitInfo* pArgs); -#ifndef FEATURE_PAL +#ifndef TARGET_UNIX // holds the aggregate of system cpu usage of all processors typedef struct _PROCESS_CPU_INFORMATION { @@ -940,7 +940,7 @@ class ThreadpoolMgr #else static int GetCPUBusyTime_NT(PAL_IOCP_CPU_INFORMATION* pOldInfo); -#endif // !FEATURE_PAL +#endif // !TARGET_UNIX private: static BOOL IsIoPending(); diff --git a/src/coreclr/src/zap/common.h b/src/coreclr/src/zap/common.h index 37de3e1c26371..5f033e54c1e0d 100644 --- a/src/coreclr/src/zap/common.h +++ b/src/coreclr/src/zap/common.h @@ -21,13 +21,13 @@ #include #include -#if !defined(_TARGET_X86_) || defined(FEATURE_PAL) +#if !defined(TARGET_X86) || defined(TARGET_UNIX) #ifndef FEATURE_EH_FUNCLETS #define FEATURE_EH_FUNCLETS #endif -#endif // !_TARGET_X86_ || FEATURE_PAL +#endif // !TARGET_X86 || TARGET_UNIX -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT typedef unsigned __int64 TARGET_POINTER_TYPE; #else typedef unsigned int TARGET_POINTER_TYPE; diff --git a/src/coreclr/src/zap/zapcode.cpp b/src/coreclr/src/zap/zapcode.cpp index 11e5b319ed5d3..c1fefd912041d 100644 --- a/src/coreclr/src/zap/zapcode.cpp +++ b/src/coreclr/src/zap/zapcode.cpp @@ -800,7 +800,7 @@ void ZapImage::AddRelocsForEHClauses(ZapExceptionInfo * pExceptionInfo) // ZapMethodHeader // -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 DWORD ZapCodeBlob::ComputeRVA(ZapWriter * pZapWriter, DWORD dwPos) { void * pData = GetData(); @@ -891,7 +891,7 @@ ZapCodeBlob * ZapCodeBlob::NewAlignedBlob(ZapWriter * pWriter, PVOID pData, SIZE return NULL; } } -#endif // _TARGET_X86_ +#endif // TARGET_X86 // See function prototype for details on why this iterator is "partial" BOOL ZapMethodHeader::PartialTargetMethodIterator::GetNext(CORINFO_METHOD_HANDLE *pHnd) @@ -1022,9 +1022,9 @@ ZapNode * ZapMethodEntryPointTable::CanDirectCall(ZapMethodEntryPoint * pMethodE if (m_pImage->canIntraModuleDirectCall(caller, callee, &reason, pMethodEntryPoint->GetAccessFlags())) { ZapNode * pCode = m_pImage->GetCompiledMethod(callee)->GetCode(); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM pCode = m_pImage->GetInnerPtr(pCode, THUMB_CODE); -#endif // 
_TARGET_ARM_ +#endif // TARGET_ARM return pCode; } else @@ -1067,10 +1067,10 @@ ZapGCInfo * ZapGCInfo::NewGCInfo(ZapWriter * pWriter, PVOID pGCInfo, SIZE_T cbGC memcpy(pZapGCInfo->GetGCInfo(), pGCInfo, cbGCInfo); memcpy(pZapGCInfo->GetUnwindInfo(), pUnwindInfo, cbUnwindInfo); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // Make sure the personality routine thunk is created pZapGCInfo->GetPersonalityRoutine(ZapImage::GetImage(pWriter)); -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) return pZapGCInfo; } #else @@ -1097,17 +1097,17 @@ void ZapUnwindInfo::Save(ZapWriter * pZapWriter) { T_RUNTIME_FUNCTION runtimeFunction; -#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) RUNTIME_FUNCTION__SetBeginAddress(&runtimeFunction, GetStartAddress()); runtimeFunction.UnwindData = m_pUnwindData->GetRVA(); -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) runtimeFunction.BeginAddress = GetStartAddress(); runtimeFunction.EndAddress = GetEndAddress(); ULONG unwindData = m_pUnwindData->GetRVA(); if (m_pUnwindData->GetType() == ZapNodeType_UnwindInfo) // Chained unwind info unwindData |= RUNTIME_FUNCTION_INDIRECT; runtimeFunction.UnwindData = unwindData; -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) runtimeFunction.BeginAddress = GetStartAddress(); ULONG unwindData = m_pUnwindData->GetRVA(); if (m_pUnwindData->GetType() == ZapNodeType_UnwindInfo) // Chained unwind info @@ -1143,7 +1143,7 @@ int __cdecl ZapUnwindInfo::CompareUnwindInfo(const void* a_, const void* b_) return 0; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) UINT ZapUnwindData::GetAlignment() { @@ -1192,7 +1192,7 @@ void ZapUnwindData::Save(ZapWriter * pZapWriter) #endif //REDHAWK } -#elif defined(_TARGET_X86_) +#elif defined(TARGET_X86) UINT ZapUnwindData::GetAlignment() { @@ -1214,7 +1214,7 @@ void ZapUnwindData::Save(ZapWriter * pZapWriter) pZapWriter->Write(pData, dwSize); } -#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) UINT ZapUnwindData::GetAlignment() { @@ -1298,10 +1298,10 @@ ZapUnwindData * ZapUnwindData::NewUnwindData(ZapWriter * pWriter, PVOID pData, S memcpy((void*)(pZapUnwindData + 1), pData, cbSize); -#if !defined(_TARGET_X86_) +#if !defined(TARGET_X86) // Make sure the personality routine thunk is created pZapUnwindData->GetPersonalityRoutine(ZapImage::GetImage(pWriter)); -#endif // !defined(_TARGET_X86_) +#endif // !defined(TARGET_X86) return pZapUnwindData; } @@ -1714,7 +1714,7 @@ DWORD ZapLazyHelperThunk::SaveWorker(ZapWriter * pZapWriter) BYTE buffer[42]; // Buffer big enough to hold any reasonable helper thunk sequence BYTE * p = buffer; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // mov edx, module *p++ = 0xBA; if (pImage != NULL) @@ -1726,7 +1726,7 @@ DWORD ZapLazyHelperThunk::SaveWorker(ZapWriter * pZapWriter) if (pImage != NULL) pImage->WriteReloc(buffer, (int)(p - buffer), m_pTarget, 0, IMAGE_REL_BASED_REL32); p += 4; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) *p++ = 0x48; *p++ = 0x8D; #ifdef UNIX_AMD64_ABI @@ -1745,7 +1745,7 @@ DWORD ZapLazyHelperThunk::SaveWorker(ZapWriter * pZapWriter) if (pImage != NULL) pImage->WriteReloc(buffer, (int)(p - buffer), m_pTarget, 0, IMAGE_REL_BASED_REL32); p += 4; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // movw r1, module *(WORD *)(p + 0) = 0xf240; *(WORD *)(p + 2) = 1 << 8; @@ -1762,7 +1762,7 @@ DWORD ZapLazyHelperThunk::SaveWorker(ZapWriter * pZapWriter) if (pImage != NULL) 
pImage->WriteReloc(buffer, (int)(p - buffer), m_pTarget, 0, IMAGE_REL_BASED_THUMB_BRANCH24); p += 4; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) // ldr x1, [PC+8] *(DWORD *)(p) =0x58000041; p += 4; diff --git a/src/coreclr/src/zap/zapcode.h b/src/coreclr/src/zap/zapcode.h index 82059bca85f03..6a1ad86f76a92 100644 --- a/src/coreclr/src/zap/zapcode.h +++ b/src/coreclr/src/zap/zapcode.h @@ -187,7 +187,7 @@ class ZapMethodHeader : public ZapNode }; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) class ZapCodeBlob : public ZapBlobWithRelocs { protected: diff --git a/src/coreclr/src/zap/zapheaders.cpp b/src/coreclr/src/zap/zapheaders.cpp index d25955dff184f..068c7d3176bd4 100644 --- a/src/coreclr/src/zap/zapheaders.cpp +++ b/src/coreclr/src/zap/zapheaders.cpp @@ -37,7 +37,7 @@ void ZapImage::SaveCorHeader() corHeader.MinorRuntimeVersion = VAL16(COR_VERSION_MINOR); corHeader.Flags = VAL32(COMIMAGE_FLAGS_IL_LIBRARY); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 if (IsReadyToRunCompilation()) { // Mark the ready-to-run image as x86-specific diff --git a/src/coreclr/src/zap/zapimage.cpp b/src/coreclr/src/zap/zapimage.cpp index f85062357c914..379dda06f93fe 100644 --- a/src/coreclr/src/zap/zapimage.cpp +++ b/src/coreclr/src/zap/zapimage.cpp @@ -425,7 +425,7 @@ void ZapImage::AllocateVirtualSections() // // .text section // -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // for ARM, put the resource section at the end if it's very large - this // is because b and bl instructions have a limited distance range of +-16MB // which we should not exceed if we can avoid it. @@ -516,7 +516,7 @@ void ZapImage::AllocateVirtualSections() // m_pHotGCSection = NewVirtualSection(pTextSection, IBCProfiledSection | WarmRange | GCInfoSection, sizeof(DWORD)); -#if !defined(_TARGET_ARM_) +#if !defined(TARGET_ARM) // For ARM, put these sections more towards the end because bl/b instructions have limited displacement // IL @@ -524,9 +524,9 @@ void ZapImage::AllocateVirtualSections() //ILMetadata/Resources sections are reported as a statically known warm ranges for now. 
m_pILMetaDataSection = NewVirtualSection(pTextSection, IBCProfiledSection | HotColdSortedRange | ILMetadataSection, sizeof(DWORD)); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if (!bigResourceSection) // for ARM, put the resource section at the end if it's very large - see comment above #endif m_pResourcesSection = NewVirtualSection(pTextSection, IBCUnProfiledSection | WarmRange | ResourcesSection); @@ -578,7 +578,7 @@ void ZapImage::AllocateVirtualSections() m_pColdCodeSection = NewVirtualSection(pTextSection, IBCProfiledSection | IBCUnProfiledSection | ColdRange | CodeSection, codeSectionAlign); m_pColdCodeSection->SetDefaultFill(DEFAULT_CODE_BUFFER_INIT); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) // For ARM, put these sections more towards the end because bl/b instructions have limited displacement // IL @@ -589,7 +589,7 @@ void ZapImage::AllocateVirtualSections() if (bigResourceSection) // for ARM, put the resource section at the end if it's very large - see comment above m_pResourcesSection = NewVirtualSection(pTextSection, IBCUnProfiledSection | WarmRange | ResourcesSection); -#endif // _TARGET_ARM_ +#endif // TARGET_ARM m_pColdCodeMapSection = NewVirtualSection(pTextSection, IBCProfiledSection | IBCUnProfiledSection | ColdRange | CodeManagerSection, sizeof(DWORD)); #if !defined(FEATURE_EH_FUNCLETS) @@ -1227,10 +1227,10 @@ void ZapImage::CalculateZapBaseAddress() if (!m_ModuleDecoder.IsDll()) { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // We use 30000000 for an exe baseAddress = 0x30000000; -#elif defined(_TARGET_64BIT_) +#elif defined(TARGET_64BIT) // We use 04000000 for an exe // which is remapped to 0x644`88000000 on x64 baseAddress = 0x04000000; @@ -1238,10 +1238,10 @@ void ZapImage::CalculateZapBaseAddress() } else { -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // We start a 31000000 for the main assembly with the manifest baseAddress = 0x31000000; -#elif defined(_TARGET_64BIT_) +#elif defined(TARGET_64BIT) // We start a 05000000 for the main assembly with the manifest // which is remapped to 0x644`8A000000 on x64 baseAddress = 0x05000000; @@ -1298,7 +1298,7 @@ void ZapImage::CalculateZapBaseAddress() // Now we remap the 32-bit address range used for x86 and PE32 images into the // upper address range used on 64-bit platforms // -#if USE_UPPER_ADDRESS // Implies _TARGET_64BIT_ +#if USE_UPPER_ADDRESS // Implies TARGET_64BIT if (baseAddress < 0x80000000) { if (baseAddress < 0x40000000) @@ -1510,11 +1510,11 @@ void ZapImage::OutputTables() { USHORT dllCharacteristics = 0; -#ifndef _TARGET_64BIT_ +#ifndef TARGET_64BIT dllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NO_SEH; #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Images without NX compat bit set fail to load on ARM dllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT; #endif @@ -1528,7 +1528,7 @@ void ZapImage::OutputTables() #endif // _DEBUG { dllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE; -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Large address aware, required for High Entry VA, is always enabled for 64bit native images. dllCharacteristics |= IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA; #endif @@ -1544,10 +1544,10 @@ void ZapImage::OutputTables() SetSizeOfStackCommit(m_ModuleDecoder.GetSizeOfStackCommit()); } -#if defined(FEATURE_PAL) && !defined(_TARGET_64BIT_) +#if defined(TARGET_UNIX) && !defined(TARGET_64BIT) // To minimize wasted VA space on 32-bit systems, align file to page boundaries (presumed to be 4K). 
SetFileAlignment(0x1000); -#elif defined(_TARGET_ARM_) && defined(FEATURE_CORESYSTEM) +#elif defined(TARGET_ARM) && defined(FEATURE_CORESYSTEM) if (!IsReadyToRunCompilation()) { // On ARM CoreSys builds, crossgen will use 4k file alignment, as requested by Phone perf team @@ -3619,7 +3619,7 @@ ZapNode * ZapImage::GetHelperThunk(CorInfoHelpFunc ftnNum) if (pHelperThunk == NULL) { pHelperThunk = new (GetHeap()) ZapHelperThunk(ftnNum); -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM pHelperThunk = GetInnerPtr(pHelperThunk, THUMB_CODE); #endif m_pHelperThunks[ftnNum] = pHelperThunk; diff --git a/src/coreclr/src/zap/zapimage.h b/src/coreclr/src/zap/zapimage.h index 86b7acfa8b0c6..68b833bd0f44d 100644 --- a/src/coreclr/src/zap/zapimage.h +++ b/src/coreclr/src/zap/zapimage.h @@ -48,13 +48,13 @@ class ZapperStats; #undef SAFERELEASE #define SAFERELEASE(p) if ((p) != NULL) { IUnknown * _ = (p); (p) = NULL; _->Release(); }; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) #define DEFAULT_CODE_BUFFER_INIT 0xcc // breakpoint #else #define DEFAULT_CODE_BUFFER_INIT 0 #endif -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT // Optimize for speed #define DEFAULT_CODE_ALIGN 16 #else @@ -62,9 +62,9 @@ class ZapperStats; #define DEFAULT_CODE_ALIGN 4 #endif -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM #define MINIMUM_CODE_ALIGN 2 -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 #define MINIMUM_CODE_ALIGN 4 #else #define MINIMUM_CODE_ALIGN 1 @@ -120,7 +120,7 @@ enum ZapImportSectionType ZapImportSectionType_Handle, // Unspecified handle ZapImportSectionType_TypeHandle, // Type and method handles have to have their own section so we can restore them correctly ZapImportSectionType_MethodHandle, -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM ZapImportSectionType_PCode, // Code pointers have to be in a own section on ARM because of they are tagged differently #endif ZapImportSectionType_StringHandle, // String handles require special handling for interning diff --git a/src/coreclr/src/zap/zapimport.cpp b/src/coreclr/src/zap/zapimport.cpp index 745c091f8525d..f8a527127d3f9 100644 --- a/src/coreclr/src/zap/zapimport.cpp +++ b/src/coreclr/src/zap/zapimport.cpp @@ -109,7 +109,7 @@ c_ImportSectionProperties[ZapImportSectionType_Count] = { /* ZapImportSectionType_Handle, */ CORCOMPILE_IMPORT_TYPE_UNKNOWN, 0, 0 }, { /* ZapImportSectionType_TypeHandle, */ CORCOMPILE_IMPORT_TYPE_TYPE_HANDLE, TARGET_POINTER_SIZE, 0 }, { /* ZapImportSectionType_MethodHandle, */ CORCOMPILE_IMPORT_TYPE_METHOD_HANDLE, TARGET_POINTER_SIZE, 0 }, -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM { /* ZapImportSectionType_PCode, */ CORCOMPILE_IMPORT_TYPE_UNKNOWN, 0, CORCOMPILE_IMPORT_FLAGS_PCODE }, #endif { /* ZapImportSectionType_StringHandle, */ CORCOMPILE_IMPORT_TYPE_STRING_HANDLE, TARGET_POINTER_SIZE, 0 }, @@ -489,11 +489,11 @@ void ZapExternalMethodThunk::Save(ZapWriter * pZapWriter) CORCOMPILE_EXTERNAL_METHOD_THUNK thunk; memset(&thunk, DEFAULT_CODE_BUFFER_INIT, sizeof(thunk)); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) thunk.callJmp[0] = 0xE8; // call rel32 pImage->WriteReloc(&thunk, 1, helper, 0, IMAGE_REL_BASED_REL32); thunk.precodeType = _PRECODE_EXTERNAL_METHOD_THUNK; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Setup the call to ExternalMethodFixupStub // // mov r12, pc @@ -515,7 +515,7 @@ void ZapExternalMethodThunk::Save(ZapWriter * pZapWriter) // Setup the initial target to be our assembly helper. 
pImage->WriteReloc(&thunk, offsetof(CORCOMPILE_EXTERNAL_METHOD_THUNK, m_pTarget), helper, 0, IMAGE_REL_BASED_PTR); -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) thunk.m_rgCode[0] = 0x1000000C; //adr x12, #0 thunk.m_rgCode[1] = 0xF940098A; //ldr x10, [x12, #16] @@ -801,13 +801,13 @@ void ZapVirtualMethodThunk::Save(ZapWriter * pZapWriter) _ASSERTE(FitsIn((SIZE_T)GetHandle2() - 1)); USHORT slotNum = (USHORT)((SIZE_T)GetHandle2() - 1); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) thunk.callJmp[0] = 0xE8; // call rel32 pImage->WriteReloc(&thunk, 1, helper, 0, IMAGE_REL_BASED_REL32); // Mark this as a Virtual Import Thunk thunk.precodeType = _PRECODE_VIRTUAL_IMPORT_THUNK; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Setup the call to VirtualMethodFixupStub // // mov r12, pc @@ -830,7 +830,7 @@ void ZapVirtualMethodThunk::Save(ZapWriter * pZapWriter) // Slot ID is setup below, so now setup the initial target // to be our assembly helper. pImage->WriteReloc(&thunk, offsetof(CORCOMPILE_VIRTUAL_IMPORT_THUNK, m_pTarget), helper, 0, IMAGE_REL_BASED_PTR); - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) thunk.m_rgCode[0] = 0x1000000C; //adr x12, #0 thunk.m_rgCode[1] = 0xF940098A; //ldr x10, [x12, #16] @@ -1232,7 +1232,7 @@ class ZapFunctionEntryImport : public ZapImport return ZapNodeType_Import_FunctionEntry; } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM virtual ZapImportSectionType ComputePlacement(ZapImage * pImage, BOOL * pfIsEager, BOOL * pfNeedsSignature) { ZapImport::ComputePlacement(pImage, pfIsEager, pfNeedsSignature); @@ -1974,7 +1974,7 @@ class ZapIndirectHelperThunk : public ZapImport } }; -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM static void MovRegImm(BYTE* p, int reg) { *(WORD *)(p + 0) = 0xF240; @@ -1982,7 +1982,7 @@ static void MovRegImm(BYTE* p, int reg) *(WORD *)(p + 4) = 0xF2C0; *(WORD *)(p + 6) = (UINT16)(reg << 8); } -#endif // _TARGET_ARM_ +#endif // TARGET_ARM DWORD ZapIndirectHelperThunk::SaveWorker(ZapWriter * pZapWriter) { @@ -1991,7 +1991,7 @@ DWORD ZapIndirectHelperThunk::SaveWorker(ZapWriter * pZapWriter) BYTE buffer[44]; BYTE * p = buffer; -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) if (IsDelayLoadHelper()) { // xor eax, eax @@ -2027,7 +2027,7 @@ DWORD ZapIndirectHelperThunk::SaveWorker(ZapWriter * pZapWriter) if (pImage != NULL) pImage->WriteReloc(buffer, (int) (p - buffer), pImage->GetImportTable()->GetHelperImport(GetReadyToRunHelper()), 0, IMAGE_REL_BASED_PTR); p += 4; -#elif defined(_TARGET_AMD64_) +#elif defined(TARGET_AMD64) if (IsDelayLoadHelper()) { if (m_pCell != NULL) @@ -2090,7 +2090,7 @@ DWORD ZapIndirectHelperThunk::SaveWorker(ZapWriter * pZapWriter) if (pImage != NULL) pImage->WriteReloc(buffer, (int) (p - buffer), pImage->GetImportTable()->GetHelperImport(GetReadyToRunHelper()), 0, IMAGE_REL_BASED_REL32); p += 4; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) if (IsDelayLoadHelper()) { // r4 contains indirection cell @@ -2164,7 +2164,7 @@ DWORD ZapIndirectHelperThunk::SaveWorker(ZapWriter * pZapWriter) *(WORD *)p = 0x4760; p += 2; } -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) if (IsDelayLoadHelper()) { // x11 contains indirection cell @@ -2250,7 +2250,7 @@ void ZapImportTable::PlaceIndirectHelperThunk(ZapNode * pImport) ZapNode * ZapImportTable::GetIndirectHelperThunk(ReadyToRunHelper helperNum, PVOID pArg) { ZapNode * pImport = GetImport((void *)helperNum, pArg); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) pImport = 
m_pImage->GetInnerPtr(pImport, THUMB_CODE); #endif return pImport; @@ -2273,7 +2273,7 @@ ZapNode * ZapImportTable::GetPlacedIndirectHelperThunk(ReadyToRunHelper helperNu } if (!pImport->IsPlaced()) PlaceIndirectHelperThunk(pImport); -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) pImport = m_pImage->GetInnerPtr(pImport, THUMB_CODE); #endif return pImport; diff --git a/src/coreclr/src/zap/zapinfo.cpp b/src/coreclr/src/zap/zapinfo.cpp index 69476d0798171..eef524a167aad 100644 --- a/src/coreclr/src/zap/zapinfo.cpp +++ b/src/coreclr/src/zap/zapinfo.cpp @@ -45,7 +45,7 @@ ZapInfo::ZapInfo(ZapImage * pImage, mdMethodDef md, CORINFO_METHOD_HANDLE handle m_pUnwindInfo(NULL), m_pUnwindInfoFragments(NULL), -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) m_pChainedColdUnwindInfo(NULL), #endif #endif // FEATURE_EH_FUNCLETS @@ -114,7 +114,7 @@ void ZapInfo::ResetForJitRetry() #ifdef FEATURE_EH_FUNCLETS m_pUnwindInfoFragments = NULL; m_pUnwindInfo = NULL; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) m_pChainedColdUnwindInfo = NULL; #endif #endif // FEATURE_EH_FUNCLETS @@ -454,7 +454,7 @@ void ZapInfo::CompileMethod() } #endif -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) if (methodAttribs & CORINFO_FLG_JIT_INTRINSIC) { // Skip generating hardware intrinsic method bodies. @@ -536,7 +536,7 @@ void ZapInfo::CompileMethod() MethodCompileComplete(m_currentMethodInfo.ftn); -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 // The x86 JIT over estimates the code size. Trim the blob size down to // the actual size. // We can do this only for non-split code. Adjusting the code size for split @@ -817,13 +817,13 @@ void ZapInfo::PublishCompiledMethod() // Set the combined GCInfo + UnwindInfo blob m_pUnwindInfo->SetUnwindData(pMethod->m_pGCInfo); -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) if (m_pChainedColdUnwindInfo != NULL) { // Chain the cold unwind info with the hot unwind info m_pChainedColdUnwindInfo->SetUnwindData(m_pUnwindInfo); } -#endif // _TARGET_AMD64_ +#endif // TARGET_AMD64 #endif // FEATURE_EH_FUNCLETS @@ -1158,12 +1158,12 @@ void * ZapInfo::allocGCInfo(size_t size) { _ASSERTE(m_pGCInfo == NULL); -#ifdef BIT64 +#ifdef HOST_64BIT if (size & 0xFFFFFFFF80000000LL) { IfFailThrow(CORJIT_OUTOFMEM); } -#endif // BIT64 +#endif // HOST_64BIT m_pGCInfo = new BYTE[size]; m_cbGCInfo = size; @@ -1364,7 +1364,7 @@ void ZapInfo::allocUnwindInfo ( _ASSERTE(m_pUnwindInfo == NULL); m_pUnwindInfo = pUnwindInfo; } -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) else if (funcKind == CORJIT_FUNC_ROOT && pColdCode != NULL) { @@ -1814,7 +1814,7 @@ void * ZapInfo::getHelperFtn (CorInfoHelpFunc ftnNum, void **ppIndirection) case CORINFO_HELP_PROF_FCN_TAILCALL: *ppIndirection = m_pImage->GetInnerPtr(GetProfilingHandleImport(), kZapProfilingHandleImportValueIndexTailcallAddr * TARGET_POINTER_SIZE); return NULL; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 case CORINFO_HELP_STOP_FOR_GC: // Force all calls in ngen images for this helper to use an indirect call. 
// We cannot use a jump stub to reach this helper because @@ -1837,7 +1837,7 @@ void * ZapInfo::getHelperFtn (CorInfoHelpFunc ftnNum, void **ppIndirection) { pHelperThunk = new (m_pImage->GetHeap()) ZapHelperThunk(dwHelper); } -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) if ((dwHelper & CORCOMPILE_HELPER_PTR) == 0) pHelperThunk = m_pImage->GetInnerPtr(pHelperThunk, THUMB_CODE); #endif @@ -1927,7 +1927,7 @@ PVOID ZapInfo::embedDirectCall(CORINFO_METHOD_HANDLE ftn, pEntryPointOrThunkToEmbed = m_pImage->GetImportTable()->GetExternalMethodThunk(ftn); } -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM pEntryPointOrThunkToEmbed = m_pImage->GetInnerPtr(pEntryPointOrThunkToEmbed, THUMB_CODE); #endif @@ -2149,9 +2149,9 @@ DWORD FilterNamedIntrinsicMethodAttribs(DWORD attribs, CORINFO_METHOD_HANDLE ftn bool fIsHWIntrinsic = false; bool fTreatAsRegularMethodCall = false; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) fIsPlatformHWIntrinsic = strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0; -#elif _TARGET_ARM64_ +#elif TARGET_ARM64 fIsPlatformHWIntrinsic = strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0; #endif @@ -2169,7 +2169,7 @@ DWORD FilterNamedIntrinsicMethodAttribs(DWORD attribs, CORINFO_METHOD_HANDLE ftn // answer for the CPU the code is running on. fTreatAsRegularMethodCall = (fIsGetIsSupportedMethod && fIsPlatformHWIntrinsic) || (!fIsPlatformHWIntrinsic && fIsHWIntrinsic); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) if (fIsPlatformHWIntrinsic) { // Simplify the comparison logic by grabbing the name of the ISA @@ -2210,7 +2210,7 @@ DWORD FilterNamedIntrinsicMethodAttribs(DWORD attribs, CORINFO_METHOD_HANDLE ftn fTreatAsRegularMethodCall = strcmp(methodName, "Round") == 0; } } -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) if (fTreatAsRegularMethodCall) { @@ -2660,13 +2660,13 @@ void ZapInfo::recordRelocation(void *location, void *target, { case IMAGE_REL_BASED_ABSOLUTE: case IMAGE_REL_BASED_PTR: -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) case IMAGE_REL_BASED_REL32: -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 location = (PBYTE)location + slotNum; break; -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) case IMAGE_REL_BASED_THUMB_MOV32: case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL: case IMAGE_REL_BASED_THUMB_BRANCH24: @@ -2690,7 +2690,7 @@ void ZapInfo::recordRelocation(void *location, void *target, break; #endif -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) case IMAGE_REL_ARM64_BRANCH26: case IMAGE_REL_ARM64_PAGEBASE_REL21: case IMAGE_REL_ARM64_PAGEOFFSET_12A: @@ -2781,13 +2781,13 @@ void ZapInfo::recordRelocation(void *location, void *target, *(UNALIGNED TARGET_POINTER_TYPE *)location = (TARGET_POINTER_TYPE)targetOffset; break; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) case IMAGE_REL_BASED_REL32: *(UNALIGNED INT32 *)location = targetOffset + addlDelta; break; -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) case IMAGE_REL_BASED_THUMB_MOV32: case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL: PutThumb2Mov32((UINT16 *)location, targetOffset); @@ -2800,7 +2800,7 @@ void ZapInfo::recordRelocation(void *location, void *target, break; #endif -#if 
defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) case IMAGE_REL_ARM64_BRANCH26: if (!FitsInRel28(targetOffset)) ThrowHR(COR_E_OVERFLOW); @@ -2839,15 +2839,15 @@ void ZapInfo::recordRelocation(void *location, void *target, WORD ZapInfo::getRelocTypeHint(void * target) { -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 // There should be no external pointers return IMAGE_REL_BASED_REL32; -#elif defined(_TARGET_ARM_) +#elif defined(TARGET_ARM) // Use full 32-bit branch targets when retrying compilation on ARM if (m_zapper->m_pOpt->m_fNGenLastRetry) return (WORD)-1; return IMAGE_REL_BASED_THUMB_BRANCH24; -#elif defined(_TARGET_ARM64_) +#elif defined(TARGET_ARM64) return IMAGE_REL_ARM64_BRANCH26; #else // No hints @@ -4004,7 +4004,7 @@ CorInfoIntrinsics ZapInfo::getIntrinsicID(CORINFO_METHOD_HANDLE method, { CorInfoIntrinsics intrinsicID = m_pEEJitInfo->getIntrinsicID(method, pMustExpand); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) if ((intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor)) { // These are normally handled via the SSE4.1 instructions ROUNDSS/ROUNDSD. @@ -4012,7 +4012,7 @@ CorInfoIntrinsics ZapInfo::getIntrinsicID(CORINFO_METHOD_HANDLE method, // fallback to the method call implementation instead. intrinsicID = CORINFO_INTRINSIC_Illegal; } -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) return intrinsicID; } @@ -4030,7 +4030,7 @@ CorInfoUnmanagedCallConv ZapInfo::getUnmanagedCallConv(CORINFO_METHOD_HANDLE met BOOL ZapInfo::pInvokeMarshalingRequired(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig) { -#if defined(_TARGET_X86_) && defined(PLATFORM_UNIX) +#if defined(TARGET_X86) && defined(TARGET_UNIX) // FUTURE ReadyToRun: x86 pinvoke stubs on Unix platforms if (IsReadyToRunCompilation()) return TRUE; diff --git a/src/coreclr/src/zap/zapinfo.h b/src/coreclr/src/zap/zapinfo.h index 338ad74911db2..a81a213c2e9ea 100644 --- a/src/coreclr/src/zap/zapinfo.h +++ b/src/coreclr/src/zap/zapinfo.h @@ -166,7 +166,7 @@ class ZapInfo ZapUnwindInfo * m_pUnwindInfo; ZapUnwindInfo * m_pUnwindInfoFragments; -#if defined(_TARGET_AMD64_) +#if defined(TARGET_AMD64) ZapUnwindInfo * m_pChainedColdUnwindInfo; #endif #endif // FEATURE_EH_FUNCLETS diff --git a/src/coreclr/src/zap/zapper.cpp b/src/coreclr/src/zap/zapper.cpp index 81a1cadaa9d9d..fc5ba32d95260 100644 --- a/src/coreclr/src/zap/zapper.cpp +++ b/src/coreclr/src/zap/zapper.cpp @@ -265,11 +265,11 @@ void ZapperOptions::SetCompilerFlags(void) m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC); m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT); -#if defined(_TARGET_ARM_) -# if defined(PLATFORM_UNIX) +#if defined(TARGET_ARM) +# if defined(TARGET_UNIX) m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS); -# endif // defined(PLATFORM_UNIX) -#endif // defined(_TARGET_ARM_) +# endif // defined(TARGET_UNIX) +#endif // defined(TARGET_ARM) } /* --------------------------------------------------------------------------- * @@ -361,7 +361,7 @@ Zapper::Zapper(NGenOptions *pOptions, bool fromDllHost) // zo->m_ignoreProfileData = true; // ignore any IBC profile data -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // // On ARM, we retry compilation for large images when we hit overflow of IMAGE_REL_BASED_THUMB_BRANCH24 relocations. // Disable procedure spliting for retry because of it depends on IMAGE_REL_BASED_THUMB_BRANCH24 relocations. 
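
Several of the crossgen hunks above (GetHelperThunk, GetIndirectHelperThunk, embedDirectCall, getHelperFtn) differ on ARM only by wrapping the code pointer in GetInnerPtr(..., THUMB_CODE) before it is embedded. As a hedged illustration of what that tag amounts to (the constant and helper names below are hypothetical, not part of the crossgen sources): 32-bit ARM interworking uses bit 0 of a code address to select Thumb mode, so pointers to Thumb code carry a one-bit tag that is set when the pointer is emitted and masked off to reach the actual instruction bytes.

    // Sketch only: kThumbCode and both helpers are illustrative names, assuming the
    // usual ARM/Thumb interworking rule that bit 0 of a code address selects Thumb mode.
    #include <cstdint>

    constexpr uintptr_t kThumbCode = 1;

    // Tag a pointer to Thumb code before it is stored or embedded in an image.
    inline void* TagThumbPointer(void* pCode)
    {
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pCode) | kThumbCode);
    }

    // Recover the address of the actual instruction bytes from a tagged pointer.
    inline void* UntagThumbPointer(void* pTagged)
    {
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pTagged) & ~kThumbCode);
    }

This is why only the TARGET_ARM paths apply the extra inner-pointer offset; the other targets embed the raw entry point.
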
@@ -393,7 +393,7 @@ void Zapper::Init(ZapperOptions *pOptions, bool fFreeZapperOptions) m_pJitCompiler = NULL; m_pMetaDataDispenser = NULL; m_hJitLib = NULL; -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 m_hJitLegacy = NULL; #endif @@ -662,7 +662,7 @@ Zapper::~Zapper() if (m_pEECompileInfo != NULL) { #ifndef FEATURE_MERGE_JIT_AND_ENGINE -#ifdef _TARGET_AMD64_ +#ifdef TARGET_AMD64 if (m_hJitLegacy != NULL) { _ASSERTE(m_hJitLib != NULL); @@ -740,7 +740,7 @@ void Zapper::CleanupAssembly() //********************************************************************** // To be used with GetSpecificCpuInfo() -#ifdef _TARGET_X86_ +#ifdef TARGET_X86 #define CPU_X86_FAMILY(cpuType) (((cpuType) & 0x0F00) >> 8) #define CPU_X86_MODEL(cpuType) (((cpuType) & 0x00F0) >> 4) @@ -1165,7 +1165,7 @@ void Zapper::InitializeCompilerFlags(CORCOMPILE_VERSION_INFO * pVersionInfo) m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_MIN_OPT); } -#if defined(_TARGET_X86_) +#if defined(TARGET_X86) // @TODO: This is a copy of SetCpuInfo() in vm\codeman.cpp. Unify the implementaion @@ -1188,9 +1188,9 @@ void Zapper::InitializeCompilerFlags(CORCOMPILE_VERSION_INFO * pVersionInfo) // .NET Core requires SSE2. m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2); -#endif // _TARGET_X86_ +#endif // TARGET_X86 -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) // If we're crossgenning CoreLib, allow generating non-VEX intrinsics. The generated code might // not actually be supported by the processor at runtime so we compensate for it by // not letting the get_IsSupported method to be intrinsically expanded in crossgen @@ -1202,7 +1202,7 @@ void Zapper::InitializeCompilerFlags(CORCOMPILE_VERSION_INFO * pVersionInfo) { m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD); -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AES); m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_PCLMULQDQ); m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3); @@ -1215,9 +1215,9 @@ void Zapper::InitializeCompilerFlags(CORCOMPILE_VERSION_INFO * pVersionInfo) // CORJIT_FLAGS::CORJIT_FLAG_USE_BMI2 on purpose - these require VEX encodings // and the JIT doesn't support generating code for methods with mixed encodings. m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_LZCNT); -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) } -#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) +#endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) if ( m_pOpt->m_compilerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO) && m_pOpt->m_compilerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE) @@ -1288,7 +1288,7 @@ void Zapper::DefineOutputAssembly(SString& strAssemblyName, ULONG * pHashAlgId) ThrowHR(HRESULT_FROM_WIN32(ERROR_INVALID_NAME)); } -#ifndef PLATFORM_UNIX +#ifndef TARGET_UNIX // // We always need a hash since our assembly module is separate from the manifest. // Use MD5 by default. 
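
The renames throughout these hunks follow one convention: HOST_* macros describe the machine the tool itself runs on, while TARGET_* macros describe the platform the generated code and image are for, with TARGET_UNIX replacing the old FEATURE_PAL/PLATFORM_UNIX checks. A minimal sketch of how a configuration check reads under the new names; the function and the messages are illustrative and not taken from the sources above.

    #include <cstdio>

    // Sketch, assuming the HOST_*/TARGET_* definitions introduced by this patch;
    // the combinations below are examples, not an exhaustive list.
    void PrintCrossgenConfiguration()
    {
    #if defined(HOST_AMD64) && defined(TARGET_ARM64)
        std::puts("cross build: tool runs on x64, output targets ARM64");
    #elif defined(TARGET_UNIX) && !defined(TARGET_64BIT)
        std::puts("32-bit Unix target: images use 4K file alignment");
    #else
        std::puts("host and target platforms use the default configuration");
    #endif
    }

Keeping the host and target dimensions in separate macro families is what lets a single source tree express cases such as a 64-bit tool producing a 32-bit image without ambiguity.
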
diff --git a/src/coreclr/src/zap/zaprelocs.cpp b/src/coreclr/src/zap/zaprelocs.cpp index 357aebf2d8a89..e9207ab072a11 100644 --- a/src/coreclr/src/zap/zaprelocs.cpp +++ b/src/coreclr/src/zap/zaprelocs.cpp @@ -44,7 +44,7 @@ void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int ta return; case IMAGE_REL_BASED_PTR: -#ifdef _TARGET_ARM_ +#ifdef TARGET_ARM // Misaligned relocs disable ASLR on ARM. We should never ever emit them. _ASSERTE(IS_ALIGNED(rva, TARGET_POINTER_SIZE)); #endif @@ -67,7 +67,7 @@ void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int ta // IMAGE_REL_BASED_RELPTR32 does not need base reloc entry return; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) case IMAGE_REL_BASED_REL32: { TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva; @@ -75,9 +75,9 @@ void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int ta } // IMAGE_REL_BASED_REL32 does not need base reloc entry return; -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) case IMAGE_REL_BASED_THUMB_MOV32: { PutThumb2Mov32((UINT16 *)pLocation, (UINT32)pActualTarget); @@ -122,8 +122,8 @@ void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int ta } // IMAGE_REL_BASED_THUMB_BRANCH24 does not need base reloc entry return; -#endif // defined(_TARGET_ARM_) -#if defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM) +#if defined(TARGET_ARM64) case IMAGE_REL_ARM64_BRANCH26: { TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva; @@ -290,13 +290,13 @@ void ZapBlobWithRelocs::Save(ZapWriter * pZapWriter) targetOffset = (int)*(UNALIGNED INT32 *)pLocation; break; -#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) +#if defined(TARGET_X86) || defined(TARGET_AMD64) case IMAGE_REL_BASED_REL32: targetOffset = *(UNALIGNED INT32 *)pLocation; break; -#endif // _TARGET_X86_ || _TARGET_AMD64_ +#endif // TARGET_X86 || TARGET_AMD64 -#if defined(_TARGET_ARM_) +#if defined(TARGET_ARM) case IMAGE_REL_BASED_THUMB_MOV32: case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL: targetOffset = (int)GetThumb2Mov32((UINT16 *)pLocation); @@ -305,9 +305,9 @@ void ZapBlobWithRelocs::Save(ZapWriter * pZapWriter) case IMAGE_REL_BASED_THUMB_BRANCH24: targetOffset = GetThumb2BlRel24((UINT16 *)pLocation); break; -#endif // defined(_TARGET_ARM_) +#endif // defined(TARGET_ARM) -#if defined(_TARGET_ARM64_) +#if defined(TARGET_ARM64) case IMAGE_REL_ARM64_BRANCH26: targetOffset = (int)GetArm64Rel28((UINT32*)pLocation); break; @@ -320,7 +320,7 @@ void ZapBlobWithRelocs::Save(ZapWriter * pZapWriter) targetOffset = (int)GetArm64Rel12((UINT32*)pLocation); break; -#endif // defined(_TARGET_ARM64_) +#endif // defined(TARGET_ARM64) default: _ASSERTE(!"Unknown reloc type"); diff --git a/src/coreclr/src/zap/zaprelocs.h b/src/coreclr/src/zap/zaprelocs.h index a288122fbabcb..25f46a9fe4938 100644 --- a/src/coreclr/src/zap/zaprelocs.h +++ b/src/coreclr/src/zap/zaprelocs.h @@ -26,7 +26,7 @@ typedef BYTE ZapRelocationType; // IMAGE_REL_XXX enum #define IMAGE_REL_INVALID 0xFF // IMAGE_REL_BASED_PTR is architecture specific reloc of virtual address -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT #define IMAGE_REL_BASED_PTR IMAGE_REL_BASED_DIR64 #else #define IMAGE_REL_BASED_PTR IMAGE_REL_BASED_HIGHLOW diff --git a/src/coreclr/src/zap/zapwriter.cpp b/src/coreclr/src/zap/zapwriter.cpp index 122cda7ac2fe9..6d03176b6b0e5 100644 --- a/src/coreclr/src/zap/zapwriter.cpp +++ 
b/src/coreclr/src/zap/zapwriter.cpp @@ -55,7 +55,7 @@ void ZapWriter::Initialize() m_FileAlignment = 0x200; } -#if defined(FEATURE_PAL) && defined(_TARGET_64BIT_) +#if defined(TARGET_UNIX) && defined(TARGET_64BIT) #define SECTION_ALIGNMENT m_FileAlignment #define PAL_MAX_PAGE_SIZE 0x10000 #else diff --git a/src/coreclr/src/zap/zapwriter.h b/src/coreclr/src/zap/zapwriter.h index addd013637775..10542476bf239 100644 --- a/src/coreclr/src/zap/zapwriter.h +++ b/src/coreclr/src/zap/zapwriter.h @@ -319,11 +319,11 @@ class ZapWriter : public IStream BOOL Is64Bit() { -#ifdef _TARGET_64BIT_ +#ifdef TARGET_64BIT return TRUE; -#else // !_TARGET_64BIT_ +#else // !TARGET_64BIT return FALSE; -#endif // !_TARGET_64BIT_ +#endif // !TARGET_64BIT } USHORT GetMachine() diff --git a/src/coreclr/tests/src/Common/Platform/platformdefines.cpp b/src/coreclr/tests/src/Common/Platform/platformdefines.cpp index 2e81d5f3ee5bd..bc251276ebd06 100644 --- a/src/coreclr/tests/src/Common/Platform/platformdefines.cpp +++ b/src/coreclr/tests/src/Common/Platform/platformdefines.cpp @@ -460,7 +460,7 @@ BSTR CoreClrBStrAlloc(LPCWSTR psz, size_t len) if(bstr != NULL){ -#if defined(BIT64) +#if defined(HOST_64BIT) // NOTE: There are some apps which peek back 4 bytes to look at the size of the BSTR. So, in case of 64-bit code, // we need to ensure that the BSTR length can be found by looking one DWORD before the BSTR pointer. *(DWORD_PTR *)bstr = (DWORD_PTR) 0; @@ -494,7 +494,7 @@ BSTR CoreClrBStrAlloc(LPCSTR psz, size_t len) bstr = (BSTR)CoreClrAlloc(cbTotal); if (bstr != NULL) { -#if defined(BIT64) +#if defined(HOST_64BIT) *(DWORD *)((char *)bstr + sizeof (DWORD)) = (DWORD)len; #else *(DWORD *)bstr = (DWORD)len; diff --git a/src/coreclr/tests/src/Common/Platform/platformdefines.h b/src/coreclr/tests/src/Common/Platform/platformdefines.h index 8c0c35423dc71..0e2a681341c37 100644 --- a/src/coreclr/tests/src/Common/Platform/platformdefines.h +++ b/src/coreclr/tests/src/Common/Platform/platformdefines.h @@ -76,11 +76,11 @@ typedef unsigned int ULONG, *PULONG; #endif // RC_INVOKED #define E_INVALIDARG _HRESULT_TYPEDEF_(0x80070057L) -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b))) diff --git a/src/coreclr/tests/src/Interop/IJW/NativeVarargs/IjwNativeVarargs.cpp b/src/coreclr/tests/src/Interop/IJW/NativeVarargs/IjwNativeVarargs.cpp index da7e42860a2bc..e6c8d22baacd2 100644 --- a/src/coreclr/tests/src/Interop/IJW/NativeVarargs/IjwNativeVarargs.cpp +++ b/src/coreclr/tests/src/Interop/IJW/NativeVarargs/IjwNativeVarargs.cpp @@ -123,7 +123,7 @@ public ref class TestClass { failedTests->Add(TestCases::SumHFAs); } -#if BIT64 +#if HOST_64BIT if (!RunDoublesInIntegerRegistersTest()) { failedTests->Add(TestCases::DoublesInIntegerRegisters); diff --git a/src/coreclr/tests/src/Interop/IJW/ijwhostmock/mscoree.cpp b/src/coreclr/tests/src/Interop/IJW/ijwhostmock/mscoree.cpp index 7c398cb1a3e30..a884b23c55b32 100644 --- a/src/coreclr/tests/src/Interop/IJW/ijwhostmock/mscoree.cpp +++ b/src/coreclr/tests/src/Interop/IJW/ijwhostmock/mscoree.cpp @@ -8,7 +8,7 @@ std::set g_modulesQueried = {}; -#if defined _X86_ +#if defined HOST_X86 // We need to use a double-underscore here because the VC linker drops the first underscore // to help people who are exporting cdecl functions to easily export the right thing. 
#pragma comment(linker, "/export:__CorDllMain=__CorDllMain@12") diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt b/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt index a667affd92175..1aceaf5d91bfd 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt @@ -1,17 +1,17 @@ project (GenericsNative) include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake") if(CLR_CMAKE_TARGET_ARCH_I386) - add_definitions(-D_TARGET_X86_) - add_definitions(-D_TARGET_XARCH_) + add_definitions(-DTARGET_X86) + add_definitions(-DTARGET_XARCH) elseif(CLR_CMAKE_TARGET_ARCH_AMD64) - add_definitions(-D_TARGET_AMD64_) - add_definitions(-D_TARGET_XARCH_) + add_definitions(-DTARGET_AMD64) + add_definitions(-DTARGET_XARCH) elseif(CLR_CMAKE_TARGET_ARCH_ARM) - add_definitions(-D_TARGET_ARM_) - add_definitions(-D_TARGET_ARMARCH_) + add_definitions(-DTARGET_ARM) + add_definitions(-DTARGET_ARMARCH) elseif(CLR_CMAKE_TARGET_ARCH_ARM64) - add_definitions(-D_TARGET_ARM64_) - add_definitions(-D_TARGET_ARMARCH_) + add_definitions(-DTARGET_ARM64) + add_definitions(-DTARGET_ARMARCH) endif() set(SOURCES GenericsNative.IUnknown.cpp diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128B.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128B.cpp index 0bbcb419a9e4c..69fe6e455fbf3 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128B.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128B.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m128i Vector128B; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -79,7 +79,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector128BOut(bool e00, bool e01 { Vector128B value = GetVector128B(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(pValue, value); #else *pValue = value; diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128C.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128C.cpp index 99af0b34dfba5..55d042ed85ece 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128C.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128C.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m128i Vector128C; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -63,7 +63,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector128COut(char16_t e00, char { Vector128C value = GetVector128C(e00, e01, e02, e03, e04, e05, e06, e07); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(pValue, value); #else *pValue = value; diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128D.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128D.cpp 
index 5a3f354b58184..88eae0c61d4c1 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128D.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128D.cpp @@ -7,20 +7,20 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m128d Vector128D; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include typedef __n128 float64x2_t; #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -53,7 +53,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector128DOut(double e00, double { Vector128D value = GetVector128D(e00, e01); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_pd((double*)pValue, value); #else *pValue = value; diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128F.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128F.cpp index 7242c9aecda30..bba2abd3afcfe 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128F.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128F.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m128 Vector128F; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -55,7 +55,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector128FOut(float e00, float e { Vector128F value = GetVector128F(e00, e01, e02, e03); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_ps((float*)pValue, value); #else *pValue = value; diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128L.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128L.cpp index 680f2d65ea69e..06a46ac75af69 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128L.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128L.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m128i Vector128L; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -51,7 +51,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector128LOut(int64_t e00, int64 { Vector128L value = GetVector128L(e00, e01); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(pValue, value); #else *pValue = value; diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128U.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128U.cpp index d63b9e10ec957..0fb0b63cdc4c5 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128U.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector128U.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m128i Vector128U; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if 
defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -55,7 +55,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector128UOut(uint32_t e00, uint { Vector128U value = GetVector128U(e00, e01, e02, e03); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(pValue, value); #else *pValue = value; diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256B.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256B.cpp index 8d276d886a8b8..bf518e5ce663e 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256B.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256B.cpp @@ -7,11 +7,11 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m256i Vector256B; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) typedef struct { bool e00; bool e01; @@ -99,7 +99,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector256BOut(bool e00, bool e01 { Vector256B value = GetVector256B(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(((__m128i*)pValue) + 0, *(((__m128i*)&value) + 0)); _mm_storeu_si128(((__m128i*)pValue) + 1, *(((__m128i*)&value) + 1)); #else diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256C.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256C.cpp index 1dbb72e8022d3..f2eccc20ea0b1 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256C.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256C.cpp @@ -7,11 +7,11 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m256i Vector256C; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) typedef struct { char16_t e00; char16_t e01; @@ -67,7 +67,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector256COut(char16_t e00, char { Vector256C value = GetVector256C(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(((__m128i*)pValue) + 0, *(((__m128i*)&value) + 0)); _mm_storeu_si128(((__m128i*)pValue) + 1, *(((__m128i*)&value) + 1)); #else diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256D.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256D.cpp index ccca259f1b622..85abb3d683d3d 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256D.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256D.cpp @@ -7,11 +7,11 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m256d Vector256D; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) typedef struct { double e00; double e01; @@ -43,7 +43,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector256DOut(double e00, double { Vector256D value = GetVector256D(e00, e01, e02, e03); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_pd((double*)(((__m128d*)pValue) + 0), *(((__m128d*)&value) + 0)); _mm_storeu_pd((double*)(((__m128d*)pValue) + 1), 
*(((__m128d*)&value) + 1)); #else diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256F.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256F.cpp index 5c151b09c3493..974f32b18f0b3 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256F.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256F.cpp @@ -7,11 +7,11 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m256 Vector256F; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) typedef struct { float e00; float e01; @@ -51,7 +51,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector256FOut(float e00, float e { Vector256F value = GetVector256F(e00, e01, e02, e03, e04, e05, e06, e07); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_ps((float*)(((__m128*)pValue) + 0), *(((__m128*)&value) + 0)); _mm_storeu_ps((float*)(((__m128*)pValue) + 1), *(((__m128*)&value) + 1)); #else diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256L.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256L.cpp index a17c960041697..645c8b7195f4e 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256L.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256L.cpp @@ -7,11 +7,11 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m256i Vector256L; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) typedef struct { int64_t e00; int64_t e01; @@ -43,7 +43,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector256LOut(int64_t e00, int64 { Vector256L value = GetVector256L(e00, e01, e02, e03); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(((__m128i*)pValue) + 0, *(((__m128i*)&value) + 0)); _mm_storeu_si128(((__m128i*)pValue) + 1, *(((__m128i*)&value) + 1)); #else diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256U.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256U.cpp index c22f3d99c0d8a..0f8664edfc87d 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256U.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector256U.cpp @@ -7,11 +7,11 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m256i Vector256U; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) typedef struct { uint32_t e00; uint32_t e01; @@ -51,7 +51,7 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector256UOut(uint32_t e00, uint { Vector256U value = GetVector256U(e00, e01, e02, e03, e04, e05, e06, e07); -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) _mm_storeu_si128(((__m128i*)pValue) + 0, *(((__m128i*)&value) + 0)); _mm_storeu_si128(((__m128i*)pValue) + 1, *(((__m128i*)&value) + 1)); #else diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64B.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64B.cpp index 98717b7122fd5..6a68817674577 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64B.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64B.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m64 Vector64B; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if 
defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -63,9 +63,9 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector64BOut(bool e00, bool e01, { *pValue = GetVector64B(e00, e01, e02, e03, e04, e05, e06, e07); -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) _mm_empty(); -#endif // _MSC_VER && _TARGET_X86_ +#endif // _MSC_VER && TARGET_X86 } extern "C" DLL_EXPORT const Vector64B* STDMETHODCALLTYPE GetVector64BPtr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07) diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64C.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64C.cpp index f95c04eba24e7..37e6caa14a8ae 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64C.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64C.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m64 Vector64C; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -55,9 +55,9 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector64COut(char16_t e00, char1 { *pValue = GetVector64C(e00, e01, e02, e03); -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) _mm_empty(); -#endif // _MSC_VER && _TARGET_X86_ +#endif // _MSC_VER && TARGET_X86 } extern "C" DLL_EXPORT const Vector64C* STDMETHODCALLTYPE GetVector64CPtr(char16_t e00, char16_t e01, char16_t e02, char16_t e03) diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64D.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64D.cpp index 44f345aaa4775..7f7a30b65aae8 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64D.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64D.cpp @@ -7,20 +7,20 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m64 Vector64D; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif typedef __n64 float64x1_t; - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -51,9 +51,9 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector64DOut(double e00, Vector6 { *pValue = GetVector64D(e00); -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) _mm_empty(); -#endif // _MSC_VER && _TARGET_X86_ +#endif // _MSC_VER && TARGET_X86 } extern "C" DLL_EXPORT const Vector64D* STDMETHODCALLTYPE GetVector64DPtr(double e00) diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64F.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64F.cpp index 22ba27ea24b07..27b8ececcca3b 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64F.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64F.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if 
defined(TARGET_XARCH) #include typedef __m64 Vector64F; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -51,9 +51,9 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector64FOut(float e00, float e0 { *pValue = GetVector64F(e00, e01); -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) _mm_empty(); -#endif // _MSC_VER && _TARGET_X86_ +#endif // _MSC_VER && TARGET_X86 } extern "C" DLL_EXPORT const Vector64F* STDMETHODCALLTYPE GetVector64FPtr(float e00, float e01) diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64L.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64L.cpp index 45988fedd743e..f36d8b08d8556 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64L.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64L.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m64 Vector64L; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -49,9 +49,9 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector64LOut(int64_t e00, Vector { *pValue = GetVector64L(e00); -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) _mm_empty(); -#endif // _MSC_VER && _TARGET_X86_ +#endif // _MSC_VER && TARGET_X86 } extern "C" DLL_EXPORT const Vector64L* STDMETHODCALLTYPE GetVector64LPtr(int64_t e00) diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64U.cpp b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64U.cpp index 68019710f4446..de2b5201e9895 100644 --- a/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64U.cpp +++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/GenericsNative.Vector64U.cpp @@ -7,18 +7,18 @@ #include #include -#if defined(_TARGET_XARCH_) +#if defined(TARGET_XARCH) #include typedef __m64 Vector64U; -#elif defined(_TARGET_ARMARCH_) +#elif defined(TARGET_ARMARCH) #if defined(_MSC_VER) - #if defined(_TARGET_ARM64_) + #if defined(TARGET_ARM64) #include #else #include #endif - #elif defined(_TARGET_ARM64_) + #elif defined(TARGET_ARM64) #include #else typedef struct { @@ -51,9 +51,9 @@ extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVector64UOut(uint32_t e00, uint3 { *pValue = GetVector64U(e00, e01); -#if defined(_MSC_VER) && defined(_TARGET_X86_) +#if defined(_MSC_VER) && defined(TARGET_X86) _mm_empty(); -#endif // _MSC_VER && _TARGET_X86_ +#endif // _MSC_VER && TARGET_X86 } extern "C" DLL_EXPORT const Vector64U* STDMETHODCALLTYPE GetVector64UPtr(uint32_t e00, uint32_t e01) diff --git a/src/coreclr/tests/src/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.h b/src/coreclr/tests/src/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.h index abf47cfae45bc..a300af22d18f1 100644 --- a/src/coreclr/tests/src/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.h +++ b/src/coreclr/tests/src/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.h @@ -174,7 +174,7 @@ union InnerArrayExplicit #ifdef WINDOWS -#ifdef BIT64 
+#ifdef HOST_64BIT #pragma warning(push) #pragma warning(disable: 4201) // nonstandard extension used: nameless struct/union union OUTER3 diff --git a/src/coreclr/tests/src/Interop/StructMarshalling/ReversePInvoke/MarshalExpStruct/ExpStructAsParamNative.h b/src/coreclr/tests/src/Interop/StructMarshalling/ReversePInvoke/MarshalExpStruct/ExpStructAsParamNative.h index 567687fe88a0a..026656771e3a0 100644 --- a/src/coreclr/tests/src/Interop/StructMarshalling/ReversePInvoke/MarshalExpStruct/ExpStructAsParamNative.h +++ b/src/coreclr/tests/src/Interop/StructMarshalling/ReversePInvoke/MarshalExpStruct/ExpStructAsParamNative.h @@ -195,7 +195,7 @@ union InnerArrayExplicit // size = 32 bytes }; -#ifdef BIT64 +#ifdef HOST_64BIT union OUTER3 // size = 32 bytes { struct InnerSequential arr[2]; diff --git a/src/coreclr/tests/src/JIT/Directed/StructABI/StructABI.c b/src/coreclr/tests/src/JIT/Directed/StructABI/StructABI.c index f56eaf2a6af20..959ca9afda52d 100644 --- a/src/coreclr/tests/src/JIT/Directed/StructABI/StructABI.c +++ b/src/coreclr/tests/src/JIT/Directed/StructABI/StructABI.c @@ -7,11 +7,11 @@ #define DLLEXPORT __declspec(dllexport) #else #define DLLEXPORT __attribute__((visibility("default"))) -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define __int32 int #define __int16 short int diff --git a/src/coreclr/tests/src/JIT/Directed/arglist/varargnative.c b/src/coreclr/tests/src/JIT/Directed/arglist/varargnative.c index 54a778f38b579..21ecf866f3af8 100644 --- a/src/coreclr/tests/src/JIT/Directed/arglist/varargnative.c +++ b/src/coreclr/tests/src/JIT/Directed/arglist/varargnative.c @@ -22,11 +22,11 @@ #define __int16 short int #define __int8 char // assumes char is signed -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #endif // !_MSC_VER diff --git a/src/coreclr/tests/src/JIT/Directed/pinning/object-pin/mirror.cpp b/src/coreclr/tests/src/JIT/Directed/pinning/object-pin/mirror.cpp index 268fb265231be..427166c1e14c8 100644 --- a/src/coreclr/tests/src/JIT/Directed/pinning/object-pin/mirror.cpp +++ b/src/coreclr/tests/src/JIT/Directed/pinning/object-pin/mirror.cpp @@ -3,11 +3,11 @@ #else #define EXPORT_API extern "C" __attribute__((visibility("default"))) -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define __int32 int #define __int16 short int diff --git a/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvokeexamplenative.cpp b/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvokeexamplenative.cpp index a434a33ab786f..d2245ad27d689 100644 --- a/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvokeexamplenative.cpp +++ b/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvokeexamplenative.cpp @@ -13,11 +13,11 @@ #else #define EXPORT_API extern "C" __attribute__((visibility("default"))) -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define __int32 int #define __int16 short int diff --git a/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.cpp b/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.cpp index 740790aa2e95e..1309ebd67ea2f 100644 --- a/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.cpp +++ b/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.cpp @@ 
-4,11 +4,11 @@ #include "hfa_native.h" #ifndef _MSC_VER -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define __int32 int #define __int16 short int diff --git a/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.h b/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.h index 1298225b2393b..f1fff2289ffae 100644 --- a/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.h +++ b/src/coreclr/tests/src/JIT/jit64/hfa/main/dll/hfa_native.h @@ -8,11 +8,11 @@ #define __stdcall #endif // !__i386__ -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define __int32 int #define __int16 short int diff --git a/src/coreclr/tests/src/JIT/jit64/mcc/interop/native.h b/src/coreclr/tests/src/JIT/jit64/mcc/interop/native.h index cd07950e9db5a..2c4cc4c4823e3 100644 --- a/src/coreclr/tests/src/JIT/jit64/mcc/interop/native.h +++ b/src/coreclr/tests/src/JIT/jit64/mcc/interop/native.h @@ -9,11 +9,11 @@ #else #define MCC_API extern "C" __attribute__((visibility("default"))) #define WINAPI -#ifdef BIT64 +#ifdef HOST_64BIT #define __int64 long -#else // BIT64 +#else // HOST_64BIT #define __int64 long long -#endif // BIT64 +#endif // HOST_64BIT #define __int32 int #define __int16 short int
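
The BIT64 to HOST_64BIT hunks in these test sources all guard the same shim for non-MSVC compilers: __int64 becomes long on a 64-bit host and long long on a 32-bit host. A small compile-time check of the LP64 assumption behind that mapping (illustrative only, not part of the patched headers):

    #include <climits>

    // Sketch: verifies the data-model assumption the HOST_64BIT branch relies on,
    // namely that plain `long` is 64 bits on 64-bit Unix-like hosts (LP64), while
    // 32-bit hosts need `long long` to obtain a 64-bit integer type.
    static_assert(sizeof(long long) * CHAR_BIT >= 64, "long long is at least 64 bits");
    #ifdef HOST_64BIT
    static_assert(sizeof(long) * CHAR_BIT == 64, "LP64: long is 64 bits on 64-bit hosts");
    #else
    static_assert(sizeof(long long) * CHAR_BIT == 64, "long long provides the 64-bit type on 32-bit hosts");
    #endif

The check assumes an LP64 host, which matches the non-MSVC branches these definitions live in.
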
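
The GetVector*Out exports in the Vector128 and Vector256 test files earlier in this section write the result through the caller-supplied pointer with _mm_storeu_si128, _mm_storeu_ps, or _mm_storeu_pd on TARGET_XARCH. A hedged sketch of the reason for the unaligned forms (the function below is illustrative, not one of the test exports): the out-parameter is supplied by the managed caller, its 16-byte alignment is not guaranteed, and the aligned store variants fault on a misaligned address.

    #include <emmintrin.h>  // SSE2

    // Sketch: _mm_storeu_si128 tolerates any alignment of the destination, whereas
    // _mm_store_si128 requires a 16-byte-aligned pointer, so the unaligned form is
    // the safe choice for an out-parameter whose alignment the callee cannot control.
    void StoreVector128(__m128i value, __m128i* pValue)
    {
        _mm_storeu_si128(pValue, value);
    }
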