diff --git a/engine/l_studio.cpp b/engine/l_studio.cpp index 9855e1a00..b49077d6a 100644 --- a/engine/l_studio.cpp +++ b/engine/l_studio.cpp @@ -40,7 +40,7 @@ #include "materialsystem/materialsystem_config.h" #include "materialsystem/itexture.h" #include "IHammer.h" -#if defined( _WIN32 ) && !defined( _X360 ) && !defined(_M_ARM) +#if defined( _WIN32 ) && defined( _M_IX86 ) #include <xmmintrin.h> #endif #include "staticpropmgr.h" diff --git a/public/materialsystem/imesh.h b/public/materialsystem/imesh.h index 19cd83e1b..a6d5e92bc 100644 --- a/public/materialsystem/imesh.h +++ b/public/materialsystem/imesh.h @@ -1220,7 +1220,7 @@ inline void CVertexBuilder::FastVertexSSE( const ModelVertexDX7_t &vertex ) Assert( m_CompressionType == VERTEX_COMPRESSION_NONE ); // FIXME: support compressed verts if needed Assert( m_nCurrentVertex < m_nMaxVertexCount ); -#if defined( _WIN32 ) && !defined( _X360 ) && defined( _M_IX86 ) +#if defined( _WIN32 ) && defined( _M_IX86 ) const void *pRead = &vertex; void *pCurrPos = m_pCurrPosition; __asm @@ -1265,7 +1265,7 @@ inline void CVertexBuilder::Fast4VerticesSSE( Assert( m_CompressionType == VERTEX_COMPRESSION_NONE ); // FIXME: support compressed verts if needed Assert( m_nCurrentVertex < m_nMaxVertexCount-3 ); -#if defined( _WIN32 ) && !defined( _X360 ) && defined( _M_IX86 ) +#if defined( _WIN32 ) && defined( _M_IX86 ) void *pCurrPos = m_pCurrPosition; __asm { @@ -1426,7 +1426,7 @@ inline void CVertexBuilder::FastVertexSSE( const ModelVertexDX8_t &vertex ) Assert( m_CompressionType == VERTEX_COMPRESSION_NONE ); // FIXME: support compressed verts if needed Assert( m_nCurrentVertex < m_nMaxVertexCount ); -#if defined( _WIN32 ) && !defined( _X360 ) && defined( _M_IX86 ) +#if defined( _WIN32 ) && defined( _M_IX86 ) const void *pRead = &vertex; void *pCurrPos = m_pCurrPosition; __asm diff --git a/public/mathlib/mathlib.h b/public/mathlib/mathlib.h index 4be9e8c9c..aeb4aedef 100644 --- a/public/mathlib/mathlib.h +++ b/public/mathlib/mathlib.h @@ -1188,7 
+1188,7 @@ inline float SimpleSplineRemapValClamped( float val, float A, float B, float C, FORCEINLINE int RoundFloatToInt(float f) { -#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) +#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(PLATFORM_WINDOWS_PC64) return _mm_cvtss_si32(_mm_load_ss(&f)); #elif defined( _X360 ) #ifdef Assert diff --git a/public/mathlib/ssemath.h b/public/mathlib/ssemath.h index 8b0c11581..91525ba67 100644 --- a/public/mathlib/ssemath.h +++ b/public/mathlib/ssemath.h @@ -1834,7 +1834,7 @@ FORCEINLINE fltx4 ReplicateX4( float flValue ) FORCEINLINE float SubFloat( const fltx4 & a, int idx ) { // NOTE: if the output goes into a register, this causes a Load-Hit-Store stall (don't mix fpu/vpu math!) -#if defined(_WIN32) && defined(__i386__) || defined(__x86_64__) +#if defined(_WIN32) && (defined(__i386__) || defined(__x86_64__)) return a.m128_f32[ idx ]; #else return (reinterpret_cast<float const *>(&a))[idx]; @@ -1843,7 +1843,7 @@ FORCEINLINE float SubFloat( const fltx4 & a, int idx ) FORCEINLINE float & SubFloat( fltx4 & a, int idx ) { -#if defined(_WIN32) && defined(__i386__) || defined(__x86_64__) +#if defined(_WIN32) && (defined(__i386__) || defined(__x86_64__)) return a.m128_f32[ idx ]; #else return (reinterpret_cast<float *>(&a))[idx]; @@ -1857,7 +1857,7 @@ FORCEINLINE uint32 SubFloatConvertToInt( const fltx4 & a, int idx ) FORCEINLINE uint32 SubInt( const fltx4 & a, int idx ) { -#if defined(_WIN32) && defined(__i386__) || defined(__x86_64__) +#if defined(_WIN32) && (defined(__i386__) || defined(__x86_64__)) return a.m128_u32[ idx ]; #else return (reinterpret_cast<uint32 const *>(&a))[idx]; @@ -1866,7 +1866,7 @@ FORCEINLINE uint32 SubInt( const fltx4 & a, int idx ) FORCEINLINE uint32 & SubInt( fltx4 & a, int idx ) { -#if defined(_WIN32) && defined(__i386__) || defined(__x86_64__) +#if defined(_WIN32) && (defined(__i386__) || defined(__x86_64__)) return a.m128_u32[ idx ]; #else return (reinterpret_cast<uint32 *>(&a))[idx]; diff --git 
a/public/tier0/tslist.h b/public/tier0/tslist.h index 90aec6482..5e05639f0 100644 --- a/public/tier0/tslist.h +++ b/public/tier0/tslist.h @@ -37,7 +37,6 @@ #if defined( PLATFORM_64BITS ) && !defined(_M_ARM64) #if defined (PLATFORM_WINDOWS) -typedef __int128 int128; //typedef __m128i int128; //inline int128 int128_zero() { return _mm_setzero_si128(); } #else // PLATFORM_WINDOWS diff --git a/studiorender/r_studiodraw.cpp b/studiorender/r_studiodraw.cpp index c39b43263..9c6b1ecbc 100644 --- a/studiorender/r_studiodraw.cpp +++ b/studiorender/r_studiodraw.cpp @@ -657,7 +657,7 @@ static matrix3x4_t *ComputeSkinMatrix( mstudioboneweight_t &boneweights, matrix3 static matrix3x4_t *ComputeSkinMatrixSSE( mstudioboneweight_t &boneweights, matrix3x4_t *pPoseToWorld, matrix3x4_t &result ) { // NOTE: pPoseToWorld, being cache aligned, doesn't need explicit initialization -#if defined( _WIN32 ) && !defined( _X360 ) && defined(_M_IX86) +#if defined( _WIN32 ) && defined(_M_IX86) switch( boneweights.numbones ) { default: diff --git a/tier0/cpu.cpp b/tier0/cpu.cpp index 92467c0d2..10efb38d4 100644 --- a/tier0/cpu.cpp +++ b/tier0/cpu.cpp @@ -22,7 +22,7 @@ const tchar* GetProcessorVendorId(); static bool cpuid(uint32 function, uint32& out_eax, uint32& out_ebx, uint32& out_ecx, uint32& out_edx) { -#if !defined(__i386__) && !defined(__x86_64) || defined( _X360 ) +#if !defined(__i386__) && !defined(__x86_64) return false; #elif defined(GNUC) diff --git a/tier0/stacktools.cpp b/tier0/stacktools.cpp index c458135ed..f804c452a 100644 --- a/tier0/stacktools.cpp +++ b/tier0/stacktools.cpp @@ -175,7 +175,7 @@ int GetCallStack_Fast( void **pReturnAddressesOut, int iArrayCount, int iSkipCou { //Only tested in windows. This function won't work with frame pointer omission enabled. 
"vpc /nofpo" all projects #if (defined( TIER0_FPO_DISABLED ) || defined( _DEBUG )) &&\ - (defined( WIN32 ) && !defined( _X360 ) && defined(_M_X86)) + (defined( WIN32 ) && defined(_M_X86)) void *pStackCrawlEBP; __asm { @@ -1467,7 +1467,7 @@ CStackTop_CopyParentStack::CStackTop_CopyParentStack( void * const *pParentStack #if defined( ENABLE_RUNTIME_STACK_TRANSLATION ) //miniature version of GetCallStack_Fast() #if (defined( TIER0_FPO_DISABLED ) || defined( _DEBUG )) &&\ - (defined( WIN32 ) && !defined( _X360 ) && defined(_M_X86)) + (defined( WIN32 ) && defined(_M_X86)) void *pStackCrawlEBP; __asm { @@ -1525,7 +1525,7 @@ CStackTop_ReferenceParentStack::CStackTop_ReferenceParentStack( void * const *pP #if defined( ENABLE_RUNTIME_STACK_TRANSLATION ) //miniature version of GetCallStack_Fast() #if (defined( TIER0_FPO_DISABLED ) || defined( _DEBUG )) &&\ - (defined( WIN32 ) && !defined( _X360 ) && defined(_M_X86)) + (defined( WIN32 ) && defined(_M_X86)) void *pStackCrawlEBP; __asm {