diff --git a/stl/inc/xcharconv_ryu.h b/stl/inc/xcharconv_ryu.h
index 0e97cb1231..d2f8db8dd6 100644
--- a/stl/inc/xcharconv_ryu.h
+++ b/stl/inc/xcharconv_ryu.h
@@ -193,28 +193,19 @@ _NODISCARD __forceinline uint64_t __ryu_umul128(const uint64_t __a, const uint64
 #endif // ^^^ intrinsics unavailable ^^^
 
 _NODISCARD inline uint64_t __ryu_shiftright128(const uint64_t __lo, const uint64_t __hi, const uint32_t __dist) {
-#if defined(_M_X64) && !defined(_M_ARM64EC)
-    // For the __shiftright128 intrinsic, the shift value is always
-    // modulo 64.
-    // In the current implementation of the double-precision version
-    // of Ryu, the shift value is always < 64.
-    // (The shift value is in the range [49, 58].)
-    // Check this here in case a future change requires larger shift
-    // values. In this case this function needs to be adjusted.
+    // In the current implementation, the shift value is always < 64.
+    // If larger shift values are ever required, this function will need to be adjusted.
     _STL_INTERNAL_CHECK(__dist < 64);
+
+#if defined(_M_X64) && !defined(_M_ARM64EC)
     return __shiftright128(__lo, __hi, static_cast<unsigned char>(__dist));
-#else // ^^^ defined(_M_X64) && !defined(_M_ARM64EC) / !defined(_M_X64) || defined(_M_ARM64EC) vvv
-    // We don't need to handle the case __dist >= 64 here (see above).
-    _STL_INTERNAL_CHECK(__dist < 64);
-#if defined(_WIN64) || defined(_M_HYBRID_X86_ARM64)
-    _STL_INTERNAL_CHECK(__dist > 0);
+#else // ^^^ __shiftright128 intrinsic available / __shiftright128 intrinsic unavailable vvv
+    if (__dist == 0) {
+        return __lo;
+    }
+
     return (__hi << (64 - __dist)) | (__lo >> __dist);
-#else // ^^^ 64-bit or _M_HYBRID_X86_ARM64 / 32-bit vvv
-    // Avoid a 64-bit shift by taking advantage of the range of shift values.
-    _STL_INTERNAL_CHECK(__dist >= 32);
-    return (__hi << (64 - __dist)) | (static_cast<uint32_t>(__lo >> 32) >> (__dist - 32));
-#endif // ^^^ 32-bit ^^^
-#endif // defined(_M_X64) && !defined(_M_ARM64EC)
+#endif // ^^^ __shiftright128 intrinsic unavailable ^^^
 }
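
Note (commentary, not part of the patch): the new `if (__dist == 0)` guard in the fallback path matters because `__hi << (64 - __dist)` would shift a 64-bit value by 64 bits when `__dist` is 0, which is undefined behavior in C++. The old code avoided that case with `_STL_INTERNAL_CHECK(__dist > 0)` on 64-bit targets and a different formula on 32-bit targets; the patch handles it explicitly instead. A minimal standalone sketch of the same fallback technique, using illustrative names (`shift_right_128` is not part of the STL):

    #include <cassert>
    #include <cstdint>

    // Illustrative sketch: low 64 bits of a 128-bit logical right shift, built
    // from two 64-bit halves, mirroring the non-intrinsic path in the diff above.
    uint64_t shift_right_128(const uint64_t lo, const uint64_t hi, const uint32_t dist) {
        assert(dist < 64); // callers are assumed to guarantee a shift in [0, 64)

        if (dist == 0) {
            return lo; // avoids hi << 64, which would be undefined behavior
        }

        return (hi << (64 - dist)) | (lo >> dist);
    }

    int main() {
        // (2^64 + 1) >> 1 has low 64 bits equal to 2^63.
        assert(shift_right_128(1u, 1u, 1) == 0x8000000000000000u);
        // A zero-distance shift returns the low half unchanged.
        assert(shift_right_128(0xFFFFFFFFFFFFFFFFu, 0u, 0) == 0xFFFFFFFFFFFFFFFFu);
    }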