From d6a9e9342627cb9fa83ba083a66221b28915ba3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Mon, 11 Jul 2022 22:38:03 +0200 Subject: [PATCH] deps: patch V8 to 10.2.154.13 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refs: https://github.com/v8/v8/compare/10.2.154.4...10.2.154.13 PR-URL: https://github.com/nodejs/node/pull/43727 Reviewed-By: Ben Noordhuis Reviewed-By: Jiawen Geng Reviewed-By: Tobias Nießen --- deps/v8/include/v8-version.h | 2 +- deps/v8/src/builtins/arm/builtins-arm.cc | 23 ++++-- deps/v8/src/builtins/arm64/builtins-arm64.cc | 81 +++++++++++-------- deps/v8/src/builtins/ia32/builtins-ia32.cc | 27 ++++--- .../src/builtins/loong64/builtins-loong64.cc | 73 +++++++++++------ deps/v8/src/builtins/mips/builtins-mips.cc | 68 +++++++++++----- .../v8/src/builtins/mips64/builtins-mips64.cc | 76 +++++++++++------ deps/v8/src/builtins/ppc/builtins-ppc.cc | 30 +++++-- .../src/builtins/riscv64/builtins-riscv64.cc | 67 ++++++++------- deps/v8/src/builtins/s390/builtins-s390.cc | 28 +++++-- deps/v8/src/builtins/x64/builtins-x64.cc | 23 ++++-- .../compiler/backend/instruction-selector.cc | 21 +---- .../compiler/backend/instruction-selector.h | 14 ++-- .../loong64/instruction-selector-loong64.cc | 2 +- .../mips64/instruction-selector-mips64.cc | 2 +- .../riscv64/instruction-selector-riscv64.cc | 2 +- .../backend/x64/instruction-selector-x64.cc | 2 +- deps/v8/src/compiler/pipeline.cc | 5 +- .../riscv64/frame-constants-riscv64.h | 5 +- deps/v8/src/heap/mark-compact.cc | 5 +- deps/v8/src/ic/ic.cc | 17 +--- deps/v8/src/objects/code-kind.h | 4 - deps/v8/src/objects/js-objects.cc | 49 +++++++++++ deps/v8/src/objects/js-objects.h | 5 ++ deps/v8/src/objects/js-weak-refs-inl.h | 21 +++-- deps/v8/src/objects/js-weak-refs.h | 8 +- deps/v8/src/objects/lookup.cc | 4 +- deps/v8/src/objects/objects.cc | 2 +- deps/v8/src/profiler/profile-generator.cc | 7 +- deps/v8/src/profiler/profile-generator.h | 2 +- deps/v8/src/runtime/runtime-object.cc | 43 +++++----- deps/v8/src/runtime/runtime-wasm.cc | 9 +-- deps/v8/src/runtime/runtime.h | 7 +- .../mips64/liftoff-assembler-mips64.h | 12 +-- deps/v8/src/wasm/wasm-linkage.h | 2 + deps/v8/test/cctest/test-js-weak-refs.cc | 51 +++++++++++- .../regress/regress-crbug-1321899-1.js | 21 +++++ .../regress/regress-crbug-1321899-2.js | 7 ++ .../regress/regress-crbug-1321899-3.js | 65 +++++++++++++++ .../regress/regress-crbug-1321899-4.js | 7 ++ .../regress/regress-crbug-1321899-5.js | 19 +++++ .../regress/regress-crbug-1321899-6.js | 7 ++ .../mjsunit/regress/regress-crbug-1321899.js | 63 +++++++++++++++ deps/v8/tools/whitespace.txt | 2 +- 44 files changed, 710 insertions(+), 280 deletions(-) create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899-1.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899-2.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899-3.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899-4.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899-5.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899-6.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1321899.js diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 37abe105b74e3c..7e7823f75e7b1f 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -11,7 +11,7 @@ #define V8_MAJOR_VERSION 10 #define V8_MINOR_VERSION 2 #define V8_BUILD_NUMBER 154 -#define 
V8_PATCH_LEVEL 4 +#define V8_PATCH_LEVEL 13 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index e3f78a86aa99d5..a4044380600b07 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -2609,8 +2609,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in a register by the jump table trampoline. // Convert to Smi for the runtime call. - __ SmiTag(kWasmCompileLazyFuncIndexRegister, - kWasmCompileLazyFuncIndexRegister); + __ SmiTag(kWasmCompileLazyFuncIndexRegister); { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2640,22 +2639,34 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ stm(db_w, sp, gp_regs); __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg); - // Pass instance and function index as explicit arguments to the runtime + // Push the Wasm instance for loading the jump table address after the + // runtime call. + __ push(kWasmInstanceRegister); + + // Push the Wasm instance again as an explicit argument to the runtime // function. __ push(kWasmInstanceRegister); + // Push the function index as second argument. __ push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Move(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - // The entrypoint address is the return value. - __ mov(r8, kReturnRegister0); + // The runtime function returns the jump table slot offset as a Smi. Use + // that to compute the jump target in r8. + __ pop(kWasmInstanceRegister); + __ ldr(r8, MemOperand( + kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ add(r8, r8, Operand::SmiUntag(kReturnRegister0)); + // r8 now holds the jump table slot where we want to jump to in the end. // Restore registers. __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg); __ ldm(ia_w, sp, gp_regs); } - // Finally, jump to the entrypoint. + + // Finally, jump to the jump table slot for the function. __ Jump(r8); } diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 0cb649b15dddcc..009e9b03dff1ef 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -3018,41 +3018,50 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Sign extend and convert to Smi for the runtime call. __ sxtw(kWasmCompileLazyFuncIndexRegister, kWasmCompileLazyFuncIndexRegister.W()); - __ SmiTag(kWasmCompileLazyFuncIndexRegister, - kWasmCompileLazyFuncIndexRegister); - - UseScratchRegisterScope temps(masm); - { - HardAbortScope hard_abort(masm); // Avoid calls to Abort. - FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); - - // Save all parameter registers (see wasm-linkage.h). They might be - // overwritten in the runtime call below. We don't have any callee-saved - // registers in wasm, so no need to store anything else. - RegList gp_regs; + __ SmiTag(kWasmCompileLazyFuncIndexRegister); + + // Compute register lists for parameters to be saved. We save all parameter + // registers (see wasm-linkage.h). They might be overwritten in the runtime + // call below. 
We don't have any callee-saved registers in wasm, so no need to + // store anything else. + constexpr RegList kSavedGpRegs = ([]() constexpr { + RegList saved_gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs.set(gp_param_reg); + saved_gp_regs.set(gp_param_reg); } // Also push x1, because we must push multiples of 16 bytes (see // {TurboAssembler::PushCPURegList}. - CHECK_EQ(1, gp_regs.Count() % 2); - gp_regs.set(x1); - CHECK_EQ(0, gp_regs.Count() % 2); + saved_gp_regs.set(x1); + // All set registers were unique. + CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1); + // We push a multiple of 16 bytes. + CHECK_EQ(0, saved_gp_regs.Count() % 2); + // The Wasm instance must be part of the saved registers. + CHECK(saved_gp_regs.has(kWasmInstanceRegister)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, + saved_gp_regs.Count()); + return saved_gp_regs; + })(); - DoubleRegList fp_regs; + constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { + DoubleRegList saved_fp_regs; for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { - fp_regs.set(fp_param_reg); + saved_fp_regs.set(fp_param_reg); } - CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1); - CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); - CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, - gp_regs.Count()); + CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, - fp_regs.Count()); + saved_fp_regs.Count()); + return saved_fp_regs; + })(); - __ PushXRegList(gp_regs); - __ PushQRegList(fp_regs); + { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); + + // Save registers that we need to keep alive across the runtime call. + __ PushXRegList(kSavedGpRegs); + __ PushQRegList(kSavedFpRegs); // Pass instance and function index as explicit arguments to the runtime // function. @@ -3062,17 +3071,23 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Mov(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - // Exclude x17 from the scope, there are hardcoded uses of it below. - temps.Exclude(x17); - - // The entrypoint address is the return value. - __ Mov(x17, kReturnRegister0); + // Untag the returned Smi into x17, for later use. + static_assert(!kSavedGpRegs.has(x17)); + __ SmiUntag(x17, kReturnRegister0); // Restore registers. - __ PopQRegList(fp_regs); - __ PopXRegList(gp_regs); + __ PopQRegList(kSavedFpRegs); + __ PopXRegList(kSavedGpRegs); } - // Finally, jump to the entrypoint. + + // The runtime function returned the jump table slot offset as a Smi (now in + // x17). Use that to compute the jump target. + static_assert(!kSavedGpRegs.has(x18)); + __ ldr(x18, MemOperand( + kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ add(x17, x18, Operand(x17)); + // Finally, jump to the jump table slot for the function. __ Jump(x17); } diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 0a7b49d6ad71ca..bb250c43236a36 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -2878,20 +2878,28 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { offset += kSimd128Size; } - // Push the Wasm instance as an explicit argument to WasmCompileLazy.
+ // Push the Wasm instance for loading the jump table address after the + // runtime call. + __ Push(kWasmInstanceRegister); + + // Push the Wasm instance again as an explicit argument to the runtime + // function. __ Push(kWasmInstanceRegister); // Push the function index as second argument. __ Push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Move(kContextRegister, Smi::zero()); - { - // At this point, ebx has been spilled to the stack but is not yet - // overwritten with another value. We can still use it as kRootRegister. - __ CallRuntime(Runtime::kWasmCompileLazy, 2); - } - // The entrypoint address is the return value. - __ mov(edi, kReturnRegister0); + __ CallRuntime(Runtime::kWasmCompileLazy, 2); + // The runtime function returns the jump table slot offset as a Smi. Use + // that to compute the jump target in edi. + __ Pop(kWasmInstanceRegister); + __ mov(edi, MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - + kHeapObjectTag)); + __ SmiUntag(kReturnRegister0); + __ add(edi, kReturnRegister0); + // edi now holds the jump table slot where we want to jump to in the end. // Restore registers. for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) { @@ -2904,7 +2912,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Pop(reg); } } - // Finally, jump to the entrypoint. + + // Finally, jump to the jump table slot for the function. __ jmp(edi); } diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index fa3bf4f32f0f8f..934a86e6f26128 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -2648,37 +2648,50 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in t0 by the jump table trampoline. // Convert to Smi for the runtime call __ SmiTag(kWasmCompileLazyFuncIndexRegister); - { - HardAbortScope hard_abort(masm); // Avoid calls to Abort. - FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); - // Save all parameter registers (see wasm-linkage.h). They might be - // overwritten in the runtime call below. We don't have any callee-saved - // registers in wasm, so no need to store anything else. - RegList gp_regs; + // Compute register lists for parameters to be saved. We save all parameter + // registers (see wasm-linkage.h). They might be overwritten in the runtime + // call below. We don't have any callee-saved registers in wasm, so no need to + // store anything else. + constexpr RegList kSavedGpRegs = ([]() constexpr { + RegList saved_gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs.set(gp_param_reg); + saved_gp_regs.set(gp_param_reg); } - DoubleRegList fp_regs; + // All set registers were unique. + CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); + // The Wasm instance must be part of the saved registers. 
+ CHECK(saved_gp_regs.has(kWasmInstanceRegister)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, + saved_gp_regs.Count()); + return saved_gp_regs; + })(); + + constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { + DoubleRegList saved_fp_regs; for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { - fp_regs.set(fp_param_reg); + saved_fp_regs.set(fp_param_reg); } - CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); - CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); - CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, - gp_regs.Count()); + CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, - fp_regs.Count()); + saved_fp_regs.Count()); + return saved_fp_regs; + })(); + + { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); - __ MultiPush(gp_regs); - __ MultiPushFPU(fp_regs); + // Save registers that we need to keep alive across the runtime call. + __ MultiPush(kSavedGpRegs); + __ MultiPushFPU(kSavedFpRegs); // kFixedFrameSizeFromFp is hard coded to include space for Simd // registers, so we still need to allocate extra (unused) space on the stack // as if they were saved. - __ Sub_d(sp, sp, fp_regs.Count() * kDoubleSize); + __ Sub_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize); // Pass instance and function index as an explicit arguments to the runtime // function. @@ -2687,15 +2700,27 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // set the current context on the isolate. __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - __ mov(t8, a0); - __ Add_d(sp, sp, fp_regs.Count() * kDoubleSize); + // Untag the returned Smi into t7, for later use. + static_assert(!kSavedGpRegs.has(t7)); + __ SmiUntag(t7, a0); + + __ Add_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize); // Restore registers. - __ MultiPopFPU(fp_regs); - __ MultiPop(gp_regs); + __ MultiPopFPU(kSavedFpRegs); + __ MultiPop(kSavedGpRegs); } - // Finally, jump to the entrypoint. - __ Jump(t8); + + // The runtime function returned the jump table slot offset as a Smi (now in + // t7). Use that to compute the jump target. + static_assert(!kSavedGpRegs.has(t8)); + __ Ld_d(t8, MemOperand( + kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ Add_d(t7, t8, Operand(t7)); + + // Finally, jump to the jump table slot for the function. + __ Jump(t7); } void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 05e676a8837a29..c11ced1b728a13 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -2592,32 +2592,45 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in t0 by the jump table trampoline. // Convert to Smi for the runtime call. __ SmiTag(kWasmCompileLazyFuncIndexRegister); - { - HardAbortScope hard_abort(masm); // Avoid calls to Abort. - FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); - // Save all parameter registers (see wasm-linkage.h). They might be - // overwritten in the runtime call below. We don't have any callee-saved - // registers in wasm, so no need to store anything else. - RegList gp_regs; + // Compute register lists for parameters to be saved.
We save all parameter + registers (see wasm-linkage.h). They might be overwritten in the runtime + call below. We don't have any callee-saved registers in wasm, so no need to + store anything else. + constexpr RegList kSavedGpRegs = ([]() constexpr { + RegList saved_gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs.set(gp_param_reg); + saved_gp_regs.set(gp_param_reg); } - DoubleRegList fp_regs; + // All set registers were unique. + CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); + // The Wasm instance must be part of the saved registers. + CHECK(saved_gp_regs.has(kWasmInstanceRegister)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, + saved_gp_regs.Count()); + return saved_gp_regs; + })(); + + constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { + DoubleRegList saved_fp_regs; for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { - fp_regs.set(fp_param_reg); + saved_fp_regs.set(fp_param_reg); } - CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); - CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); - CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, - gp_regs.Count()); + CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, - fp_regs.Count()); + saved_fp_regs.Count()); + return saved_fp_regs; + })(); - __ MultiPush(gp_regs); - __ MultiPushFPU(fp_regs); + { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); + + // Save registers that we need to keep alive across the runtime call. + __ MultiPush(kSavedGpRegs); + __ MultiPushFPU(kSavedFpRegs); // Pass instance and function index as an explicit arguments to the runtime // function. @@ -2628,11 +2641,24 @@ __ CallRuntime(Runtime::kWasmCompileLazy, 2); // Restore registers. - __ MultiPopFPU(fp_regs); - __ MultiPop(gp_regs); + __ MultiPopFPU(kSavedFpRegs); + __ MultiPop(kSavedGpRegs); } - // Finally, jump to the entrypoint. - __ Jump(kScratchReg, v0, 0); + + // Untag the returned Smi, for later use. + static_assert(!kSavedGpRegs.has(v0)); + __ SmiUntag(v0); + + // The runtime function returned the jump table slot offset as a Smi (now in + // v0). Use that to compute the jump target. + static_assert(!kSavedGpRegs.has(t8)); + __ Lw(t8, + MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ Addu(t8, v0, t8); + + // Finally, jump to the jump table slot for the function. + __ Jump(t8); } void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 90be59a884b1da..408e2fe4331056 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -2643,31 +2643,44 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in t0 by the jump table trampoline. // Convert to Smi for the runtime call __ SmiTag(kWasmCompileLazyFuncIndexRegister); - { - HardAbortScope hard_abort(masm); // Avoid calls to Abort. - FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); - // Save all parameter registers (see wasm-linkage.h). They might be - // overwritten in the runtime call below. We don't have any callee-saved - // registers in wasm, so no need to store anything else.
- RegList gp_regs; + // Compute register lists for parameters to be saved. We save all parameter + // registers (see wasm-linkage.h). They might be overwritten in the runtime + // call below. We don't have any callee-saved registers in wasm, so no need to + // store anything else. + constexpr RegList kSavedGpRegs = ([]() constexpr { + RegList saved_gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs.set(gp_param_reg); + saved_gp_regs.set(gp_param_reg); } - DoubleRegList fp_regs; + // All set registers were unique. + CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); + // The Wasm instance must be part of the saved registers. + CHECK(saved_gp_regs.has(kWasmInstanceRegister)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, + saved_gp_regs.Count()); + return saved_gp_regs; + })(); + + constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { + DoubleRegList saved_fp_regs; for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { - fp_regs.set(fp_param_reg); + saved_fp_regs.set(fp_param_reg); } - CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); - CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); - CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, - gp_regs.Count()); + CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, - fp_regs.Count()); + saved_fp_regs.Count()); + return saved_fp_regs; + })(); - __ MultiPush(gp_regs); + { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); + + // Save registers that we need to keep alive across the runtime call. + __ MultiPush(kSavedGpRegs); // Check if machine has simd enabled, if so push vector registers. If not // then only push double registers. Label push_doubles, simd_pushed; @@ -2679,15 +2692,15 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { { CpuFeatureScope msa_scope( masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported); - __ MultiPushMSA(fp_regs); + __ MultiPushMSA(kSavedFpRegs); } __ Branch(&simd_pushed); __ bind(&push_doubles); - __ MultiPushFPU(fp_regs); + __ MultiPushFPU(kSavedFpRegs); // kFixedFrameSizeFromFp is hard coded to include space for Simd // registers, so we still need to allocate extra (unused) space on the stack // as if they were saved. - __ Dsubu(sp, sp, fp_regs.Count() * kDoubleSize); + __ Dsubu(sp, sp, kSavedFpRegs.Count() * kDoubleSize); __ bind(&simd_pushed); // Pass instance and function index as an explicit arguments to the runtime // function. @@ -2707,17 +2720,30 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { { CpuFeatureScope msa_scope( masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported); - __ MultiPopMSA(fp_regs); + __ MultiPopMSA(kSavedFpRegs); } __ Branch(&simd_popped); __ bind(&pop_doubles); - __ Daddu(sp, sp, fp_regs.Count() * kDoubleSize); - __ MultiPopFPU(fp_regs); + __ Daddu(sp, sp, kSavedFpRegs.Count() * kDoubleSize); + __ MultiPopFPU(kSavedFpRegs); __ bind(&simd_popped); - __ MultiPop(gp_regs); + __ MultiPop(kSavedGpRegs); } - // Finally, jump to the entrypoint. - __ Jump(v0); + + // Untag the returned Smi, for later use. + static_assert(!kSavedGpRegs.has(v0)); + __ SmiUntag(v0); + + // The runtime function returned the jump table slot offset as a Smi (now in + // v0). Use that to compute the jump target.
+ static_assert(!kSavedGpRegs.has(t8)); + __ Ld(t8, + MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ Daddu(t8, v0, t8); + + // Finally, jump to the jump table slot for the function. + __ Jump(t8); } void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 754e7f1679c34c..0aa39986d3f830 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -2832,8 +2832,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in a register by the jump table trampoline. // Convert to Smi for the runtime call. - __ SmiTag(kWasmCompileLazyFuncIndexRegister, - kWasmCompileLazyFuncIndexRegister); + __ SmiTag(kWasmCompileLazyFuncIndexRegister); + { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2867,21 +2867,37 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ MultiPush(gp_regs); __ MultiPushF64AndV128(fp_regs, simd_regs); - // Pass instance and function index as explicit arguments to the runtime + // Push the Wasm instance for loading the jump table address after the + // runtime call. + __ Push(kWasmInstanceRegister); + + // Push the Wasm instance again as an explicit argument to the runtime // function. - __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); + __ Push(kWasmInstanceRegister); + // Push the function index as second argument. + __ Push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ LoadSmiLiteral(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - // The entrypoint address is the return value. - __ mr(r11, kReturnRegister0); + // The runtime function returns the jump table slot offset as a Smi. Use + // that to compute the jump target in r11. + __ Pop(kWasmInstanceRegister); + __ LoadU64( + r11, + MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag), + r0); + __ SmiUntag(kReturnRegister0); + __ AddS64(r11, r11, kReturnRegister0); + // r11 now holds the jump table slot where we want to jump to in the end. // Restore registers. __ MultiPopF64AndV128(fp_regs, simd_regs); __ MultiPop(gp_regs); } - // Finally, jump to the entrypoint. + + // Finally, jump to the jump table slot for the function. __ Jump(r11); } diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc index 6bccec0ef8a614..addbe945462fa5 100644 --- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -2751,37 +2751,40 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in t0 by the jump table trampoline. // Convert to Smi for the runtime call __ SmiTag(kWasmCompileLazyFuncIndexRegister); - { - HardAbortScope hard_abort(masm); // Avoid calls to Abort. - FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); - // Save all parameter registers (see kGpParamRegisters in wasm-linkage.cc). - // They might be overwritten in the runtime call below. We don't have any - // callee-saved registers in wasm, so no need to store anything else. 
- RegList gp_regs; + RegList kSavedGpRegs = ([]() constexpr { + RegList saved_gp_regs; for (Register gp_param_reg : wasm::kGpParamRegisters) { - gp_regs.set(gp_param_reg); + saved_gp_regs.set(gp_param_reg); } - // Also push a1, because we must push multiples of 16 bytes (see - // {TurboAssembler::PushCPURegList}. - CHECK_EQ(1, gp_regs.Count() % 2); - gp_regs.set(a1); - // Ensure that A1 will not be repeated. - CHECK_EQ(0, gp_regs.Count() % 2); - - DoubleRegList fp_regs; + + // All set registers were unique. + CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters)); + // The Wasm instance must be part of the saved registers. + CHECK(saved_gp_regs.has(kWasmInstanceRegister)); + CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, + saved_gp_regs.Count()); + return saved_gp_regs; + })(); + + DoubleRegList kSavedFpRegs = ([]() constexpr { + DoubleRegList saved_fp_regs; for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) { - fp_regs.set(fp_param_reg); + saved_fp_regs.set(fp_param_reg); } - CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1); - CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); - CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs, - gp_regs.Count()); + CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters)); CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs, - fp_regs.Count()); - __ MultiPush(gp_regs); - __ MultiPushFPU(fp_regs); + saved_fp_regs.Count()); + return saved_fp_regs; + })(); + + { + HardAbortScope hard_abort(masm); // Avoid calls to Abort. + FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); + + __ MultiPush(kSavedGpRegs); + __ MultiPushFPU(kSavedFpRegs); // Pass instance and function index as an explicit arguments to the runtime // function. @@ -2791,13 +2794,21 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - __ Move(s1, a0); // move return value to s1 since a0 will be restored to - // the value before the call + __ SmiUntag(s1, a0); // move return value to s1 since a0 will be restored + // to the value before the call + CHECK(!kSavedGpRegs.has(s1)); // Restore registers. - __ MultiPopFPU(fp_regs); - __ MultiPop(gp_regs); + __ MultiPopFPU(kSavedFpRegs); + __ MultiPop(kSavedGpRegs); } + + // The runtime function returned the jump table slot offset as a Smi (now in + // s1). Use that to compute the jump target. + __ Ld(kScratchReg, + MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); + __ Add64(s1, s1, Operand(kScratchReg)); // Finally, jump to the entrypoint. __ Jump(s1); } diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index d96115a74abe16..e9a3cd82581760 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -2877,8 +2877,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was put in a register by the jump table trampoline. // Convert to Smi for the runtime call. - __ SmiTag(kWasmCompileLazyFuncIndexRegister, - kWasmCompileLazyFuncIndexRegister); + __ SmiTag(kWasmCompileLazyFuncIndexRegister); + { HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2906,21 +2906,35 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ MultiPush(gp_regs); __ MultiPushF64OrV128(fp_regs, ip); - // Pass instance and function index as explicit arguments to the runtime + // Push the Wasm instance for loading the jump table address after the + // runtime call. + __ Push(kWasmInstanceRegister); + + // Push the Wasm instance again as an explicit argument to the runtime // function. - __ Push(kWasmInstanceRegister, r7); + __ Push(kWasmInstanceRegister); + // Push the function index as second argument. + __ Push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ LoadSmiLiteral(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - // The entrypoint address is the return value. - __ mov(ip, r2); + // The runtime function returns the jump table slot offset as a Smi. Use + // that to compute the jump target in ip. + __ Pop(kWasmInstanceRegister); + __ LoadU64(ip, MemOperand(kWasmInstanceRegister, + WasmInstanceObject::kJumpTableStartOffset - + kHeapObjectTag)); + __ SmiUntag(kReturnRegister0); + __ AddS64(ip, ip, kReturnRegister0); + // ip now holds the jump table slot where we want to jump to in the end. // Restore registers. __ MultiPopF64OrV128(fp_regs, ip); __ MultiPop(gp_regs); } - // Finally, jump to the entrypoint. + + // Finally, jump to the jump table slot for the function. __ Jump(ip); } diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index c5709e9d9e2dd8..f9088b98864707 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -2786,6 +2786,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Pop(r15); // Convert to Smi for the runtime call. __ SmiTag(r15); + { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2809,7 +2810,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { offset += kSimd128Size; } - // Push the Wasm instance as an explicit argument to WasmCompileLazy. + // Push the Wasm instance for loading the jump table address after the + // runtime call. + __ Push(kWasmInstanceRegister); + + // Push the Wasm instance again as an explicit argument to the runtime + // function. __ Push(kWasmInstanceRegister); // Push the function index as second argument. __ Push(r15); @@ -2817,8 +2823,15 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // set the current context on the isolate. __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); - // The entrypoint address is the return value. - __ movq(r15, kReturnRegister0); + // The runtime function returns the jump table slot offset as a Smi. Use + // that to compute the jump target in r15. + __ Pop(kWasmInstanceRegister); + __ movq(r15, MemOperand(kWasmInstanceRegister, + wasm::ObjectAccess::ToTagged( + WasmInstanceObject::kJumpTableStartOffset))); + __ SmiUntag(kReturnRegister0); + __ addq(r15, kReturnRegister0); + // r15 now holds the jump table slot where we want to jump to in the end. // Restore registers. for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) { @@ -2831,7 +2844,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Pop(reg); } } - // Finally, jump to the entrypoint. 
+ + // Finally, jump to the jump table slot for the function. __ jmp(r15); } @@ -3597,7 +3611,6 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ j(equal, &place_float_param); // ref params have already been pushed, so go through directly - __ addq(current_int_param_slot, Immediate(kSystemPointerSize)); __ jmp(&loop_through_valuetypes); // All other types are reference types. We can just fall through to place them diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 3c96e7f30e5cf3..7da0bce3c51c90 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -283,7 +283,7 @@ Instruction* InstructionSelector::Emit(Instruction* instr) { bool InstructionSelector::CanCover(Node* user, Node* node) const { // 1. Both {user} and {node} must be in the same basic block. - if (schedule()->block(node) != schedule()->block(user)) { + if (schedule()->block(node) != current_block_) { return false; } // 2. Pure {node}s must be owned by the {user}. @@ -291,7 +291,7 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const { return node->OwnedBy(user); } // 3. Impure {node}s must match the effect level of {user}. - if (GetEffectLevel(node) != GetEffectLevel(user)) { + if (GetEffectLevel(node) != current_effect_level_) { return false; } // 4. Only {node} must have value edges pointing to {user}. @@ -303,21 +303,6 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const { return true; } -bool InstructionSelector::CanCoverTransitively(Node* user, Node* node, - Node* node_input) const { - if (CanCover(user, node) && CanCover(node, node_input)) { - // If {node} is pure, transitivity might not hold. - if (node->op()->HasProperty(Operator::kPure)) { - // If {node_input} is pure, the effect levels do not matter. - if (node_input->op()->HasProperty(Operator::kPure)) return true; - // Otherwise, {user} and {node_input} must have the same effect level. - return GetEffectLevel(user) == GetEffectLevel(node_input); - } - return true; - } - return false; -} - bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const { BasicBlock* bb_user = schedule()->block(user); @@ -1192,6 +1177,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) { int effect_level = 0; for (Node* const node : *block) { SetEffectLevel(node, effect_level); + current_effect_level_ = effect_level; if (node->opcode() == IrOpcode::kStore || node->opcode() == IrOpcode::kUnalignedStore || node->opcode() == IrOpcode::kCall || @@ -1209,6 +1195,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) { // control input should be on the same effect level as the last node. if (block->control_input() != nullptr) { SetEffectLevel(block->control_input(), effect_level); + current_effect_level_ = effect_level; } auto FinishEmittedInstructions = [&](Node* node, int instruction_start) { diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index c1a12d97ec322d..8bbe30618432ae 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -407,15 +407,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final { // Used in pattern matching during code generation. // Check if {node} can be covered while generating code for the current // instruction. 
A node can be covered if the {user} of the node has the only - // edge and the two are in the same basic block. - // Before fusing two instructions a and b, it is useful to check that - // CanCover(a, b) holds. If this is not the case, code for b must still be - // generated for other users, and fusing is unlikely to improve performance. + // edge, the two are in the same basic block, and there are no side-effects + // in-between. The last check is crucial for soundness. + // For pure nodes, CanCover(a,b) is checked to avoid duplicated execution: + // If this is not the case, code for b must still be generated for other + // users, and fusing is unlikely to improve performance. bool CanCover(Node* user, Node* node) const; - // CanCover is not transitive. The counter example are Nodes A,B,C such that - // CanCover(A, B) and CanCover(B,C) and B is pure: The the effect level of A - // and B might differ. CanCoverTransitively does the additional checks. - bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const; // Used in pattern matching during code generation. // This function checks that {node} and {user} are in the same basic block, @@ -739,6 +736,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { BoolVector defined_; BoolVector used_; IntVector effect_level_; + int current_effect_level_; IntVector virtual_registers_; IntVector virtual_register_rename_; InstructionScheduler* scheduler_; diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc index 4f03f99acd5b85..6b2d25f5dc1d91 100644 --- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc +++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc @@ -1446,7 +1446,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { if (CanCover(node, value)) { switch (value->opcode()) { case IrOpcode::kWord64Sar: { - if (CanCoverTransitively(node, value, value->InputAt(0)) && + if (CanCover(value, value->InputAt(0)) && TryEmitExtendingLoad(this, value, node)) { return; } else { diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index 4f5738ddaddba0..1e29e0ed730be4 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -1532,7 +1532,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { if (CanCover(node, value)) { switch (value->opcode()) { case IrOpcode::kWord64Sar: { - if (CanCoverTransitively(node, value, value->InputAt(0)) && + if (CanCover(value, value->InputAt(0)) && TryEmitExtendingLoad(this, value, node)) { return; } else { diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc index a4547402e94cce..fce1b92f9683c5 100644 --- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc +++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc @@ -1479,7 +1479,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { if (CanCover(node, value)) { switch (value->opcode()) { case IrOpcode::kWord64Sar: { - if (CanCoverTransitively(node, value, value->InputAt(0)) && + if (CanCover(value, value->InputAt(0)) && TryEmitExtendingLoad(this, value, node)) { return; } else { diff --git 
a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index d40f659e4b7f2d..62e82b58475301 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -1822,7 +1822,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { case IrOpcode::kWord64Shr: { Int64BinopMatcher m(value); if (m.right().Is(32)) { - if (CanCoverTransitively(node, value, value->InputAt(0)) && + if (CanCover(value, value->InputAt(0)) && TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) { return EmitIdentity(node); } diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index cd546fe9c81f5f..a71427f5682fc1 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -3486,8 +3486,11 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) { const RegisterConfiguration* config = RegisterConfiguration::Default(); std::unique_ptr<const RegisterConfiguration> restricted_config; + // The mid-tier register allocator keeps values in stack slots for too long. + // This is incompatible with left-trimming, therefore we cannot enable it for + // JS functions. bool use_mid_tier_register_allocator = - !CodeKindIsStaticallyCompiled(data->info()->code_kind()) && + data->info()->code_kind() == CodeKind::WASM_FUNCTION && (FLAG_turbo_force_mid_tier_regalloc || (FLAG_turbo_use_mid_tier_regalloc_for_huge_functions && data->sequence()->VirtualRegisterCount() > diff --git a/deps/v8/src/execution/riscv64/frame-constants-riscv64.h b/deps/v8/src/execution/riscv64/frame-constants-riscv64.h index 6b70815ea40cb9..d5e3165956b452 100644 --- a/deps/v8/src/execution/riscv64/frame-constants-riscv64.h +++ b/deps/v8/src/execution/riscv64/frame-constants-riscv64.h @@ -24,14 +24,15 @@ class EntryFrameConstants : public AllStatic { class WasmCompileLazyFrameConstants : public TypedFrameConstants { public: static constexpr int kNumberOfSavedGpParamRegs = - arraysize(wasm::kGpParamRegisters) + 1; + arraysize(wasm::kGpParamRegisters); static constexpr int kNumberOfSavedFpParamRegs = arraysize(wasm::kFpParamRegisters); static constexpr int kNumberOfSavedAllParamRegs = kNumberOfSavedGpParamRegs + kNumberOfSavedFpParamRegs; // FP-relative. - // See Generate_WasmCompileLazy in builtins-mips64.cc. + // See Generate_WasmCompileLazy in builtins-riscv64.cc. + // TODO(riscv): add rvv v reg save static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs); static constexpr int kFixedFrameSizeFromFp = diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc index 49a05b6a137b1d..ef0b67ca2b6274 100644 --- a/deps/v8/src/heap/mark-compact.cc +++ b/deps/v8/src/heap/mark-compact.cc @@ -3054,14 +3054,11 @@ void MarkCompactCollector::ClearJSWeakRefs() { // unregister_token field set to undefined when processing the first // WeakCell. Like above, we're modifying pointers during GC, so record the // slots. - HeapObject undefined = ReadOnlyRoots(isolate()).undefined_value(); JSFinalizationRegistry finalization_registry = JSFinalizationRegistry::cast(weak_cell.finalization_registry()); finalization_registry.RemoveUnregisterToken( JSReceiver::cast(unregister_token), isolate(), - [undefined](WeakCell matched_cell) { - matched_cell.set_unregister_token(undefined); - }, + JSFinalizationRegistry::kKeepMatchedCellsInRegistry, gc_notify_updated_slot); } else { // The unregister_token is alive.
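Taken together, the Generate_WasmCompileLazy hunks above and the Runtime_WasmCompileLazy change further down switch the lazy-compilation protocol: the runtime call no longer returns a raw code entrypoint, it returns the function's jump table slot offset as a Smi, and each builtin rebases that offset on the jump table start it loads from the Wasm instance before jumping. A standalone sketch of the protocol in plain C++ (illustrative only, not V8 code; Slot, Instance and CompileLazy are invented stand-ins):

#include <cstdint>
#include <cstdio>

using Slot = void (*)();  // one jump table slot per wasm function

void f0() { std::puts("function 0"); }
void f1() { std::puts("function 1"); }

struct Instance {
  Slot* jump_table_start;  // what kJumpTableStartOffset points at in V8
};

// Stand-in for Runtime::kWasmCompileLazy: compile the function, patch its
// slot, and hand back only the slot offset (a small, Smi-like integer).
std::intptr_t CompileLazy(Instance* instance, int func_index) {
  instance->jump_table_start[func_index] = (func_index == 0) ? f0 : f1;
  return func_index * static_cast<std::intptr_t>(sizeof(Slot));
}

int main() {
  Slot table[2] = {nullptr, nullptr};
  Instance instance{table};
  // What the builtins now do: offset -> jump_table_start + offset -> jump.
  std::intptr_t offset = CompileLazy(&instance, 1);
  Slot target = *reinterpret_cast<Slot*>(
      reinterpret_cast<char*>(instance.jump_table_start) + offset);
  target();  // "jump" to the jump table slot for the function
}

The payoff is that the runtime call's return value is a small tagged integer rather than an arbitrary code address smuggled through a tagged-object return, which is also why each port now re-fetches the instance after the call or asserts that it is part of the saved register list.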
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc index b0572bc23e4507..1fdf72440a418e 100644 --- a/deps/v8/src/ic/ic.cc +++ b/deps/v8/src/ic/ic.cc @@ -1849,19 +1849,10 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name, IsAnyDefineOwn() ? LookupIterator::OWN : LookupIterator::DEFAULT); if (name->IsPrivate()) { - bool exists = it.IsFound(); - if (name->IsPrivateName() && exists == IsDefineKeyedOwnIC()) { - Handle<String> name_string( - String::cast(Symbol::cast(*name).description()), isolate()); - if (exists) { - MessageTemplate message = - name->IsPrivateBrand() - ? MessageTemplate::kInvalidPrivateBrandReinitialization - : MessageTemplate::kInvalidPrivateFieldReinitialization; - return TypeError(message, object, name_string); - } else { - return TypeError(MessageTemplate::kInvalidPrivateMemberWrite, object, - name_string); + if (name->IsPrivateName()) { + DCHECK(!IsDefineNamedOwnIC()); + if (!JSReceiver::CheckPrivateNameStore(&it, IsDefineKeyedOwnIC())) { + return MaybeHandle<Object>(); } } diff --git a/deps/v8/src/objects/code-kind.h b/deps/v8/src/objects/code-kind.h index b43affdc2d972e..32c3b025757aa1 100644 --- a/deps/v8/src/objects/code-kind.h +++ b/deps/v8/src/objects/code-kind.h @@ -57,10 +57,6 @@ inline constexpr bool CodeKindIsBaselinedJSFunction(CodeKind kind) { return kind == CodeKind::BASELINE; } -inline constexpr bool CodeKindIsStaticallyCompiled(CodeKind kind) { - return kind == CodeKind::BYTECODE_HANDLER || kind == CodeKind::BUILTIN; -} - inline constexpr bool CodeKindIsUnoptimizedJSFunction(CodeKind kind) { STATIC_ASSERT(static_cast<int>(CodeKind::INTERPRETED_FUNCTION) + 1 == static_cast<int>(CodeKind::BASELINE)); diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc index 3f806f5a090651..4335a7cf0e4698 100644 --- a/deps/v8/src/objects/js-objects.cc +++ b/deps/v8/src/objects/js-objects.cc @@ -185,6 +185,55 @@ Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate, } } +// static +bool JSReceiver::CheckPrivateNameStore(LookupIterator* it, bool is_define) { + DCHECK(it->GetName()->IsPrivateName()); + Isolate* isolate = it->isolate(); + Handle<String> name_string( + String::cast(Handle<Symbol>::cast(it->GetName())->description()), + isolate); + bool should_throw = GetShouldThrow(isolate, Nothing<ShouldThrow>()) == + ShouldThrow::kThrowOnError; + for (; it->IsFound(); it->Next()) { + switch (it->state()) { + case LookupIterator::TRANSITION: + case LookupIterator::INTERCEPTOR: + case LookupIterator::JSPROXY: + case LookupIterator::NOT_FOUND: + case LookupIterator::INTEGER_INDEXED_EXOTIC: + case LookupIterator::ACCESSOR: + UNREACHABLE(); + case LookupIterator::ACCESS_CHECK: + if (!it->HasAccess()) { + isolate->ReportFailedAccessCheck( + Handle<JSObject>::cast(it->GetReceiver())); + RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, false); + return false; + } + break; + case LookupIterator::DATA: + if (is_define && should_throw) { + MessageTemplate message = + it->GetName()->IsPrivateBrand() + ?
MessageTemplate::kInvalidPrivateBrandReinitialization + : MessageTemplate::kInvalidPrivateFieldReinitialization; + isolate->Throw(*(isolate->factory()->NewTypeError( + message, name_string, it->GetReceiver()))); + return false; + } + return true; + } + } + DCHECK(!it->IsFound()); + if (!is_define && should_throw) { + isolate->Throw(*(isolate->factory()->NewTypeError( + MessageTemplate::kInvalidPrivateMemberWrite, name_string, + it->GetReceiver()))); + return false; + } + return true; +} + // static Maybe<bool> JSReceiver::CheckIfCanDefine(Isolate* isolate, LookupIterator* it, Handle<Object> value, diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h index d6a96a8fe2d062..4edb34d5c9f81c 100644 --- a/deps/v8/src/objects/js-objects.h +++ b/deps/v8/src/objects/js-objects.h @@ -161,6 +161,11 @@ class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> { Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key, PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw); + // Check if private name property can be stored on the object. It will return + // false with an error when it cannot. + V8_WARN_UNUSED_RESULT static bool CheckPrivateNameStore(LookupIterator* it, + bool is_define); + // Check if a data property can be created on the object. It will fail with // an error when it cannot. V8_WARN_UNUSED_RESULT static Maybe<bool> CheckIfCanDefine( diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h index acce7b72b9430d..76e6e075e5ded6 100644 --- a/deps/v8/src/objects/js-weak-refs-inl.h +++ b/deps/v8/src/objects/js-weak-refs-inl.h @@ -60,16 +60,14 @@ bool JSFinalizationRegistry::Unregister( // key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of // its FinalizationRegistry; remove it from there. return finalization_registry->RemoveUnregisterToken( - *unregister_token, isolate, - [isolate](WeakCell matched_cell) { - matched_cell.RemoveFromFinalizationRegistryCells(isolate); - }, + *unregister_token, isolate, kRemoveMatchedCellsFromRegistry, [](HeapObject, ObjectSlot, Object) {}); } -template <typename MatchCallback, typename GCNotifyUpdatedSlotCallback> +template <typename GCNotifyUpdatedSlotCallback> bool JSFinalizationRegistry::RemoveUnregisterToken( - JSReceiver unregister_token, Isolate* isolate, MatchCallback match_callback, + JSReceiver unregister_token, Isolate* isolate, + RemoveUnregisterTokenMode removal_mode, GCNotifyUpdatedSlotCallback gc_notify_updated_slot) { // This method is called from both FinalizationRegistry#unregister and for // removing weakly-held dead unregister tokens. The latter is during GC so @@ -107,7 +105,16 @@ bool JSFinalizationRegistry::RemoveUnregisterToken( value = weak_cell.key_list_next(); if (weak_cell.unregister_token() == unregister_token) { // weak_cell has the same unregister token; remove it from the key list. - match_callback(weak_cell); + switch (removal_mode) { + case kRemoveMatchedCellsFromRegistry: + weak_cell.RemoveFromFinalizationRegistryCells(isolate); + break; + case kKeepMatchedCellsInRegistry: + // Do nothing. + break; + } + // Clear unregister token-related fields.
+ weak_cell.set_unregister_token(undefined); weak_cell.set_key_list_prev(undefined); weak_cell.set_key_list_next(undefined); was_present = true; diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h index 57f765b282e653..f678234ff81afc 100644 --- a/deps/v8/src/objects/js-weak-refs.h +++ b/deps/v8/src/objects/js-weak-refs.h @@ -43,10 +43,14 @@ class JSFinalizationRegistry // it modifies slots in key_map and WeakCells and the normal write barrier is // disabled during GC, we need to tell the GC about the modified slots via the // gc_notify_updated_slot function. - template <typename MatchCallback, typename GCNotifyUpdatedSlotCallback> + enum RemoveUnregisterTokenMode { + kRemoveMatchedCellsFromRegistry, + kKeepMatchedCellsInRegistry + }; + template <typename GCNotifyUpdatedSlotCallback> inline bool RemoveUnregisterToken( JSReceiver unregister_token, Isolate* isolate, - MatchCallback match_callback, + RemoveUnregisterTokenMode removal_mode, GCNotifyUpdatedSlotCallback gc_notify_updated_slot); // Returns true if the cleared_cells list is non-empty. diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc index 81f83302e7b2d9..df9e219d33b3a6 100644 --- a/deps/v8/src/objects/lookup.cc +++ b/deps/v8/src/objects/lookup.cc @@ -1264,7 +1264,9 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder( } #endif // V8_ENABLE_WEBASSEMBLY if (map.is_access_check_needed()) { - if (is_element || !name_->IsPrivate(isolate_)) return ACCESS_CHECK; + if (is_element || !name_->IsPrivate(isolate_) || + name_->IsPrivateName(isolate_)) + return ACCESS_CHECK; } V8_FALLTHROUGH; case ACCESS_CHECK: diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc index 4616ef7ab74d6c..d16146114e81cc 100644 --- a/deps/v8/src/objects/objects.cc +++ b/deps/v8/src/objects/objects.cc @@ -6981,7 +6981,7 @@ void JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap( } // weak_cell is now removed from the unregister token map, so clear its - // unregister token-related fields for heap verification. + // unregister token-related fields. weak_cell.set_unregister_token(undefined); weak_cell.set_key_list_prev(undefined); weak_cell.set_key_list_next(undefined); diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc index afbe860cb244fd..c70e4e50cf1ba1 100644 --- a/deps/v8/src/profiler/profile-generator.cc +++ b/deps/v8/src/profiler/profile-generator.cc @@ -570,6 +570,8 @@ void ContextFilter::OnMoveEvent(Address from_address, Address to_address) { using v8::tracing::TracedValue; +std::atomic<ProfilerId> CpuProfilesCollection::last_id_{0}; + CpuProfile::CpuProfile(CpuProfiler* profiler, ProfilerId id, const char* title, CpuProfilingOptions options, std::unique_ptr<DiscardedSamplesDelegate> delegate) @@ -892,10 +894,7 @@ size_t CodeMap::GetEstimatedMemoryUsage() const { } CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate) - : profiler_(nullptr), - current_profiles_semaphore_(1), - last_id_(0), - isolate_(isolate) { + : profiler_(nullptr), current_profiles_semaphore_(1), isolate_(isolate) { USE(isolate_); } diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h index 2c3b44e6b0936f..360d4c23aa24fc 100644 --- a/deps/v8/src/profiler/profile-generator.h +++ b/deps/v8/src/profiler/profile-generator.h @@ -587,7 +587,7 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection { // Accessed by VM thread and profile generator thread.
std::vector<std::unique_ptr<CpuProfile>> current_profiles_; base::Semaphore current_profiles_semaphore_; - ProfilerId last_id_; + static std::atomic<ProfilerId> last_id_; Isolate* isolate_; }; diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc index e4da1da95c5770..4241abe151e539 100644 --- a/deps/v8/src/runtime/runtime-object.cc +++ b/deps/v8/src/runtime/runtime-object.cc @@ -48,6 +48,9 @@ MaybeHandle<Object> Runtime::GetObjectProperty( LookupIterator(isolate, receiver, lookup_key, lookup_start_object); MaybeHandle<Object> result = Object::GetProperty(&it); + if (result.is_null()) { + return result; + } if (is_found) *is_found = it.IsFound(); if (!it.IsFound() && key->IsSymbol() && @@ -572,15 +575,9 @@ MaybeHandle<Object> Runtime::SetObjectProperty( PropertyKey lookup_key(isolate, key, &success); if (!success) return MaybeHandle<Object>(); LookupIterator it(isolate, object, lookup_key); - - if (!it.IsFound() && key->IsSymbol() && - Symbol::cast(*key).is_private_name()) { - Handle<Object> name_string(Symbol::cast(*key).description(), isolate); - DCHECK(name_string->IsString()); - THROW_NEW_ERROR(isolate, - NewTypeError(MessageTemplate::kInvalidPrivateMemberWrite, - name_string, object), - Object); + if (key->IsSymbol() && Symbol::cast(*key).is_private_name() && + !JSReceiver::CheckPrivateNameStore(&it, false)) { + return MaybeHandle<Object>(); } MAYBE_RETURN_NULL( @@ -589,10 +586,11 @@ MaybeHandle<Object> Runtime::SetObjectProperty( return value; } -MaybeHandle<Object> Runtime::DefineObjectOwnProperty( - Isolate* isolate, Handle<Object> object, Handle<Object> key, - Handle<Object> value, StoreOrigin store_origin, - Maybe<ShouldThrow> should_throw) { +MaybeHandle<Object> Runtime::DefineObjectOwnProperty(Isolate* isolate, + Handle<Object> object, + Handle<Object> key, + Handle<Object> value, + StoreOrigin store_origin) { if (object->IsNullOrUndefined(isolate)) { THROW_NEW_ERROR( isolate, @@ -607,20 +605,15 @@ MaybeHandle<Object> Runtime::DefineObjectOwnProperty( LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN); if (key->IsSymbol() && Symbol::cast(*key).is_private_name()) { - Handle<Symbol> private_symbol = Handle<Symbol>::cast(key); - if (it.IsFound()) { - Handle<Object> name_string(private_symbol->description(), isolate); - DCHECK(name_string->IsString()); - MessageTemplate message = - private_symbol->is_private_brand() - ?
MessageTemplate::kInvalidPrivateBrandReinitialization - : MessageTemplate::kInvalidPrivateFieldReinitialization; - THROW_NEW_ERROR(isolate, NewTypeError(message, name_string), Object); - } else { - MAYBE_RETURN_NULL(JSReceiver::AddPrivateField(&it, value, should_throw)); + if (!JSReceiver::CheckPrivateNameStore(&it, true)) { + return MaybeHandle<Object>(); } + DCHECK(!it.IsFound()); + MAYBE_RETURN_NULL( + JSReceiver::AddPrivateField(&it, value, Nothing<ShouldThrow>())); } else { - MAYBE_RETURN_NULL(JSReceiver::CreateDataProperty(&it, value, should_throw)); + MAYBE_RETURN_NULL( + JSReceiver::CreateDataProperty(&it, value, Nothing<ShouldThrow>())); } return value; diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc index a6712673c0d1d3..501f665927d8dd 100644 --- a/deps/v8/src/runtime/runtime-wasm.cc +++ b/deps/v8/src/runtime/runtime-wasm.cc @@ -224,14 +224,11 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) { bool success = wasm::CompileLazy(isolate, instance, func_index); if (!success) { DCHECK(isolate->has_pending_exception()); - return ReadOnlyRoots(isolate).exception(); + return ReadOnlyRoots{isolate}.exception(); } - Address entrypoint = - instance->module_object().native_module()->GetCallTargetForFunction( - func_index); - - return Object(entrypoint); + auto* native_module = instance->module_object().native_module(); + return Smi::FromInt(native_module->GetJumpTableOffset(func_index)); } namespace { diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h index a140f9b5269a66..877b277a5e26cb 100644 --- a/deps/v8/src/runtime/runtime.h +++ b/deps/v8/src/runtime/runtime.h @@ -821,10 +821,9 @@ class Runtime : public AllStatic { // private field definition), this method throws if the field already exists // on object. V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> - DefineObjectOwnProperty( - Isolate* isolate, Handle<Object> object, Handle<Object> key, - Handle<Object> value, StoreOrigin store_origin, - Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>()); + DefineObjectOwnProperty(Isolate* isolate, Handle<Object> object, + Handle<Object> key, Handle<Object> value, + StoreOrigin store_origin); // When "receiver" is not passed, it defaults to "lookup_start_object".
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index b4b238421d21a9..36413545c609c5 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -748,13 +748,13 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
     Daddu(temp0, dst_op.rm(), dst_op.offset());                  \
     switch (type.value()) {                                      \
       case StoreType::kI64Store8:                                \
-        ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst64, 7);         \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 8, inst64, 7);       \
         break;                                                   \
       case StoreType::kI32Store8:                                \
         ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 8, inst32, 3);         \
         break;                                                   \
       case StoreType::kI64Store16:                               \
-        ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst64, 7);        \
+        ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, 16, inst64, 7);      \
         break;                                                   \
       case StoreType::kI32Store16:                               \
         ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, 16, inst32, 3);        \
@@ -823,13 +823,13 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
   Daddu(temp0, dst_op.rm(), dst_op.offset());
   switch (type.value()) {
     case StoreType::kI64Store8:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, 8, 7);
      break;
     case StoreType::kI32Store8:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
       break;
     case StoreType::kI64Store16:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, 16, 7);
       break;
     case StoreType::kI32Store16:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
@@ -899,13 +899,13 @@ void LiftoffAssembler::AtomicCompareExchange(
   Daddu(temp0, dst_op.rm(), dst_op.offset());
   switch (type.value()) {
     case StoreType::kI64Store8:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 7);
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, 8, 7);
       break;
     case StoreType::kI32Store8:
       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 8, 3);
       break;
     case StoreType::kI64Store16:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 7);
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, 16, 7);
       break;
     case StoreType::kI32Store16:
       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, 16, 3);
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 77ed549c900bb8..88115462bede4d 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -143,6 +143,8 @@ constexpr DoubleRegister kFpReturnRegisters[] = {};
 // The parameter index where the instance parameter should be placed in wasm
 // call descriptors. This is used by the Int64Lowering::LowerNode method.
 constexpr int kWasmInstanceParameterIndex = 0;
+static_assert(kWasmInstanceRegister ==
+              kGpParamRegisters[kWasmInstanceParameterIndex]);
 
 class LinkageAllocator {
  public:
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index a2c3a3e5045dd1..39c3aee560e9f3 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -853,9 +853,7 @@ TEST(TestRemoveUnregisterToken) {
 
   finalization_registry->RemoveUnregisterToken(
       JSReceiver::cast(*token2), isolate,
-      [undefined](WeakCell matched_cell) {
-        matched_cell.set_unregister_token(*undefined);
-      },
+      JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
       [](HeapObject, ObjectSlot, Object) {});
 
   // Both weak_cell2a and weak_cell2b remain on the weak cell chains.
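
The kKeepMatchedCellsInRegistry behavior above is driven through the standard FinalizationRegistry API. A minimal sketch of the register/unregister flow (the token, target, and callback are illustrative):

const registry = new FinalizationRegistry(held => {
  // Runs some time after a registered target has been garbage-collected.
  console.log('finalized:', held);
});

let token = {};
registry.register({}, 'held-value', token);  // target, held value, token

// Unregistering with the token detaches the matching WeakCell(s) from the
// registry, so the callback never runs for that target.
registry.unregister(token);
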
@@ -1025,5 +1023,52 @@ TEST(UnregisterTokenHeapVerifier) {
   EmptyMessageQueues(isolate);
 }
 
+TEST(UnregisteredAndUnclearedCellHeapVerifier) {
+  if (!FLAG_incremental_marking) return;
+  ManualGCScope manual_gc_scope;
+#ifdef VERIFY_HEAP
+  FLAG_verify_heap = true;
+#endif
+
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  Heap* heap = CcTest::heap();
+  v8::HandleScope outer_scope(isolate);
+
+  {
+    // Make a new FinalizationRegistry and register an object with a token.
+    v8::HandleScope scope(isolate);
+    CompileRun(
+        "var token = {}; "
+        "var registry = new FinalizationRegistry(function () {}); "
+        "registry.register({}, undefined, token);");
+  }
+
+  // Start incremental marking to activate the marking barrier.
+  heap::SimulateIncrementalMarking(heap, false);
+
+  {
+    // Make a WeakCell list with length >1, then unregister with the token to
+    // remove the WeakCell from the registry. The linked list manipulation
+    // keeps the unregistered WeakCell alive (i.e. not put into cleared_cells)
+    // due to the marking barrier from incremental marking. Then make the
+    // original token collectible.
+    v8::HandleScope scope(isolate);
+    CompileRun(
+        "registry.register({}); "
+        "registry.unregister(token); "
+        "token = 0;");
+  }
+
+  // Trigger GC.
+  CcTest::CollectAllGarbage();
+  CcTest::CollectAllGarbage();
+
+  // Pump message loop to run the finalizer task, then the incremental marking
+  // task. The verifier will verify that live WeakCells don't point to dead
+  // unregister tokens.
+  EmptyMessageQueues(isolate);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-1.js
new file mode 100644
index 00000000000000..03990a774dba04
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-1.js
@@ -0,0 +1,21 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1321899.js');
+
+// Detached global should not have access
+const realm = Realm.createAllowCrossRealmAccess();
+const detached = Realm.global(realm);
+Realm.detachGlobal(realm);
+
+assertThrows(() => new B(detached), Error, /no access/);
+assertThrows(() => new C(detached), Error, /no access/);
+assertThrows(() => new D(detached), Error, /no access/);
+assertThrows(() => new E(detached), Error, /no access/);
+assertThrows(() => B.setField(detached), Error, /no access/);
+assertThrows(() => C.setField(detached), Error, /no access/);
+assertThrows(() => D.setAccessor(detached), Error, /no access/);
+assertThrows(() => E.setMethod(detached), Error, /no access/);
+assertThrows(() => D.getAccessor(detached), Error, /no access/);
+assertThrows(() => E.getMethod(detached), Error, /no access/);
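
The Realm functions used by these tests (Realm.createAllowCrossRealmAccess, Realm.global, Realm.detachGlobal) are helpers of the d8 shell, not standard JavaScript. Roughly, the setup they provide looks like this (a sketch, runnable only in d8):

// Create a second realm whose global is reachable cross-realm.
const realm = Realm.createAllowCrossRealmAccess();
const proxy = Realm.global(realm);  // the global proxy of that realm

// Detaching the global makes subsequent access checks fail, which is what
// the assertions above mean by the "no access" error.
Realm.detachGlobal(realm);
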
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-2.js
new file mode 100644
index 00000000000000..ff1c9a1772c613
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-2.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1321899-1.js');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-3.js
new file mode 100644
index 00000000000000..5d513a9eca8ab8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-3.js
@@ -0,0 +1,65 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1321899.js');
+
+// Attached global should have access
+const realm = Realm.createAllowCrossRealmAccess();
+const globalProxy = Realm.global(realm);
+
+assertThrows(() => B.setField(globalProxy), TypeError, /Cannot write private member #b to an object whose class did not declare it/);
+assertThrows(() => B.getField(globalProxy), TypeError, /Cannot read private member #b from an object whose class did not declare it/);
+
+new B(globalProxy);
+assertEquals(B.getField(globalProxy), 1);
+B.setField(globalProxy);
+assertEquals(B.getField(globalProxy), 'b');  // Fast case
+B.setField(globalProxy);  // Fast case
+assertEquals(B.getField(globalProxy), 'b');  // Fast case
+assertThrows(() => new B(globalProxy), TypeError, /Cannot initialize #b twice on the same object/);
+
+assertThrows(() => C.setField(globalProxy), TypeError, /Cannot write private member #c to an object whose class did not declare it/);
+assertThrows(() => C.getField(globalProxy), TypeError, /Cannot read private member #c from an object whose class did not declare it/);
+
+new C(globalProxy);
+assertEquals(C.getField(globalProxy), undefined);
+C.setField(globalProxy);
+assertEquals(C.getField(globalProxy), 'c');  // Fast case
+C.setField(globalProxy);  // Fast case
+assertEquals(C.getField(globalProxy), 'c');  // Fast case
+assertThrows(() => new C(globalProxy), TypeError, /Cannot initialize #c twice on the same object/);
+
+assertThrows(() => D.setAccessor(globalProxy), TypeError, /Receiver must be an instance of class D/);
+assertThrows(() => D.getAccessor(globalProxy), TypeError, /Receiver must be an instance of class D/);
+
+new D(globalProxy);
+assertEquals(D.getAccessor(globalProxy), 0);
+D.setAccessor(globalProxy);
+assertEquals(D.getAccessor(globalProxy), 'd');  // Fast case
+D.setAccessor(globalProxy);  // Fast case
+assertEquals(D.getAccessor(globalProxy), 'd');  // Fast case
+assertThrows(() => new D(globalProxy), TypeError, /Cannot initialize private methods of class D twice on the same object/);
+
+assertThrows(() => E.setMethod(globalProxy), TypeError, /Receiver must be an instance of class E/);
+assertThrows(() => E.getMethod(globalProxy), TypeError, /Receiver must be an instance of class E/);
+
+new E(globalProxy);
+assertEquals(E.getMethod(globalProxy)(), 0);
+assertThrows(() => E.setMethod(globalProxy), TypeError, /Private method '#e' is not writable/);
+assertEquals(E.getMethod(globalProxy)(), 0);  // Fast case
+assertThrows(() => new E(globalProxy), TypeError, /Cannot initialize private methods of class E twice on the same object/);
+
+// Access should fail after detaching
+Realm.detachGlobal(realm);
+
+assertThrows(() => new B(globalProxy), Error, /no access/);
+assertThrows(() => new C(globalProxy), Error, /no access/);
+assertThrows(() => new D(globalProxy), Error, /no access/);
+assertThrows(() => new E(globalProxy), Error, /no access/);
+assertThrows(() => B.setField(globalProxy), Error, /no access/);
+assertThrows(() => C.setField(globalProxy), Error, /no access/);
+assertThrows(() => D.setAccessor(globalProxy), Error, /no access/);
+assertThrows(() => E.setMethod(globalProxy), Error, /no access/);
+assertThrows(() => D.getAccessor(globalProxy), Error, /no access/);
+assertThrows(() => E.getMethod(globalProxy), Error, /no access/);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899-4.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-4.js
new file mode 100644
index 00000000000000..1f23dac13ea4d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-4.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1321899-3.js');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899-5.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-5.js
new file mode 100644
index 00000000000000..d3bff2fbbeedc7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-5.js
@@ -0,0 +1,19 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1321899.js');
+
+const realm = Realm.create();
+const globalProxy = Realm.global(realm);
+
+assertThrows(() => new B(globalProxy), Error, /no access/);
+assertThrows(() => new C(globalProxy), Error, /no access/);
+assertThrows(() => new D(globalProxy), Error, /no access/);
+assertThrows(() => new E(globalProxy), Error, /no access/);
+assertThrows(() => B.setField(globalProxy), Error, /no access/);
+assertThrows(() => C.setField(globalProxy), Error, /no access/);
+assertThrows(() => D.setAccessor(globalProxy), Error, /no access/);
+assertThrows(() => E.setMethod(globalProxy), Error, /no access/);
+assertThrows(() => D.getAccessor(globalProxy), Error, /no access/);
+assertThrows(() => E.getMethod(globalProxy), Error, /no access/);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899-6.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-6.js
new file mode 100644
index 00000000000000..a49542a3b76fb0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899-6.js
@@ -0,0 +1,7 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy-feedback-allocation
+
+d8.file.execute('test/mjsunit/regress/regress-crbug-1321899-5.js');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1321899.js b/deps/v8/test/mjsunit/regress/regress-crbug-1321899.js
new file mode 100644
index 00000000000000..be1996ef3064e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1321899.js
@@ -0,0 +1,63 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {
+  constructor(arg) {
+    return arg;
+  }
+}
+
+class B extends A {
+  #b = 1;  // ACCESS_CHECK -> DATA
+  constructor(arg) {
+    super(arg);
+  }
+  static setField(obj) {
+    obj.#b = 'b';  // KeyedStoreIC
+  }
+  static getField(obj) {
+    return obj.#b;
+  }
+}
+
+class C extends A {
+  #c;  // DefineKeyedOwnIC: ACCESS_CHECK -> NOT_FOUND
+  constructor(arg) {
+    super(arg);
+  }
+  static setField(obj) {
+    obj.#c = 'c';  // KeyedStoreIC
+  }
+  static getField(obj) {
+    return obj.#c;
+  }
+}
+
+let d = 0;
+class D extends A {
+  get #d() { return d; }
+  set #d(val) { d = val; }
+  constructor(arg) {
+    super(arg);  // KeyedStoreIC for private brand
+  }
+  static setAccessor(obj) {
+    obj.#d = 'd';  // KeyedLoadIC for private brand
+  }
+  static getAccessor(obj) {
+    return obj.#d;  // KeyedLoadIC for private brand
+  }
+}
+
+class E extends A {
+  #e() { return 0; }
+  constructor(arg) {
+    super(arg);  // KeyedStoreIC for private brand
+  }
+  static setMethod(obj) {
+    obj.#e = 'e';  // KeyedLoadIC for private brand
+  }
+  static getMethod(obj) {
+    return obj.#e;  // KeyedLoadIC for private brand
+  }
+}
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index f890e67970bdba..c4d4fe959e294a 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -15,4 +15,4 @@ Because whitespaces are not that funny......
 Today's answer to life the universe and everything is 12950!
 Today's answer to life the universe and everything is 6728!
 Today's answer to life the universe and everything is 6728!!
-.
+..
\ No newline at end of file
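
The shared base class A in regress-crbug-1321899.js relies on the constructor return-override trick: when a base constructor returns an object, that object becomes `this` in the derived constructor, so private fields and brands get stamped onto an arbitrary receiver. A standalone sketch of the mechanism (class and field names are illustrative):

class Base {
  constructor(target) {
    return target;  // return override: `this` in the subclass becomes target
  }
}

class Stamper extends Base {
  #stamp = 'stamped';
  constructor(target) {
    super(target);  // installs #stamp on target rather than on a fresh object
  }
  static read(obj) {
    return obj.#stamp;
  }
}

const plain = {};
new Stamper(plain);
console.log(Stamper.read(plain));  // 'stamped'
// Initializing the same field a second time throws:
// new Stamper(plain) -> TypeError: Cannot initialize #stamp twice on the same object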