From 6d2440fd2f54f45b60c8e4f97dcab55ff16bee68 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Thu, 30 Jan 2025 16:35:04 +0100
Subject: [PATCH 01/18] deps: update V8 to 13.0.245.25

---
 deps/v8/.clang-tidy | 12 +-
 deps/v8/.gitignore | 3 +
 deps/v8/AUTHORS | 3 +
 deps/v8/BUILD.bazel | 46 +-
 deps/v8/BUILD.gn | 210 ++-
 deps/v8/COMMON_OWNERS | 3 +-
 deps/v8/DEPS | 207 +--
 deps/v8/WATCHLISTS | 1 -
 deps/v8/bazel/defs.bzl | 2 +-
 deps/v8/docs/OWNERS | 3 +-
 deps/v8/gni/v8.gni | 5 +
 deps/v8/include/OWNERS | 1 -
 deps/v8/include/cppgc/default-platform.h | 5 +-
 deps/v8/include/cppgc/heap-consistency.h | 2 +-
 deps/v8/include/cppgc/platform.h | 9 +
 deps/v8/include/v8-callbacks.h | 38 +-
 deps/v8/include/v8-context.h | 2 +-
 deps/v8/include/v8-internal.h | 264 ++--
 deps/v8/include/v8-isolate.h | 3 +-
 deps/v8/include/v8-memory-span.h | 172 ++-
 deps/v8/include/v8-platform.h | 30 +-
 deps/v8/include/v8-script.h | 11 +-
 deps/v8/include/v8-template.h | 2 -
 deps/v8/include/v8-traced-handle.h | 33 +-
 deps/v8/include/v8-unwinder-state.h | 5 +-
 deps/v8/include/v8-version.h | 8 +-
 deps/v8/include/v8config.h | 34 +-
 deps/v8/infra/mb/gn_isolate_map.pyl | 4 +
 deps/v8/samples/OWNERS | 2 +-
 deps/v8/src/api/api-macros.h | 2 +-
 deps/v8/src/api/api-natives.cc | 4 +-
 deps/v8/src/api/api.cc | 45 +-
 deps/v8/src/asmjs/asm-parser.cc | 42 +-
 deps/v8/src/ast/scopes.h | 2 +-
 deps/v8/src/base/bit-field.h | 11 +-
 deps/v8/src/base/bounded-page-allocator.cc | 4 +
 deps/v8/src/base/bounded-page-allocator.h | 2 +
 deps/v8/src/base/build_config.h | 2 +-
 deps/v8/src/base/compiler-specific.h | 7 +-
 deps/v8/src/base/cpu.cc | 33 +-
 deps/v8/src/base/cpu.h | 14 +-
 deps/v8/src/base/debug/stack_trace.h | 2 +-
 deps/v8/src/base/ieee754.cc | 2 +-
 deps/v8/src/base/iterator.h | 16 +
 deps/v8/src/base/macros.h | 8 +-
 deps/v8/src/base/page-allocator.cc | 4 +
 deps/v8/src/base/page-allocator.h | 2 +
 deps/v8/src/base/platform/memory.h | 4 +-
 deps/v8/src/base/platform/platform-cygwin.cc | 15 +-
 deps/v8/src/base/platform/platform-fuchsia.cc | 3 +
 deps/v8/src/base/platform/platform-posix.cc | 45 +-
 deps/v8/src/base/platform/platform-win32.cc | 9 +-
 deps/v8/src/base/platform/platform.h | 8 +
 deps/v8/src/base/platform/time.cc | 5 +-
 deps/v8/src/base/sys-info.cc | 2 -
 .../virtual-address-space-page-allocator.cc | 4 +
 .../virtual-address-space-page-allocator.h | 2 +
 deps/v8/src/baseline/baseline-compiler.cc | 23 +-
 deps/v8/src/baseline/baseline-compiler.h | 2 +-
 deps/v8/src/builtins/DEPS | 4 +
 deps/v8/src/builtins/arm/builtins-arm.cc | 96 +-
 deps/v8/src/builtins/arm64/builtins-arm64.cc | 210 ++-
 deps/v8/src/builtins/array-from-async.tq | 14 +-
 deps/v8/src/builtins/builtins-array-gen.cc | 4 +
 deps/v8/src/builtins/builtins-array.cc | 2 +-
 .../builtins-async-disposable-stack.cc | 353 ++++-
 .../builtins/builtins-async-function-gen.cc | 13 +-
 deps/v8/src/builtins/builtins-async-gen.cc | 81 +-
 deps/v8/src/builtins/builtins-async-gen.h | 14 +-
 .../builtins/builtins-async-generator-gen.cc | 54 +-
 .../builtins/builtins-async-iterator-gen.cc | 6 +-
 deps/v8/src/builtins/builtins-bigint-gen.cc | 4 +
 deps/v8/src/builtins/builtins-call-gen.cc | 4 +
 .../src/builtins/builtins-collections-gen.cc | 4 +
 .../src/builtins/builtins-constructor-gen.cc | 10 +-
 .../src/builtins/builtins-conversion-gen.cc | 4 +
 deps/v8/src/builtins/builtins-date-gen.cc | 4 +
 deps/v8/src/builtins/builtins-definitions.h | 21 +-
 .../src/builtins/builtins-disposable-stack.cc | 2 +
 .../v8/src/builtins/builtins-generator-gen.cc | 4 +
 deps/v8/src/builtins/builtins-global-gen.cc | 4 +
 deps/v8/src/builtins/builtins-handler-gen.cc | 4 +
 deps/v8/src/builtins/builtins-inl.h | 23 +
 deps/v8/src/builtins/builtins-internal-gen.cc | 6 +-
 deps/v8/src/builtins/builtins-intl-gen.cc | 4 +
 deps/v8/src/builtins/builtins-iterator-gen.cc | 4 +
 deps/v8/src/builtins/builtins-lazy-gen.cc | 40 +-
 .../builtins/builtins-microtask-queue-gen.cc | 4 +
 deps/v8/src/builtins/builtins-number-gen.cc | 6 +
 deps/v8/src/builtins/builtins-number-tsa.cc | 47 +
 deps/v8/src/builtins/builtins-object-gen.cc | 51 +-
 deps/v8/src/builtins/builtins-proxy-gen.cc | 7 +-
 deps/v8/src/builtins/builtins-regexp-gen.cc | 4 +
 .../src/builtins/builtins-shadow-realm-gen.cc | 7 +-
 .../builtins-sharedarraybuffer-gen.cc | 4 +
 deps/v8/src/builtins/builtins-string-gen.cc | 4 +
 deps/v8/src/builtins/builtins-string-tsa.cc | 3 +-
 deps/v8/src/builtins/builtins-temporal-gen.cc | 4 +
 .../src/builtins/builtins-typed-array-gen.cc | 5 +
 deps/v8/src/builtins/builtins-utils-gen.h | 47 +-
 deps/v8/src/builtins/builtins-wasm-gen.cc | 30 +-
 deps/v8/src/builtins/builtins.cc | 1 +
 deps/v8/src/builtins/builtins.h | 26 +
 deps/v8/src/builtins/data-view-ops.h | 10 +-
 .../generate-bytecodes-builtins-list.cc | 6 +-
 .../src/builtins/growable-fixed-array-gen.cc | 4 +
 deps/v8/src/builtins/ia32/builtins-ia32.cc | 114 +-
 deps/v8/src/builtins/js-to-js.tq | 5 +-
 deps/v8/src/builtins/js-to-wasm.tq | 8 +-
 .../src/builtins/loong64/builtins-loong64.cc | 111 +-
 .../v8/src/builtins/mips64/builtins-mips64.cc | 60 +-
 .../builtins/number-builtins-reducer-inl.h | 64 +
 deps/v8/src/builtins/ppc/builtins-ppc.cc | 92 +-
 .../builtins/promise-abstract-operations.tq | 21 +-
 deps/v8/src/builtins/promise-all.tq | 5 +-
 deps/v8/src/builtins/promise-any.tq | 5 +-
 deps/v8/src/builtins/promise-finally.tq | 8 +-
 deps/v8/src/builtins/promise-misc.tq | 7 +-
 deps/v8/src/builtins/riscv/builtins-riscv.cc | 107 +-
 deps/v8/src/builtins/s390/builtins-s390.cc | 76 +-
 deps/v8/src/builtins/set-difference.tq | 3 +-
 deps/v8/src/builtins/set-intersection.tq | 3 +-
 deps/v8/src/builtins/set-is-disjoint-from.tq | 3 +-
 deps/v8/src/builtins/set-is-subset-of.tq | 3 +-
 deps/v8/src/builtins/set-is-superset-of.tq | 3 +-
 .../src/builtins/setup-builtins-internal.cc | 57 +-
 deps/v8/src/builtins/string-trim.tq | 2 +-
 deps/v8/src/builtins/wasm-to-js.tq | 37 +-
 deps/v8/src/builtins/wasm.tq | 19 +-
 deps/v8/src/builtins/x64/builtins-x64.cc | 178 ++-
 deps/v8/src/codegen/OWNERS | 3 +-
 deps/v8/src/codegen/arm/assembler-arm.cc | 9 +-
 .../v8/src/codegen/arm/macro-assembler-arm.cc | 2 +-
 deps/v8/src/codegen/arm/register-arm.h | 2 +-
 deps/v8/src/codegen/arm64/assembler-arm64.cc | 9 +-
 deps/v8/src/codegen/arm64/cpu-arm64.cc | 4 +
 .../codegen/arm64/macro-assembler-arm64-inl.h | 5 +
 .../codegen/arm64/macro-assembler-arm64.cc | 302 +++-
 .../src/codegen/arm64/macro-assembler-arm64.h | 48 +-
 deps/v8/src/codegen/arm64/register-arm64.h | 9 +-
 deps/v8/src/codegen/assembler-arch.h | 2 +-
 deps/v8/src/codegen/assembler-inl.h | 2 +-
 deps/v8/src/codegen/assembler.h | 10 +-
 deps/v8/src/codegen/code-desc.cc | 14 +-
 deps/v8/src/codegen/code-desc.h | 10 +-
 deps/v8/src/codegen/code-stub-assembler-inl.h | 5 +
 deps/v8/src/codegen/code-stub-assembler.cc | 157 +-
 deps/v8/src/codegen/code-stub-assembler.h | 156 +-
 deps/v8/src/codegen/compiler.cc | 197 +--
 deps/v8/src/codegen/compiler.h | 3 +-
 deps/v8/src/codegen/constant-pool.cc | 4 +-
 deps/v8/src/codegen/constant-pool.h | 6 +-
 deps/v8/src/codegen/constants-arch.h | 2 +-
 deps/v8/src/codegen/cpu-features.h | 5 +-
 .../define-code-stub-assembler-macros.inc | 92 ++
 deps/v8/src/codegen/external-reference.cc | 7 +-
 deps/v8/src/codegen/external-reference.h | 3 +
 deps/v8/src/codegen/heap-object-list.h | 23 -
 deps/v8/src/codegen/ia32/assembler-ia32.cc | 9 +-
 .../src/codegen/ia32/macro-assembler-ia32.cc | 2 +-
 deps/v8/src/codegen/ia32/register-ia32.h | 2 +-
 .../src/codegen/interface-descriptors-inl.h | 28 +-
 deps/v8/src/codegen/interface-descriptors.h | 15 +
 .../src/codegen/loong64/assembler-loong64.cc | 9 +-
 .../loong64/macro-assembler-loong64.cc | 79 +-
 .../codegen/loong64/macro-assembler-loong64.h | 6 +
 .../v8/src/codegen/loong64/register-loong64.h | 2 +-
 deps/v8/src/codegen/macro-assembler.h | 5 +-
 .../v8/src/codegen/mips64/assembler-mips64.cc | 11 +-
 .../codegen/mips64/macro-assembler-mips64.cc | 79 +-
 .../codegen/mips64/macro-assembler-mips64.h | 6 +
 deps/v8/src/codegen/mips64/register-mips64.h | 2 +-
 .../src/codegen/optimized-compilation-info.cc | 7 +-
 .../src/codegen/optimized-compilation-info.h | 43 +-
 deps/v8/src/codegen/ppc/assembler-ppc-inl.h | 41 -
 deps/v8/src/codegen/ppc/assembler-ppc.cc | 99 +-
 deps/v8/src/codegen/ppc/assembler-ppc.h | 37 -
 deps/v8/src/codegen/ppc/constants-ppc.cc | 4 +-
 deps/v8/src/codegen/ppc/constants-ppc.h | 16 +-
 deps/v8/src/codegen/ppc/cpu-ppc.cc | 40 +-
 .../ppc/interface-descriptors-ppc-inl.h | 4 +-
 .../v8/src/codegen/ppc/macro-assembler-ppc.cc | 466 +-----
 deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 49 -
 deps/v8/src/codegen/ppc/register-ppc.h | 2 +-
 deps/v8/src/codegen/register-arch.h | 2 +-
 deps/v8/src/codegen/register-configuration.cc | 2 -
 deps/v8/src/codegen/reglist.h | 2 +-
 deps/v8/src/codegen/reloc-info-inl.h | 2 +-
 deps/v8/src/codegen/reloc-info.cc | 21 +-
 deps/v8/src/codegen/reloc-info.h | 10 -
 .../src/codegen/riscv/assembler-riscv-inl.h | 2 +-
 deps/v8/src/codegen/riscv/assembler-riscv.cc | 97 +-
 deps/v8/src/codegen/riscv/assembler-riscv.h | 118 +-
 .../codegen/riscv/macro-assembler-riscv.cc | 151 +-
 .../src/codegen/riscv/macro-assembler-riscv.h | 6 +
 deps/v8/src/codegen/riscv/register-riscv.h | 2 +-
 deps/v8/src/codegen/s390/assembler-s390.cc | 9 +-
 .../src/codegen/s390/macro-assembler-s390.cc | 7 +-
 .../src/codegen/s390/macro-assembler-s390.h | 1 +
 deps/v8/src/codegen/s390/register-s390.h | 2 +-
 .../macro-assembler-shared-ia32-x64.cc | 82 +-
 .../macro-assembler-shared-ia32-x64.h | 69 +
 deps/v8/src/codegen/source-position.h | 4 -
 .../turboshaft-builtins-assembler-inl.h | 281 +++-
 .../undef-code-stub-assembler-macros.inc | 32 +
 deps/v8/src/codegen/x64/assembler-x64.cc | 51 +-
 deps/v8/src/codegen/x64/assembler-x64.h | 20 +-
 .../x64/builtin-jump-table-info-x64.cc | 64 +
 .../codegen/x64/builtin-jump-table-info-x64.h | 72 +
 .../v8/src/codegen/x64/macro-assembler-x64.cc | 342 +++--
 deps/v8/src/codegen/x64/macro-assembler-x64.h | 59 +-
 deps/v8/src/codegen/x64/register-x64.h | 10 +-
 deps/v8/src/common/code-memory-access-inl.h | 5 +-
 deps/v8/src/common/code-memory-access.cc | 3 +-
 deps/v8/src/common/code-memory-access.h | 7 +
 deps/v8/src/common/globals.h | 56 +-
 deps/v8/src/common/message-template.h | 3 +-
 deps/v8/src/common/ptr-compr-inl.h | 14 +-
 deps/v8/src/common/ptr-compr.h | 7 +-
 deps/v8/src/common/segmented-table-inl.h | 14 +-
 deps/v8/src/common/segmented-table.h | 34 +-
 .../lazy-compile-dispatcher.cc | 2 +-
 .../optimizing-compile-dispatcher.cc | 17 +-
 .../optimizing-compile-dispatcher.h | 5 +-
 deps/v8/src/compiler/OWNERS | 7 +-
 deps/v8/src/compiler/access-builder.cc | 51 +-
 deps/v8/src/compiler/access-builder.h | 6 +-
 deps/v8/src/compiler/backend/OWNERS | 1 +
 .../backend/arm/code-generator-arm.cc | 18 +-
 .../backend/arm/instruction-selector-arm.cc | 10 +-
 .../backend/arm64/code-generator-arm64.cc | 76 +-
 .../arm64/instruction-selector-arm64.cc | 9 +-
 .../v8/src/compiler/backend/code-generator.cc | 30 +-
 deps/v8/src/compiler/backend/code-generator.h | 4 +
 .../backend/ia32/code-generator-ia32.cc | 18 +-
 .../src/compiler/backend/instruction-codes.h | 2 +-
 .../compiler/backend/instruction-selector.cc | 48 +-
 .../compiler/backend/instruction-selector.h | 4 +
 .../backend/loong64/code-generator-loong64.cc | 34 +-
 .../loong64/instruction-selector-loong64.cc | 28 +-
 .../backend/mips64/code-generator-mips64.cc | 32 +-
 .../mips64/instruction-selector-mips64.cc | 40 +-
 .../backend/ppc/code-generator-ppc.cc | 271 +---
 .../backend/ppc/instruction-selector-ppc.cc | 302 ++--
 .../src/compiler/backend/register-allocator.h | 2 +-
 .../backend/riscv/code-generator-riscv.cc | 38 +-
 .../riscv/instruction-selector-riscv.h | 555 +++----
 .../riscv/instruction-selector-riscv32.cc | 202 +--
 .../riscv/instruction-selector-riscv64.cc | 514 ++++---
 .../backend/s390/code-generator-s390.cc | 27 +-
 .../backend/s390/instruction-selector-s390.cc | 24 +-
 .../compiler/backend/unwinding-info-writer.h | 2 +-
 .../backend/x64/code-generator-x64.cc | 116 +-
 .../backend/x64/instruction-codes-x64.h | 1 +
 .../backend/x64/instruction-scheduler-x64.cc | 1 +
 .../backend/x64/instruction-selector-x64.cc | 51 +-
 deps/v8/src/compiler/bytecode-analysis.cc | 8 +-
 .../v8/src/compiler/bytecode-graph-builder.cc | 4 +-
 deps/v8/src/compiler/c-linkage.cc | 2 +-
 deps/v8/src/compiler/code-assembler.cc | 46 +
 deps/v8/src/compiler/code-assembler.h | 34 +-
 .../src/compiler/escape-analysis-reducer.cc | 11 -
 deps/v8/src/compiler/fast-api-calls.cc | 12 +-
 deps/v8/src/compiler/heap-refs.h | 5 +-
 deps/v8/src/compiler/js-call-reducer.cc | 33 +-
 deps/v8/src/compiler/js-call-reducer.h | 9 +-
 deps/v8/src/compiler/js-create-lowering.cc | 37 +-
 deps/v8/src/compiler/js-heap-broker.cc | 2 +-
 deps/v8/src/compiler/js-heap-broker.h | 6 -
 deps/v8/src/compiler/js-inlining-heuristic.h | 10 +-
 deps/v8/src/compiler/js-inlining.cc | 29 +-
 deps/v8/src/compiler/js-inlining.h | 19 +
 deps/v8/src/compiler/linkage.cc | 12 +-
 deps/v8/src/compiler/machine-graph.h | 4 +-
 deps/v8/src/compiler/machine-operator.h | 2 +-
 deps/v8/src/compiler/memory-lowering.cc | 2 +-
 deps/v8/src/compiler/node.h | 2 +-
 deps/v8/src/compiler/pipeline-data-inl.h | 35 +-
 deps/v8/src/compiler/pipeline.cc | 87 +-
 deps/v8/src/compiler/pipeline.h | 7 +-
 deps/v8/src/compiler/simplified-lowering.cc | 50 +-
 .../src/compiler/string-builder-optimizer.h | 2 +-
 .../src/compiler/turboshaft/access-builder.h | 7 +
 deps/v8/src/compiler/turboshaft/assembler.h | 124 +-
 .../compiler/turboshaft/build-graph-phase.cc | 11 +-
 .../compiler/turboshaft/builtin-compiler.cc | 52 +
 .../compiler/turboshaft/builtin-compiler.h | 56 +
 .../turboshaft/define-assembler-macros.inc | 10 +
 .../fast-api-call-lowering-reducer.h | 128 +-
 .../src/compiler/turboshaft/field-macro.inc | 9 +
 .../src/compiler/turboshaft/graph-builder.cc | 114 +-
 .../src/compiler/turboshaft/graph-builder.h | 6 +-
 deps/v8/src/compiler/turboshaft/graph.h | 3 +
 .../turboshaft/int64-lowering-reducer.h | 5 +-
 .../turboshaft/machine-lowering-phase.cc | 2 +-
 .../turboshaft/machine-lowering-reducer-inl.h | 2 +-
 .../turboshaft/machine-optimization-reducer.h | 16 +-
 .../turboshaft/maglev-graph-building-phase.cc | 23 +-
 .../turboshaft/memory-optimization-reducer.h | 6 +-
 deps/v8/src/compiler/turboshaft/operations.cc | 6 +
 deps/v8/src/compiler/turboshaft/operations.h | 81 +-
 deps/v8/src/compiler/turboshaft/opmasks.h | 8 +-
 deps/v8/src/compiler/turboshaft/phase.h | 19 +-
 deps/v8/src/compiler/turboshaft/pipelines.h | 14 +
 .../pretenuring-propagation-reducer.h | 6 +-
 .../compiler/turboshaft/recreate-schedule.cc | 7 +-
 .../turboshaft/runtime-call-descriptors.h | 19 +
 .../turboshaft/stack-check-lowering-reducer.h | 64 +-
 .../turboshaft/undef-assembler-macros.inc | 2 +-
 .../compiler/turboshaft/variable-reducer.h | 3 +-
 .../wasm-gc-typed-optimization-reducer.cc | 47 +-
 .../wasm-gc-typed-optimization-reducer.h | 2 +
 .../turboshaft/wasm-in-js-inlining-phase.cc | 26 +
 .../turboshaft/wasm-in-js-inlining-phase.h | 26 +
 .../wasm-in-js-inlining-reducer-inl.h | 1230 +++++++++++++++
 .../compiler/turboshaft/wasm-revec-reducer.cc | 10 +-
 .../compiler/turboshaft/wasm-revec-reducer.h | 33 +-
 deps/v8/src/compiler/typer.cc | 52 +-
 deps/v8/src/compiler/wasm-compiler.cc | 90 +-
 deps/v8/src/compiler/wasm-compiler.h | 9 +-
 deps/v8/src/compiler/wasm-graph-assembler.cc | 28 +-
 deps/v8/src/compiler/wasm-graph-assembler.h | 7 +-
 deps/v8/src/compiler/wasm-inlining-into-js.cc | 4 +-
 deps/v8/src/d8/d8-platforms.cc | 10 +-
 deps/v8/src/d8/d8-test.cc | 20 +-
 deps/v8/src/d8/d8.cc | 230 ++-
 deps/v8/src/d8/d8.h | 8 +-
 deps/v8/src/debug/debug-evaluate.cc | 14 +-
 deps/v8/src/debug/debug.cc | 27 +-
 deps/v8/src/debug/debug.h | 2 +
 deps/v8/src/debug/liveedit.cc | 12 +-
 .../v8/src/deoptimizer/arm/deoptimizer-arm.cc | 5 +
 .../deoptimizer/arm64/deoptimizer-arm64.cc | 5 +
 deps/v8/src/deoptimizer/deoptimizer.cc | 80 +-
 deps/v8/src/deoptimizer/deoptimizer.h | 4 +
 .../src/deoptimizer/ia32/deoptimizer-ia32.cc | 5 +
 .../loong64/deoptimizer-loong64.cc | 5 +
 .../deoptimizer/mips64/deoptimizer-mips64.cc | 5 +
 .../v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 5 +
 .../deoptimizer/riscv/deoptimizer-riscv.cc | 5 +
 .../src/deoptimizer/s390/deoptimizer-s390.cc | 5 +
 deps/v8/src/deoptimizer/translated-state.cc | 25 +-
 .../v8/src/deoptimizer/x64/deoptimizer-x64.cc | 21 +
 deps/v8/src/diagnostics/disassembler.cc | 34 +-
 deps/v8/src/diagnostics/etw-jit-win.cc | 2 +-
 deps/v8/src/diagnostics/gdb-jit.cc | 3 -
 deps/v8/src/diagnostics/objects-debug.cc | 86 +-
 deps/v8/src/diagnostics/objects-printer.cc | 56 +-
 deps/v8/src/diagnostics/ppc/disasm-ppc.cc | 42 +-
 deps/v8/src/diagnostics/x64/disasm-x64.cc | 5 +
 deps/v8/src/execution/OWNERS | 1 +
 .../src/execution/arm/frame-constants-arm.h | 2 +-
 deps/v8/src/execution/arm/simulator-arm.h | 2 +-
 .../execution/arm64/frame-constants-arm64.h | 2 +-
 .../arm64/pointer-authentication-arm64.h | 3 +-
 .../v8/src/execution/arm64/simulator-arm64.cc | 20 +-
 deps/v8/src/execution/frame-constants.h | 42 +-
 deps/v8/src/execution/frames-inl.h | 5 +
 deps/v8/src/execution/frames.cc | 174 ++-
 deps/v8/src/execution/frames.h | 64 +-
 .../src/execution/ia32/frame-constants-ia32.h | 2 +-
 deps/v8/src/execution/isolate-data.h | 32 +-
 deps/v8/src/execution/isolate-utils-inl.h | 16 -
 deps/v8/src/execution/isolate.cc | 160 +-
 deps/v8/src/execution/isolate.h | 27 +-
 .../loong64/frame-constants-loong64.h | 2 +-
 .../execution/mips64/frame-constants-mips64.h | 2 +-
 .../execution/pointer-authentication-dummy.h | 3 +-
 .../src/execution/ppc/frame-constants-ppc.cc | 4 +-
 .../src/execution/ppc/frame-constants-ppc.h | 2 +-
 deps/v8/src/execution/ppc/simulator-ppc.cc | 51 -
 deps/v8/src/execution/ppc/simulator-ppc.h | 4 -
 .../execution/riscv/frame-constants-riscv.h | 2 +-
 .../src/execution/s390/frame-constants-s390.h | 2 +-
 deps/v8/src/execution/simulator-base.h | 1 -
 deps/v8/src/execution/simulator.h | 2 +-
 deps/v8/src/execution/tiering-manager.cc | 66 +-
 .../src/execution/x64/frame-constants-x64.h | 2 +-
 deps/v8/src/flags/flag-definitions.h | 108 +-
 deps/v8/src/flags/flags-impl.h | 2 +-
 deps/v8/src/flags/flags.cc | 4 +-
 deps/v8/src/handles/traced-handles-inl.h | 2 +-
 deps/v8/src/handles/traced-handles.cc | 6 +-
 deps/v8/src/handles/traced-handles.h | 2 +-
 deps/v8/src/heap/allocation-observer.cc | 2 +
 deps/v8/src/heap/array-buffer-sweeper.cc | 7 +-
 .../heap/base/incremental-marking-schedule.cc | 5 +-
 .../heap/base/incremental-marking-schedule.h | 4 +-
 deps/v8/src/heap/code-range.cc | 20 +-
 deps/v8/src/heap/code-range.h | 10 +-
 deps/v8/src/heap/collection-barrier.cc | 2 +
 deps/v8/src/heap/combined-heap.h | 7 -
 deps/v8/src/heap/cppgc-js/cpp-heap.cc | 14 +-
 .../cppgc-js/unified-heap-marking-state-inl.h | 3 +-
 .../cppgc-js/unified-heap-marking-verifier.cc | 13 +-
 .../cppgc-js/unified-heap-marking-visitor.cc | 3 -
 deps/v8/src/heap/cppgc/compactor.cc | 11 +-
 deps/v8/src/heap/cppgc/heap-base.h | 9 +
 deps/v8/src/heap/cppgc/heap-config.h | 6 -
 deps/v8/src/heap/cppgc/heap-page.cc | 4 -
 deps/v8/src/heap/cppgc/heap-page.h | 4 +-
 deps/v8/src/heap/cppgc/marker.cc | 17 +-
 deps/v8/src/heap/cppgc/memory.cc | 71 +
 deps/v8/src/heap/cppgc/memory.h | 94 +-
 deps/v8/src/heap/cppgc/object-start-bitmap.h | 2 +-
 deps/v8/src/heap/cppgc/stats-collector.h | 7 +-
 deps/v8/src/heap/cppgc/sweeper.cc | 56 +-
 deps/v8/src/heap/evacuation-verifier.cc | 2 +-
 deps/v8/src/heap/factory-base.cc | 57 +-
 deps/v8/src/heap/factory-base.h | 14 +-
 deps/v8/src/heap/factory.cc | 78 +-
 deps/v8/src/heap/factory.h | 27 +-
 deps/v8/src/heap/free-list.cc | 4 +-
 deps/v8/src/heap/free-list.h | 2 +-
 deps/v8/src/heap/gc-tracer.cc | 2 +
 deps/v8/src/heap/heap-allocator-inl.h | 87 +-
 deps/v8/src/heap/heap-allocator.h | 3 +
 deps/v8/src/heap/heap-controller.cc | 66 +-
 deps/v8/src/heap/heap-controller.h | 12 +-
 deps/v8/src/heap/heap-inl.h | 46 +-
 deps/v8/src/heap/heap-verifier.cc | 64 +-
 deps/v8/src/heap/heap-write-barrier-inl.h | 369 ++---
 deps/v8/src/heap/heap-write-barrier.cc | 302 +++-
 deps/v8/src/heap/heap-write-barrier.h | 184 ++-
 deps/v8/src/heap/heap.cc | 667 +++------
 deps/v8/src/heap/heap.h | 161 +-
 deps/v8/src/heap/incremental-marking-job.cc | 77 +-
 deps/v8/src/heap/incremental-marking-job.h | 23 +-
 deps/v8/src/heap/large-page-metadata-inl.h | 1 -
 deps/v8/src/heap/large-spaces.cc | 9 +-
 deps/v8/src/heap/local-heap-inl.h | 1 -
 deps/v8/src/heap/main-allocator-inl.h | 1 -
 deps/v8/src/heap/main-allocator.cc | 16 +-
 deps/v8/src/heap/main-allocator.h | 2 +-
 deps/v8/src/heap/mark-compact.cc | 307 ++--
 deps/v8/src/heap/mark-sweep-utilities.cc | 3 +-
 deps/v8/src/heap/marking-barrier.cc | 23 +-
 deps/v8/src/heap/marking-inl.h | 20 +
 deps/v8/src/heap/marking-visitor-inl.h | 67 +-
 deps/v8/src/heap/marking.cc | 2 +
 deps/v8/src/heap/marking.h | 13 +
 deps/v8/src/heap/memory-allocator.cc | 2 +
 deps/v8/src/heap/memory-chunk-metadata-inl.h | 3 -
 deps/v8/src/heap/memory-chunk.h | 7 -
 deps/v8/src/heap/memory-measurement.cc | 5 +-
 deps/v8/src/heap/mutable-page-metadata-inl.h | 4 -
 deps/v8/src/heap/mutable-page-metadata.cc | 1 +
 deps/v8/src/heap/new-spaces.cc | 88 +-
 deps/v8/src/heap/new-spaces.h | 14 +-
 deps/v8/src/heap/objects-visiting.h | 1 -
 deps/v8/src/heap/page-metadata-inl.h | 3 -
 deps/v8/src/heap/paged-spaces-inl.h | 3 -
 deps/v8/src/heap/paged-spaces.cc | 55 +-
 deps/v8/src/heap/paged-spaces.h | 17 +-
 deps/v8/src/heap/read-only-heap.cc | 25 +-
 deps/v8/src/heap/read-only-promotion.cc | 81 +-
 deps/v8/src/heap/read-only-spaces.cc | 10 +-
 deps/v8/src/heap/scavenger.cc | 4 +-
 deps/v8/src/heap/setup-heap-internal.cc | 46 +-
 deps/v8/src/heap/third-party/heap-api-stub.cc | 75 -
 deps/v8/src/heap/third-party/heap-api.h | 66 -
 .../heap/traced-handles-marking-visitor.cc | 3 +
 .../src/heap/traced-handles-marking-visitor.h | 4 +-
 deps/v8/src/ic/OWNERS | 1 +
 deps/v8/src/ic/accessor-assembler.cc | 4 +
 deps/v8/src/ic/binary-op-assembler.cc | 4 +
 deps/v8/src/ic/handler-configuration.h | 2 +-
 deps/v8/src/ic/ic.cc | 67 +-
 deps/v8/src/ic/keyed-store-generic.cc | 4 +
 deps/v8/src/ic/unary-op-assembler.cc | 4 +
 deps/v8/src/init/bootstrapper.cc | 1334 +++++++++--------
 deps/v8/src/init/heap-symbols.h | 2 +
 deps/v8/src/init/isolate-group.cc | 23 +-
 deps/v8/src/init/isolate-group.h | 6 +-
 .../inspector/v8-heap-profiler-agent-impl.cc | 1 -
 deps/v8/src/interpreter/DEPS | 17 +
 .../src/interpreter/bytecode-array-builder.cc | 2 +-
 .../src/interpreter/bytecode-array-builder.h | 2 +-
 deps/v8/src/interpreter/bytecode-generator.cc | 197 ++-
 deps/v8/src/interpreter/bytecode-node.h | 2 +-
 deps/v8/src/interpreter/bytecodes.cc | 28 +-
 deps/v8/src/interpreter/bytecodes.h | 28 +-
 .../src/interpreter/interpreter-assembler.cc | 10 +-
 .../interpreter/interpreter-generator-tsa.cc | 351 +++++
 .../interpreter/interpreter-generator-tsa.h | 16 +
 .../src/interpreter/interpreter-generator.cc | 43 +-
 .../interpreter-intrinsics-generator.cc | 4 +
 deps/v8/src/json/json-stringifier.cc | 2 +-
 deps/v8/src/libplatform/default-platform.cc | 2 +-
 deps/v8/src/libplatform/default-platform.h | 2 +-
 .../src/libplatform/tracing/trace-writer.cc | 2 +-
 deps/v8/src/libsampler/sampler.cc | 7 +-
 deps/v8/src/logging/log.cc | 14 +-
 deps/v8/src/logging/runtime-call-stats.h | 1 +
 .../src/maglev/arm/maglev-assembler-arm-inl.h | 24 +-
 .../maglev/arm64/maglev-assembler-arm64-inl.h | 24 +-
 deps/v8/src/maglev/maglev-assembler-inl.h | 28 +
 deps/v8/src/maglev/maglev-assembler.cc | 36 +-
 deps/v8/src/maglev/maglev-assembler.h | 43 +-
 deps/v8/src/maglev/maglev-code-generator.cc | 23 +-
 deps/v8/src/maglev/maglev-code-generator.h | 1 +
 deps/v8/src/maglev/maglev-graph-builder.cc | 204 +--
 deps/v8/src/maglev/maglev-graph-builder.h | 29 +-
 deps/v8/src/maglev/maglev-graph-printer.cc | 20 +
 deps/v8/src/maglev/maglev-graph-verifier.h | 3 +-
 .../maglev/maglev-interpreter-frame-state.cc | 13 +-
 deps/v8/src/maglev/maglev-ir-inl.h | 6 +-
 deps/v8/src/maglev/maglev-ir.cc | 113 +-
 deps/v8/src/maglev/maglev-ir.h | 9 +-
 .../maglev-phi-representation-selector.cc | 11 +-
 .../maglev/s390/maglev-assembler-s390-inl.h | 41 +-
 deps/v8/src/maglev/s390/maglev-ir-s390.cc | 6 +
 .../src/maglev/x64/maglev-assembler-x64-inl.h | 22 +-
 .../v8/src/maglev/x64/maglev-assembler-x64.cc | 4 +-
 deps/v8/src/numbers/conversions.cc | 221 +--
 deps/v8/src/numbers/conversions.h | 5 +
 deps/v8/src/objects/bytecode-array.cc | 6 +-
 deps/v8/src/objects/code-inl.h | 42 +-
 deps/v8/src/objects/code.cc | 7 +-
 deps/v8/src/objects/code.h | 20 +-
 deps/v8/src/objects/contexts.h | 3 +
 deps/v8/src/objects/deoptimization-data-inl.h | 2 +-
 deps/v8/src/objects/elements.cc | 4 +-
 deps/v8/src/objects/elements.h | 2 +-
 deps/v8/src/objects/feedback-cell-inl.h | 10 +-
 deps/v8/src/objects/feedback-cell.h | 7 +-
 deps/v8/src/objects/feedback-vector-inl.h | 19 +
 deps/v8/src/objects/feedback-vector.cc | 3 +-
 deps/v8/src/objects/feedback-vector.h | 7 +-
 deps/v8/src/objects/fixed-array-inl.h | 30 +-
 deps/v8/src/objects/fixed-array.h | 49 +-
 deps/v8/src/objects/fixed-array.tq | 5 -
 deps/v8/src/objects/hash-table.h | 5 -
 deps/v8/src/objects/heap-object.h | 52 +-
 deps/v8/src/objects/instruction-stream.cc | 6 +-
 deps/v8/src/objects/intl-objects.cc | 2 +-
 deps/v8/src/objects/js-array-buffer-inl.h | 2 +-
 deps/v8/src/objects/js-break-iterator.cc | 90 +-
 deps/v8/src/objects/js-collection.h | 8 +-
 deps/v8/src/objects/js-date-time-format.h | 34 +-
 deps/v8/src/objects/js-display-names-inl.h | 6 +-
 deps/v8/src/objects/js-display-names.h | 14 +-
 deps/v8/src/objects/js-disposable-stack-inl.h | 66 +-
 deps/v8/src/objects/js-disposable-stack.cc | 231 ++-
 deps/v8/src/objects/js-disposable-stack.h | 19 +-
 deps/v8/src/objects/js-disposable-stack.tq | 5 +-
 deps/v8/src/objects/js-duration-format-inl.h | 2 +-
 deps/v8/src/objects/js-function-inl.h | 136 +-
 deps/v8/src/objects/js-function.cc | 37 +-
 deps/v8/src/objects/js-function.h | 32 +-
 deps/v8/src/objects/js-function.tq | 2 +-
 deps/v8/src/objects/js-list-format-inl.h | 4 +-
 deps/v8/src/objects/js-list-format.h | 12 +-
 deps/v8/src/objects/js-objects-inl.h | 23 +-
 deps/v8/src/objects/js-plural-rules-inl.h | 2 +-
 deps/v8/src/objects/js-plural-rules.h | 4 +-
 deps/v8/src/objects/js-promise-inl.h | 21 +
 deps/v8/src/objects/js-promise.h | 10 +-
 deps/v8/src/objects/js-promise.tq | 2 +-
 deps/v8/src/objects/js-proxy.h | 2 +-
 .../src/objects/js-relative-time-format-inl.h | 2 +-
 deps/v8/src/objects/js-relative-time-format.h | 5 +-
 deps/v8/src/objects/js-segment-iterator-inl.h | 2 +-
 deps/v8/src/objects/js-segment-iterator.h | 6 +-
 deps/v8/src/objects/js-segmenter-inl.h | 2 +-
 deps/v8/src/objects/js-segmenter.h | 6 +-
 deps/v8/src/objects/js-segments-inl.h | 2 +-
 deps/v8/src/objects/js-segments.h | 6 +-
 deps/v8/src/objects/literal-objects.cc | 120 +-
 deps/v8/src/objects/literal-objects.h | 2 +-
 deps/v8/src/objects/map-inl.h | 19 +-
 deps/v8/src/objects/map.cc | 18 +-
 deps/v8/src/objects/map.h | 11 +-
 deps/v8/src/objects/module-inl.h | 2 +-
 deps/v8/src/objects/module.cc | 22 +-
 deps/v8/src/objects/module.h | 6 +-
 deps/v8/src/objects/object-list-macros.h | 2 -
 deps/v8/src/objects/object-macros.h | 108 +-
 .../objects/objects-body-descriptors-inl.h | 47 +-
 deps/v8/src/objects/objects-definitions.h | 15 +-
 deps/v8/src/objects/objects-inl.h | 63 +-
 deps/v8/src/objects/objects.cc | 17 +-
 deps/v8/src/objects/objects.h | 2 +-
 deps/v8/src/objects/scope-info.h | 4 +-
 .../v8/src/objects/shared-function-info-inl.h | 10 +-
 deps/v8/src/objects/shared-function-info.h | 13 +-
 deps/v8/src/objects/shared-function-info.tq | 9 +-
 deps/v8/src/objects/source-text-module.cc | 151 +-
 deps/v8/src/objects/source-text-module.h | 3 +-
 deps/v8/src/objects/string.cc | 1 -
 deps/v8/src/objects/tagged-field-inl.h | 3 +-
 deps/v8/src/objects/tagged-field.h | 2 +
 deps/v8/src/objects/tagged-impl.h | 11 +
 deps/v8/src/objects/transitions.h | 6 -
 deps/v8/src/objects/value-serializer.cc | 2 +-
 deps/v8/src/objects/visitors.h | 2 +-
 deps/v8/src/parsing/parser-base.h | 8 +
 deps/v8/src/parsing/parser.cc | 25 +-
 deps/v8/src/parsing/parser.h | 2 +
 deps/v8/src/parsing/preparser.h | 4 +
 deps/v8/src/parsing/rewriter.cc | 19 +-
 .../src/parsing/scanner-character-streams.cc | 4 +-
 deps/v8/src/parsing/scanner.cc | 2 +-
 deps/v8/src/profiler/cpu-profiler.h | 2 +-
 .../src/profiler/heap-snapshot-generator.cc | 25 +
 deps/v8/src/profiler/profile-generator.h | 2 +-
 deps/v8/src/profiler/tick-sample.cc | 2 +-
 .../regexp/arm/regexp-macro-assembler-arm.cc | 13 +
 .../regexp/arm/regexp-macro-assembler-arm.h | 3 +
 .../arm64/regexp-macro-assembler-arm64.cc | 132 +-
 .../arm64/regexp-macro-assembler-arm64.h | 6 +-
 .../ia32/regexp-macro-assembler-ia32.cc | 13 +
 .../regexp/ia32/regexp-macro-assembler-ia32.h | 3 +
 .../loong64/regexp-macro-assembler-loong64.cc | 13 +
 .../loong64/regexp-macro-assembler-loong64.h | 3 +
 .../mips64/regexp-macro-assembler-mips64.cc | 13 +
 .../mips64/regexp-macro-assembler-mips64.h | 3 +
 .../regexp/ppc/regexp-macro-assembler-ppc.cc | 17 +-
 .../regexp/ppc/regexp-macro-assembler-ppc.h | 3 +
 .../src/regexp/regexp-bytecode-generator.cc | 24 +-
 .../v8/src/regexp/regexp-bytecode-generator.h | 4 +
 deps/v8/src/regexp/regexp-compiler-tonode.cc | 33 +-
 deps/v8/src/regexp/regexp-compiler.cc | 273 ++--
 deps/v8/src/regexp/regexp-compiler.h | 6 +-
 deps/v8/src/regexp/regexp-dotprinter.cc | 1 +
 deps/v8/src/regexp/regexp-interpreter.cc | 2 +-
 .../src/regexp/regexp-macro-assembler-arch.h | 2 +-
 .../regexp/regexp-macro-assembler-tracer.cc | 33 +
 .../regexp/regexp-macro-assembler-tracer.h | 3 +
 deps/v8/src/regexp/regexp-macro-assembler.h | 5 +
 deps/v8/src/regexp/regexp-nodes.h | 57 +-
 deps/v8/src/regexp/regexp-parser.cc | 13 +-
 deps/v8/src/regexp/regexp.cc | 6 +-
 .../riscv/regexp-macro-assembler-riscv.cc | 13 +
 .../riscv/regexp-macro-assembler-riscv.h | 3 +
 .../s390/regexp-macro-assembler-s390.cc | 13 +
 .../regexp/s390/regexp-macro-assembler-s390.h | 3 +
 .../regexp/x64/regexp-macro-assembler-x64.cc | 139 +-
 .../regexp/x64/regexp-macro-assembler-x64.h | 13 +
 deps/v8/src/roots/OWNERS | 8 +-
 deps/v8/src/roots/roots.h | 48 +-
 deps/v8/src/roots/static-roots.h | 1072 +++++++------
 deps/v8/src/runtime/runtime-atomics.cc | 6 +-
 deps/v8/src/runtime/runtime-compiler.cc | 56 +-
 deps/v8/src/runtime/runtime-internal.cc | 39 +-
 deps/v8/src/runtime/runtime-scopes.cc | 22 +-
 deps/v8/src/runtime/runtime-strings.cc | 42 +-
 deps/v8/src/runtime/runtime-test-wasm.cc | 122 +-
 deps/v8/src/runtime/runtime-test.cc | 14 +-
 deps/v8/src/runtime/runtime-utils.cc | 38 +
 deps/v8/src/runtime/runtime-utils.h | 14 +
 deps/v8/src/runtime/runtime-wasm.cc | 164 +-
 deps/v8/src/runtime/runtime.cc | 2 +
 deps/v8/src/runtime/runtime.h | 8 +-
 deps/v8/src/sandbox/code-pointer-table.h | 4 +-
 .../compactible-external-entity-table.h | 2 +
 .../src/sandbox/external-entity-table-inl.h | 9 +-
 deps/v8/src/sandbox/external-entity-table.h | 14 +-
 .../src/sandbox/external-pointer-table-inl.h | 2 +
 deps/v8/src/sandbox/external-pointer-table.h | 3 +-
 deps/v8/src/sandbox/isolate-inl.h | 1 +
 deps/v8/src/sandbox/js-dispatch-table-inl.h | 121 +-
 deps/v8/src/sandbox/js-dispatch-table.cc | 79 +-
 deps/v8/src/sandbox/js-dispatch-table.h | 91 +-
 deps/v8/src/sandbox/sandbox.cc | 2 +-
 deps/v8/src/sandbox/testing.cc | 118 +-
 deps/v8/src/sandbox/testing.h | 6 +
 deps/v8/src/sandbox/trusted-pointer-table.h | 4 +-
 deps/v8/src/snapshot/OWNERS | 1 +
 deps/v8/src/snapshot/code-serializer.cc | 11 +-
 deps/v8/src/snapshot/context-serializer.cc | 2 +-
 deps/v8/src/snapshot/deserializer.cc | 365 ++++-
 deps/v8/src/snapshot/deserializer.h | 30 +-
 deps/v8/src/snapshot/mksnapshot.cc | 17 +-
 .../read-only-serializer-deserializer.h | 15 +-
 deps/v8/src/snapshot/read-only-serializer.cc | 10 +-
 .../v8/src/snapshot/serializer-deserializer.h | 40 +-
 deps/v8/src/snapshot/serializer-inl.h | 2 +-
 deps/v8/src/snapshot/serializer.cc | 67 +-
 deps/v8/src/snapshot/serializer.h | 2 +-
 deps/v8/src/snapshot/snapshot.cc | 10 +-
 deps/v8/src/strings/unicode.h | 14 -
 deps/v8/src/torque/constants.h | 2 +-
 deps/v8/src/torque/ls/json-parser.cc | 2 +-
 deps/v8/src/trap-handler/trap-handler.h | 3 +-
 .../baseline/arm/liftoff-assembler-arm-inl.h | 44 +-
 .../arm64/liftoff-assembler-arm64-inl.h | 87 +-
 .../ia32/liftoff-assembler-ia32-inl.h | 16 +-
 .../wasm/baseline/liftoff-assembler-defs.h | 2 +-
 .../src/wasm/baseline/liftoff-assembler-inl.h | 2 +-
 .../v8/src/wasm/baseline/liftoff-assembler.cc | 18 +-
 deps/v8/src/wasm/baseline/liftoff-assembler.h | 18 +-
 deps/v8/src/wasm/baseline/liftoff-compiler.cc | 55 +-
 .../loong64/liftoff-assembler-loong64-inl.h | 22 +-
 .../mips64/liftoff-assembler-mips64-inl.h | 22 +-
 deps/v8/src/wasm/baseline/parallel-move.cc | 23 +-
 .../baseline/ppc/liftoff-assembler-ppc-inl.h | 12 +-
 .../riscv/liftoff-assembler-riscv-inl.h | 8 +-
 .../riscv/liftoff-assembler-riscv32-inl.h | 5 +-
 .../riscv/liftoff-assembler-riscv64-inl.h | 5 +-
 .../s390/liftoff-assembler-s390-inl.h | 12 +-
 .../baseline/x64/liftoff-assembler-x64-inl.h | 128 +-
 deps/v8/src/wasm/canonical-types.cc | 47 +
 deps/v8/src/wasm/canonical-types.h | 9 +
 deps/v8/src/wasm/decoder.h | 38 +-
 deps/v8/src/wasm/function-body-decoder-impl.h | 46 +-
 deps/v8/src/wasm/function-body-decoder.cc | 2 +-
 deps/v8/src/wasm/function-compiler.cc | 6 +
 deps/v8/src/wasm/graph-builder-interface.cc | 28 +-
 .../arm64/interpreter-builtins-arm64.cc | 8 +-
 .../interpreter/wasm-interpreter-runtime.cc | 11 +-
 .../src/wasm/interpreter/wasm-interpreter.cc | 2 +-
 .../x64/interpreter-builtins-x64.cc | 2 +-
 deps/v8/src/wasm/jump-table-assembler.cc | 4 +-
 deps/v8/src/wasm/memory-tracing.cc | 56 -
 deps/v8/src/wasm/memory-tracing.h | 8 -
 deps/v8/src/wasm/module-compiler.cc | 154 +-
 deps/v8/src/wasm/module-compiler.h | 9 +-
 deps/v8/src/wasm/module-instantiate.cc | 171 ++-
 deps/v8/src/wasm/object-access.h | 5 -
 deps/v8/src/wasm/stacks.cc | 94 +-
 deps/v8/src/wasm/stacks.h | 83 +-
 .../v8/src/wasm/turboshaft-graph-interface.cc | 390 ++---
 deps/v8/src/wasm/turboshaft-graph-interface.h | 14 +-
 deps/v8/src/wasm/wasm-builtin-list.h | 3 +-
 deps/v8/src/wasm/wasm-code-manager.cc | 221 ++-
 deps/v8/src/wasm/wasm-code-manager.h | 47 +-
 .../v8/src/wasm/wasm-code-pointer-table-inl.h | 178 +++
 deps/v8/src/wasm/wasm-code-pointer-table.cc | 126 ++
 deps/v8/src/wasm/wasm-code-pointer-table.h | 148 ++
 deps/v8/src/wasm/wasm-debug.cc | 15 +
 deps/v8/src/wasm/wasm-debug.h | 1 +
 deps/v8/src/wasm/wasm-disassembler.cc | 5 +
 deps/v8/src/wasm/wasm-engine.cc | 120 +-
 deps/v8/src/wasm/wasm-engine.h | 14 +-
 deps/v8/src/wasm/wasm-external-refs.cc | 104 +-
 deps/v8/src/wasm/wasm-external-refs.h | 10 +-
 deps/v8/src/wasm/wasm-feature-flags.h | 19 +-
 deps/v8/src/wasm/wasm-features.cc | 1 -
 deps/v8/src/wasm/wasm-import-wrapper-cache.cc | 187 ++-
 deps/v8/src/wasm/wasm-import-wrapper-cache.h | 43 +-
 deps/v8/src/wasm/wasm-js.cc | 32 +-
 deps/v8/src/wasm/wasm-linkage.h | 13 +-
 deps/v8/src/wasm/wasm-module.cc | 6 +-
 deps/v8/src/wasm/wasm-module.h | 7 +
 deps/v8/src/wasm/wasm-objects-inl.h | 7 +
 deps/v8/src/wasm/wasm-objects.cc | 365 +++--
 deps/v8/src/wasm/wasm-objects.h | 73 +-
 deps/v8/src/wasm/wasm-objects.tq | 4 -
 deps/v8/src/wasm/wasm-serialization.cc | 38 +-
 deps/v8/src/wasm/wasm-subtyping.cc | 2 +-
 deps/v8/src/wasm/wrappers.cc | 316 ++--
 deps/v8/test/benchmarks/benchmarks.status | 2 +
 deps/v8/test/cctest/BUILD.gn | 13 +-
 deps/v8/test/cctest/cctest-utils.h | 3 -
 deps/v8/test/cctest/cctest.cc | 6 +-
 deps/v8/test/cctest/cctest.h | 4 +-
 deps/v8/test/cctest/cctest.status | 379 +----
 .../test/cctest/compiler/function-tester.cc | 2 +-
 .../cctest/compiler/test-code-generator.cc | 6 +-
 .../test-concurrent-shared-function-info.cc | 2 +-
 .../cctest/compiler/test-run-load-store.cc | 2 +-
 .../test/cctest/compiler/test-run-machops.cc | 4 +-
 .../cctest/compiler/test-select-combine.cc | 2 +-
 deps/v8/test/cctest/heap/test-compaction.cc | 4 +-
 deps/v8/test/cctest/heap/test-heap.cc | 25 +-
 .../cctest/heap/test-incremental-marking.cc | 2 +-
 .../cctest/heap/test-memory-measurement.cc | 2 +-
 deps/v8/test/cctest/heap/test-spaces.cc | 3 +-
 .../test/cctest/heap/test-weak-references.cc | 1 -
 .../v8/test/cctest/heap/test-write-barrier.cc | 6 +-
 .../v8/test/cctest/test-accessor-assembler.cc | 4 +
 deps/v8/test/cctest/test-api.cc | 14 +-
 .../test/cctest/test-code-stub-assembler.cc | 19 +-
 deps/v8/test/cctest/test-constantpool.cc | 4 +-
 deps/v8/test/cctest/test-cpu-profiler.cc | 4 +-
 deps/v8/test/cctest/test-descriptor-array.cc | 4 +
 deps/v8/test/cctest/test-heap-profiler.cc | 1 -
 deps/v8/test/cctest/test-icache.cc | 2 +-
 .../test-ignition-statistics-extension.cc | 4 +-
 .../cctest/test-inobject-slack-tracking.cc | 42 +-
 deps/v8/test/cctest/test-js-to-wasm.cc | 20 -
 deps/v8/test/cctest/test-profile-generator.cc | 2 +-
 deps/v8/test/cctest/test-shared-strings.cc | 2 +-
 deps/v8/test/cctest/test-strings.cc | 16 +-
 .../cctest/test-swiss-name-dictionary-csa.cc | 4 +
 deps/v8/test/cctest/torque/test-torque.cc | 4 +
 .../v8/test/cctest/wasm/test-run-wasm-simd.cc | 4 +-
 .../cctest/wasm/test-run-wasm-wrappers.cc | 82 +-
 .../cctest/wasm/test-streaming-compilation.cc | 2 +-
 deps/v8/test/cctest/wasm/test-wasm-codegen.cc | 30 +-
 .../wasm/test-wasm-import-wrapper-cache.cc | 89 +-
 deps/v8/test/cctest/wasm/test-wasm-metrics.cc | 2 +-
 deps/v8/test/cctest/wasm/wasm-run-utils.cc | 17 +-
 deps/v8/test/cctest/wasm/wasm-simd-utils.cc | 6 +-
 deps/v8/test/cctest/wasm/wasm-simd-utils.h | 16 +-
 .../debug-evaluate-no-side-effect-builtins.js | 5 +
 ...g-evaluate-no-side-effect-runtime-check.js | 62 +
 deps/v8/test/debugger/debugger.status | 12 -
 .../debugger/regress/regress-374013413.js | 15 -
 deps/v8/test/fuzzer/fuzzer.status | 10 -
 deps/v8/test/fuzzer/wasm-deopt.cc | 5 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 6 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.h | 4 +-
 deps/v8/test/fuzzer/wasm-init-expr.cc | 2 +-
 deps/v8/test/fuzzilli/libreprl.c | 285 +++-
 deps/v8/test/fuzzilli/libreprl.h | 50 +-
 deps/v8/test/fuzzilli/main.cc | 25 +-
 .../debugger/wasm-stepping-expected.txt | 112 +-
 ...tepping-with-blackboxed-range-expected.txt | 106 ++
 .../wasm-stepping-with-blackboxed-range.js | 123 ++
 .../test/inspector/debugger/wasm-stepping.js | 79 +-
 deps/v8/test/inspector/inspector.status | 320 ----
 .../regress-crbug-1469092-expected.txt | 2 +-
 .../regress/regress-crbug-1469092.js | 2 +-
 .../CanonicalizeLocaleListTakeLocale.js | 6 +-
 .../modules-import-source-wasm-not-found.mjs | 7 +
 .../modules-import-source-wasm-not-found.out | 5 +
 deps/v8/test/message/message.status | 7 +-
 .../message/wasm-in-js-inlining-turboshaft.js | 288 ++++
 .../wasm-in-js-inlining-turboshaft.out | 329 ++++
 .../v8/test/message/wasm-recognize-imports.js | 1 +
 .../message/wasm-trace-memory-liftoff.out | 28 +-
 deps/v8/test/message/wasm-trace-memory.out | 28 +-
 .../message/wasm-trace-memory64-liftoff.out | 56 +-
 deps/v8/test/message/wasm-trace-memory64.out | 56 +-
 deps/v8/test/mjsunit/clone-ic-regressions.js | 14 +
 .../test/mjsunit/compiler/fast-api-calls.js | 7 +-
 .../mjsunit/compiler/regress-crbug-1502042.js | 2 +-
 deps/v8/test/mjsunit/date-parse.js | 2 +-
 .../decorators/auto-accessors-reparsing.js | 19 +
 .../mjsunit/es6/unicode-regexp-ignore-case.js | 9 +
 deps/v8/test/mjsunit/es9/regexp-lookbehind.js | 261 ++--
 ...ync-disposable-stack-dispose-tick-count.js | 41 +
 ...le-stack-with-null-undefined-tick-count.js | 34 +
 ...unt-ticks.js => await-using-tick-count.js} | 0
 ...th-sync-async-null-undefined-tick-count.js | 39 +
 ...ing-with-sync-null-undefined-tick-count.js | 33 +
 .../modules-async-error-due-to-import.js | 6 +
 ...-skip-async-errors-due-to-import-entry.mjs | 7 +
 .../modules-skip-async-leaf-with-tla.mjs | 5 +
 .../modules-skip-async-mid-with-sync-error.js | 7 +
 .../mjsunit/harmony/promise-withresolvers.js | 2 +-
 .../v8/test/mjsunit/harmony/set-difference.js | 23 +
 .../test/mjsunit/harmony/set-intersection.js | 21 +
 .../mjsunit/harmony/set-is-disjoint-from.js | 18 +
 .../test/mjsunit/harmony/set-is-subset-of.js | 18 +
 .../mjsunit/harmony/set-is-superset-of.js | 18 +
 deps/v8/test/mjsunit/maglev/literals.js | 4 -
 .../test/mjsunit/maglev/loop-phi-shrinking.js | 10 +-
 .../maglev/phi-untagging-conversions.js | 8 +-
 .../test/mjsunit/maglev/regress-359702854.js | 26 +
 .../test/mjsunit/maglev/regress-360182480.js | 33 +
 .../test/mjsunit/maglev/regress-360234501.js | 18 +
 .../test/mjsunit/maglev/regress-360952235.js | 24 +
 .../test/mjsunit/maglev/regress-363051811.js | 16 +
 .../test/mjsunit/maglev/regress-363783495.js | 34 +
 .../test/mjsunit/maglev/regress-382190919.js | 39 +
 .../maglev/regress/regress-crbug-1416795.js | 2 +-
 .../maglev/regress/regress-crbug-1428524.js | 6 +-
 deps/v8/test/mjsunit/mjsunit.status | 169 +--
 .../test/mjsunit/number-tostring-subnormal.js | 2 +-
 deps/v8/test/mjsunit/regexp-lookahead.js | 4 +
 .../v8/test/mjsunit/regress/regress-137768.js | 2 +-
 .../test/mjsunit/regress/regress-364904763.js | 8 +
 deps/v8/test/mjsunit/regress/regress-687.js | 2 +-
 .../regress/regress-crbug-364422411.js | 10 +
 .../mjsunit/regress/regress-crbug-626715.js | 2 +-
 .../mjsunit/regress/regress-crbug-935932.js | 2 +-
 .../regress/regress-refreeze-same-map.js | 2 +-
 .../mjsunit/regress/wasm/regress-10831.js | 2 +-
 .../mjsunit/regress/wasm/regress-324690505.js | 1 +
 .../mjsunit/regress/wasm/regress-326260438.js | 2 +-
 .../mjsunit/regress/wasm/regress-342522151.js | 11 +
 .../mjsunit/regress/wasm/regress-346197738.js | 42 +
 .../mjsunit/regress/wasm/regress-346597059.js | 30 +
 .../mjsunit/regress/wasm/regress-349640002.js | 33 +
 .../mjsunit/regress/wasm/regress-358393368.js | 28 +
 .../mjsunit/regress/wasm/regress-360052650.js | 48 +
 .../mjsunit/regress/wasm/regress-360700873.js | 52 +
 .../mjsunit/regress/wasm/regress-361369297.js | 26 +
 .../mjsunit/regress/wasm/regress-361611472.js | 52 +
 .../mjsunit/regress/wasm/regress-363072477.js | 22 +
 .../mjsunit/regress/wasm/regress-363826217.js | 61 +
 .../mjsunit/regress/wasm/regress-364312793.js | 39 +
 .../mjsunit/regress/wasm/regress-364360260.js | 53 +
 .../mjsunit/regress/wasm/regress-364667545.js | 28 +
 .../mjsunit/regress/wasm/regress-366635354.js | 32 +
 .../mjsunit/regress/wasm/regress-378779897.js | 22 +
 .../mjsunit/sandbox/memory-corruption-api.js | 4 +-
 .../sandbox/regress/regress-334120897.js | 15 +-
 .../sandbox/regress/regress-342297062.js | 40 +
 .../sandbox/regress/regress-349502157.js | 5 +-
 .../regress/regress-crbug-335810507.js | 6 +-
 .../sandbox/regress/regress-crbug-40926051.js | 3 +-
 .../wasm-imports-concurrent-mutation.js | 16 +-
 .../mjsunit/sandbox/wasm-inlining-sigcheck.js | 7 +-
 .../sandbox/wasm-manipulated-instance.js | 15 +-
 .../sandbox/wasm-signature-verification.js | 15 +-
 .../test/mjsunit/sandbox/wasm-table-import.js | 3 +-
 .../mjsunit/sandbox/wasm-table-sigcheck.js | 5 +-
 .../sandbox/wasm-table-wasmjsfunction.js | 5 +-
 .../extend-property-backing-store-1.js | 2 +-
 .../turboshaft/maglev-frontend/raw-abs.js | 2 +-
 .../maglev-frontend/regress-355016861.js | 2 +-
 .../mjsunit/wasm/compare-exchange-stress.js | 2 +-
 .../mjsunit/wasm/compare-exchange64-stress.js | 2 +-
 .../mjsunit/wasm/esm/module-import-source.mjs | 19 +
 deps/v8/test/mjsunit/wasm/growable-stacks.js | 227 +++
 .../wasm/imported-strings-constants.js | 4 +-
 .../mjsunit/wasm/imported-strings-invalid.js | 23 +-
 .../wasm/imported-strings-streaming.js | 6 +-
 .../mjsunit/wasm/imported-strings-utf8.js | 381 +++++
 deps/v8/test/mjsunit/wasm/imported-strings.js | 318 +---
 .../wasm/inlining-mutable-instance-fields.js | 89 ++
 deps/v8/test/mjsunit/wasm/loop-rotation.js | 2 +-
 deps/v8/test/mjsunit/wasm/simd-fp16.js | 32 +
 deps/v8/test/mjsunit/wasm/stack-switching.js | 1 +
 deps/v8/test/mjsunit/wasm/turboshaft/basic.js | 11 -
 .../test/mjsunit/wasm/wasm-module-builder.js | 40 +
 deps/v8/test/test262/PRESUBMIT.py | 8 +-
 .../decorators/private-auto-accessor.js | 106 --
 ...using-in-async-function-call-with-await.js | 33 -
 ...ng-in-async-function-call-without-await.js | 32 -
 .../await-using-in-block.js | 32 -
 .../await-using-in-for-in-statement.js | 26 -
 .../await-using-in-for-of-statement.js | 27 -
 .../await-using-in-for-statement.js | 26 -
 .../await-using-in-switch-case-block.js | 28 -
 ...wait-using-with-no-async-dispose-method.js | 23 -
 .../await-using-with-sync-dispose-method.js | 32 -
 .../mixed-call-dispose-methods.js | 32 -
 .../local-tests/test/staging/features.txt | 6 -
 deps/v8/test/test262/test262.status | 46 +-
 deps/v8/test/test262/testcfg.py | 1 +
 deps/v8/test/unittests/BUILD.gn | 3 +-
 .../test/unittests/api/api-wasm-unittest.cc | 34 +-
 .../api/resource-constraints-unittest.cc | 12 +-
 .../assembler/assembler-x64-unittest.cc | 31 +
 .../assembler/macro-assembler-x64-unittest.cc | 97 ++
 .../test/unittests/base/ieee754-unittest.cc | 20 +-
 .../unittests/base/platform/time-unittest.cc | 2 -
 .../unittests/codegen/code-layout-unittest.cc | 4 +
 .../compiler-dispatcher-unittest.cc | 2 +-
 .../instruction-selector-arm64-unittest.cc | 4 +-
 ...aft-instruction-selector-arm64-unittest.cc | 4 +-
 .../unittests/compiler/compiler-unittest.cc | 9 +-
 .../unittests/compiler/function-tester.cc | 6 +-
 .../instruction-selector-ia32-unittest.cc | 2 +-
 .../run-bytecode-graph-builder-unittest.cc | 2 +-
 .../loop-unrolling-analyzer-unittest.cc | 6 +-
 .../compiler/turboshaft/opmask-unittest.cc | 5 +-
 .../x64/instruction-selector-x64-unittest.cc | 2 +-
 ...shaft-instruction-selector-x64-unittest.cc | 2 +-
 .../heap/cppgc-js/unified-heap-unittest.cc | 2 -
 .../heap/cppgc/concurrent-sweeper-unittest.cc | 3 +-
 .../heap/cppgc/gc-invoker-unittest.cc | 3 +-
 .../unittests/heap/cppgc/test-platform.cc | 3 +-
 .../test/unittests/heap/gc-tracer-unittest.cc | 19 +-
 .../heap/heap-controller-unittest.cc | 44 +-
 deps/v8/test/unittests/heap/heap-unittest.cc | 81 +-
 .../heap/inner-pointer-resolution-unittest.cc | 23 -
 .../test/unittests/heap/iterators-unittest.cc | 3 +-
 deps/v8/test/unittests/heap/pool-unittest.cc | 1 -
 .../unittests/heap/shared-heap-unittest.cc | 46 +
 .../v8/test/unittests/heap/spaces-unittest.cc | 3 +-
 .../bytecode-array-builder-unittest.cc | 2 +-
 .../AsyncGenerators.golden | 48 +-
 .../bytecode_expectations/AsyncModules.golden | 232 +--
 .../bytecode_expectations/Modules.golden | 319 ++--
 .../PrivateAccessorAccess.golden | 8 +-
 .../PrivateMethodAccess.golden | 4 +-
 .../StaticPrivateMethodAccess.golden | 30 +-
 .../interpreter/bytecodes-unittest.cc | 20 +-
 .../interpreter-assembler-unittest.cc | 2 +-
 .../interpreter/interpreter-tester.h | 2 +-
 .../libplatform/default-platform-unittest.cc | 10 +-
 .../v8/test/unittests/logging/log-unittest.cc | 9 +-
 .../unittests/objects/weakmaps-unittest.cc | 7 +-
 .../unittests/objects/weaksets-unittest.cc | 8 +-
 .../test/unittests/parser/parsing-unittest.cc | 206 +++
 .../test/unittests/regexp/regexp-unittest.cc | 2 +-
 deps/v8/test/unittests/test-utils.h | 3 -
 deps/v8/test/unittests/testcfg.py | 1 +
 .../unittests/torque/ls-message-unittest.cc | 2 +-
 deps/v8/test/unittests/unittests.status | 65 -
 .../wasm/function-body-decoder-unittest.cc | 4 +-
 .../wasm/memory-protection-unittest.cc | 74 +-
 .../wasm/wasm-code-pointer-table-unittest.cc | 106 ++
 .../test/wasm-api-tests/wasm-api-tests.status | 8 -
 deps/v8/test/wasm-js/testcfg.py | 3 +-
 deps/v8/test/wasm-js/tests.tar.gz.sha1 | 2 +-
 deps/v8/test/wasm-js/wasm-js.status | 19 +-
 .../v8/test/wasm-spec-tests/tests.tar.gz.sha1 | 2 +-
 .../wasm-spec-tests/wasm-spec-tests.status | 100 +-
 ...d-arguments-in-closure-inline-expected.txt | 2 +-
 ...ate-inlined-arguments-in-closure-inline.js | 2 +-
 ...String-elision-trailing-comma-expected.txt | 2 +-
 .../webkit/toString-elision-trailing-comma.js | 2 +-
 deps/v8/third_party/abseil-cpp/BUILD.gn | 13 +
 .../v8/third_party/abseil-cpp/README.chromium | 2 +-
 .../abseil-cpp/absl/algorithm/container.h | 6 +-
 .../abseil-cpp/absl/base/BUILD.bazel | 10 +
 .../third_party/abseil-cpp/absl/base/BUILD.gn | 52 +-
 .../abseil-cpp/absl/base/CMakeLists.txt | 12 +
 .../abseil-cpp/absl/base/attributes.h | 59 +-
 .../absl/base/internal/raw_logging.cc | 2 +-
 .../third_party/abseil-cpp/absl/base/macros.h | 22 +-
 .../abseil-cpp/absl/base/nullability.h | 48 +-
 .../base/nullability_default_nonnull_test.cc | 44 +
 .../abseil-cpp/absl/base/options.h | 5 +-
 .../abseil-cpp/absl/base/raw_logging_test.cc | 4 +
 .../abseil-cpp/absl/container/BUILD.bazel | 1 +
 .../abseil-cpp/absl/container/BUILD.gn | 1 +
 .../abseil-cpp/absl/container/CMakeLists.txt | 1 +
 .../abseil-cpp/absl/container/btree_map.h | 4 +-
 .../abseil-cpp/absl/container/btree_set.h | 4 +-
 .../abseil-cpp/absl/container/flat_hash_map.h | 2 +-
 .../abseil-cpp/absl/container/flat_hash_set.h | 2 +-
 .../absl/container/internal/raw_hash_set.h | 112 +-
 .../container/internal/raw_hash_set_test.cc | 40 +
 .../abseil-cpp/absl/container/node_hash_map.h | 2 +-
 .../abseil-cpp/absl/container/node_hash_set.h | 2 +-
 .../debugging/internal/stacktrace_config.h | 7 +
 .../absl/debugging/symbolize_test.cc | 20 +-
 .../third_party/abseil-cpp/absl/flags/flag.h | 3 +-
 .../absl/hash/internal/low_level_hash.cc | 6 +-
 .../abseil-cpp/absl/log/CMakeLists.txt | 1 +
 .../abseil-cpp/absl/log/absl_vlog_is_on.h | 2 +
 .../abseil-cpp/absl/log/internal/BUILD.bazel | 1 +
 .../abseil-cpp/absl/log/internal/BUILD.gn | 1 +
 .../abseil-cpp/absl/log/internal/check_op.h | 6 +-
 .../abseil-cpp/absl/log/vlog_is_on.h | 2 +
 .../abseil-cpp/absl/status/status.h | 2 +-
 .../abseil-cpp/absl/strings/string_view.h | 2 +-
 .../abseil-cpp/absl/time/duration_test.cc | 34 +-
 .../third_party/abseil-cpp/absl/time/time.h | 37 +-
 .../abseil-cpp/absl/time/time_test.cc | 31 +-
 .../abseil-cpp/absl/types/BUILD.gn | 34 +-
 .../third_party/abseil-cpp/absl/types/span.h | 2 +-
 .../abseil-cpp/symbols_arm64_dbg.def | 77 +-
 .../abseil-cpp/symbols_arm64_rel.def | 1 -
 .../abseil-cpp/symbols_x64_dbg.def | 80 +-
 .../abseil-cpp/symbols_x64_rel.def | 1 -
 .../abseil-cpp/symbols_x64_rel_asan.def | 5 +-
 .../abseil-cpp/symbols_x86_dbg.def | 80 +-
 .../abseil-cpp/symbols_x86_rel.def | 1 -
 deps/v8/third_party/fast_float/BUILD.gn | 103 ++
 deps/v8/third_party/fast_float/OWNERS | 2 +
 deps/v8/third_party/fast_float/README.v8 | 15 +
 .../src/include/fast_float/ascii_number.h | 579 +++++++
 .../src/include/fast_float/bigint.h | 620 ++++++++
 .../fast_float/constexpr_feature_detect.h | 40 +
 .../include/fast_float/decimal_to_binary.h | 211 +++
 .../src/include/fast_float/digit_comparison.h | 455 ++++++
 .../src/include/fast_float/fast_float.h | 56 +
 .../src/include/fast_float/fast_table.h | 704 +++++++++
 .../src/include/fast_float/float_common.h | 785 ++++++++++
 .../src/include/fast_float/parse_number.h | 346 +++++
 .../fp16/src/include/fp16/bitcasts.h | 10 +-
 deps/v8/third_party/glibc/README.v8 | 2 +-
 deps/v8/third_party/v8/builtins/array-sort.tq | 4 +-
 deps/v8/third_party/v8/codegen/fp16-inl.h | 11 +-
 deps/v8/third_party/zlib/README.chromium | 1 +
 deps/v8/third_party/zlib/chromeconf.h | 3 -
 deps/v8/tools/cfi/ignores.txt | 2 +-
 .../clusterfuzz/js_fuzzer/source_helpers.js | 25 +-
 .../mjsunit/fake-wasm-module-builder.js | 5 +
 .../js_fuzzer/test_data/mjsunit/test_load.js | 5 +
 .../test_data/mjsunit/test_load_expected.js | 3 +
 deps/v8/tools/dev/gen-tags.py | 2 +-
 deps/v8/tools/dev/gm.py | 8 +-
 deps/v8/tools/gcmole/BUILD.gn | 1 +
 deps/v8/tools/gdbinit | 2 +-
 deps/v8/tools/gen-postmortem-metadata.py | 1 -
 deps/v8/tools/mb/mb.py | 97 ++
 deps/v8/tools/mb/mb_test.py | 101 +-
 deps/v8/tools/parse-processor.html | 2 +-
 deps/v8/tools/run.py | 20 +-
 deps/v8/tools/run_perf.py | 2 +-
 deps/v8/tools/testrunner/local/statusfile.py | 6 +-
 deps/v8/tools/testrunner/local/variants.py | 1 +
 deps/v8/tools/testrunner/objects/testcase.py | 10 +
 .../turboshaft-graph-operation.ts | 2 +-
 .../tools/turbolizer/src/views/range-view.ts | 2 +-
 .../wasm/mjsunit-module-disassembler-impl.h | 18 +-
 1086 files changed, 28859 insertions(+), 14617 deletions(-)
 create mode 100644 deps/v8/src/builtins/builtins-number-tsa.cc
 create mode 100644 deps/v8/src/builtins/number-builtins-reducer-inl.h
 create mode 100644 deps/v8/src/codegen/define-code-stub-assembler-macros.inc
 create mode 100644 deps/v8/src/codegen/undef-code-stub-assembler-macros.inc
 create mode 100644 deps/v8/src/codegen/x64/builtin-jump-table-info-x64.cc
 create mode 100644 deps/v8/src/codegen/x64/builtin-jump-table-info-x64.h
 create mode 100644 deps/v8/src/compiler/turboshaft/builtin-compiler.cc
 create mode 100644 deps/v8/src/compiler/turboshaft/builtin-compiler.h
 create mode 100644 deps/v8/src/compiler/turboshaft/field-macro.inc
 create mode 100644 deps/v8/src/compiler/turboshaft/wasm-in-js-inlining-phase.cc
 create mode 100644 deps/v8/src/compiler/turboshaft/wasm-in-js-inlining-phase.h
 create mode 100644 deps/v8/src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h
 delete mode 100644 deps/v8/src/heap/third-party/heap-api-stub.cc
 delete mode 100644 deps/v8/src/heap/third-party/heap-api.h
 create mode 100644 deps/v8/src/interpreter/DEPS
 create mode 100644 deps/v8/src/interpreter/interpreter-generator-tsa.cc
 create mode 100644 deps/v8/src/interpreter/interpreter-generator-tsa.h
 create mode 100644 deps/v8/src/runtime/runtime-utils.cc
 delete mode 100644 deps/v8/src/wasm/memory-tracing.cc
 create mode 100644 deps/v8/src/wasm/wasm-code-pointer-table-inl.h
 create mode 100644 deps/v8/src/wasm/wasm-code-pointer-table.cc
 create mode 100644 deps/v8/src/wasm/wasm-code-pointer-table.h
 delete mode 100644 deps/v8/test/debugger/regress/regress-374013413.js
 create mode 100644 deps/v8/test/inspector/debugger/wasm-stepping-with-blackboxed-range-expected.txt
 create mode 100644 deps/v8/test/inspector/debugger/wasm-stepping-with-blackboxed-range.js
 create mode 100644 deps/v8/test/message/fail/modules-import-source-wasm-not-found.mjs
 create mode 100644 deps/v8/test/message/fail/modules-import-source-wasm-not-found.out
 create mode 100644 deps/v8/test/message/wasm-in-js-inlining-turboshaft.js
 create mode 100644 deps/v8/test/message/wasm-in-js-inlining-turboshaft.out
 create mode 100644 deps/v8/test/mjsunit/decorators/auto-accessors-reparsing.js
 create mode 100644 deps/v8/test/mjsunit/harmony/async-disposable-stack-dispose-tick-count.js
 create mode 100644 deps/v8/test/mjsunit/harmony/async-disposable-stack-with-null-undefined-tick-count.js
 rename deps/v8/test/mjsunit/harmony/{await-using-count-ticks.js => await-using-tick-count.js} (100%)
 create mode 100644 deps/v8/test/mjsunit/harmony/await-using-with-sync-async-null-undefined-tick-count.js
 create mode 100644 deps/v8/test/mjsunit/harmony/await-using-with-sync-null-undefined-tick-count.js
 create mode 100644 deps/v8/test/mjsunit/harmony/modules-async-error-due-to-import.js
 create mode 100644 deps/v8/test/mjsunit/harmony/modules-skip-async-errors-due-to-import-entry.mjs
 create mode 100644 deps/v8/test/mjsunit/harmony/modules-skip-async-leaf-with-tla.mjs
 create mode 100644 deps/v8/test/mjsunit/harmony/modules-skip-async-mid-with-sync-error.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-359702854.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-360182480.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-360234501.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-360952235.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-363051811.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-363783495.js
 create mode 100644 deps/v8/test/mjsunit/maglev/regress-382190919.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-364904763.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-364422411.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-342522151.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-346197738.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-346597059.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-349640002.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-358393368.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-360052650.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-360700873.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-361369297.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-361611472.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-363072477.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-363826217.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-364312793.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-364360260.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-364667545.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-366635354.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-378779897.js
 create mode 100644 deps/v8/test/mjsunit/sandbox/regress/regress-342297062.js
 create mode 100644 deps/v8/test/mjsunit/wasm/esm/module-import-source.mjs
 create mode 100644 deps/v8/test/mjsunit/wasm/growable-stacks.js
 create mode 100644 deps/v8/test/mjsunit/wasm/imported-strings-utf8.js
 create mode 100644 deps/v8/test/mjsunit/wasm/inlining-mutable-instance-fields.js
 create mode 100644 deps/v8/test/mjsunit/wasm/simd-fp16.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/decorators/private-auto-accessor.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-async-function-call-with-await.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-async-function-call-without-await.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-block.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-for-in-statement.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-for-of-statement.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-for-statement.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-in-switch-case-block.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-with-no-async-dispose-method.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/await-using-with-sync-dispose-method.js
 delete mode 100644 deps/v8/test/test262/local-tests/test/staging/explicit-resource-management/mixed-call-dispose-methods.js
 create mode 100644 deps/v8/test/unittests/wasm/wasm-code-pointer-table-unittest.cc
 create mode 100644 deps/v8/third_party/abseil-cpp/absl/base/nullability_default_nonnull_test.cc
 create mode 100644 deps/v8/third_party/fast_float/BUILD.gn
 create mode 100644 deps/v8/third_party/fast_float/OWNERS
 create mode 100644 deps/v8/third_party/fast_float/README.v8
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/ascii_number.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/bigint.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/constexpr_feature_detect.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/decimal_to_binary.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/digit_comparison.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/fast_float.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/fast_table.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/float_common.h
 create mode 100644 deps/v8/third_party/fast_float/src/include/fast_float/parse_number.h
 create mode 100644 deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/fake-wasm-module-builder.js

diff --git a/deps/v8/.clang-tidy b/deps/v8/.clang-tidy
index f4796364697225..aa7d3813aa1d2f 100644
--- a/deps/v8/.clang-tidy
+++ b/deps/v8/.clang-tidy
@@ -2,6 +2,10 @@
 ---
 Checks: '-*,
   bugprone-unique-ptr-array-mismatch,
+  # google-build-explicit-make-pair,
+  google-default-arguments,
+  google-explicit-constructor,
+  google-readability-casting,
   modernize-redundant-void-arg,
   modernize-replace-random-shuffle,
   modernize-shrink-to-fit,
@@ -11,12 +15,10 @@
   # modernize-use-equals-delete,
   modernize-use-nullptr,
   modernize-use-override,
-  # google-build-explicit-make-pair,
-  google-default-arguments,
-  google-explicit-constructor,
-  google-readability-casting'
+  performance-inefficient-vector-operation,
+  performance-trivially-destructible,
+  performance-unnecessary-copy-initialization'
 WarningsAsErrors: ''
 HeaderFilterRegex: ''
 AnalyzeTemporaryDtors: false
 ...
- diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 62b74b99cae7dc..6ecb0ebecd7b0d 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -68,6 +68,9 @@ /third_party/colorama/src !/third_party/cpu_features /third_party/cpu_features/src +!/third_party/fast_float +/third_party/fast_float/src/* +!/third_party/fast_float/src/include !/third_party/glibc !/third_party/googletest /third_party/googletest/src/* diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 38c0f069211911..cc270445c5600c 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -71,7 +71,9 @@ Anton Bershanskiy <8knots@protonmail.com> Anton Bikineev Ao Wang Archil Sharashenidze +Artem Kobzar Arthur Islamov +Asuka Shikina Aurèle Barrière Bala Avulapati Bangfu Tao @@ -309,6 +311,7 @@ Youfeng Hao Yu Yin Yujie Wang Yuri Iozzelli +Yuri Gaevsky Yusif Khudhur Yuxiang Cao Zac Hansen diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index f2b2f4da0f8164..1f269f9c149e56 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -59,8 +59,6 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression # v8_enable_gdbjit # v8_check_header_includes # v8_enable_lazy_source_positions -# v8_enable_third_party_heap -# v8_third_party_heap_files # v8_disable_write_barriers # v8_enable_unconditional_write_barriers # v8_enable_single_generation @@ -492,8 +490,7 @@ v8_config( ], "@v8//bazel/config:v8_target_ppc64le": [ # NOTE: Bazel rules for ppc64le weren't tested on a real system. - "V8_TARGET_ARCH_PPC64", - "V8_TARGET_ARCH_PPC_LE", + "V8_TARGET_ARCH_PPC64" ], }, no_match_error = "Please specify a target cpu supported by v8", @@ -1793,8 +1790,6 @@ filegroup( "src/heap/stress-scavenge-observer.h", "src/heap/sweeper.cc", "src/heap/sweeper.h", - "src/heap/third-party/heap-api.h", - "src/heap/third-party/heap-api-stub.cc", "src/heap/traced-handles-marking-visitor.cc", "src/heap/traced-handles-marking-visitor.h", "src/heap/weak-object-worklists.cc", @@ -2336,6 +2331,7 @@ filegroup( "src/runtime/runtime-test.cc", "src/runtime/runtime-trace.cc", "src/runtime/runtime-typedarray.cc", + "src/runtime/runtime-utils.cc", "src/runtime/runtime-utils.h", "src/runtime/runtime-weak-refs.cc", "src/sandbox/bounded-size.h", @@ -2557,6 +2553,8 @@ filegroup( "src/codegen/x64/assembler-x64.cc", "src/codegen/x64/assembler-x64.h", "src/codegen/x64/assembler-x64-inl.h", + "src/codegen/x64/builtin-jump-table-info-x64.cc", + "src/codegen/x64/builtin-jump-table-info-x64.h", "src/codegen/x64/constants-x64.h", "src/codegen/x64/cpu-x64.cc", "src/codegen/x64/fma-instr.h", @@ -2889,7 +2887,6 @@ filegroup( "src/wasm/leb-helper.h", "src/wasm/local-decl-encoder.cc", "src/wasm/local-decl-encoder.h", - "src/wasm/memory-tracing.cc", "src/wasm/memory-tracing.h", "src/wasm/module-compiler.cc", "src/wasm/module-compiler.h", @@ -2924,6 +2921,9 @@ filegroup( "src/wasm/wasm-builtin-list.h", "src/wasm/wasm-code-manager.cc", "src/wasm/wasm-code-manager.h", + "src/wasm/wasm-code-pointer-table.cc", + "src/wasm/wasm-code-pointer-table.h", + "src/wasm/wasm-code-pointer-table-inl.h", "src/wasm/wasm-debug.cc", "src/wasm/wasm-debug.h", "src/wasm/wasm-deopt-data.cc", @@ -3282,6 +3282,8 @@ filegroup( "src/compiler/turboshaft/build-graph-phase.cc", "src/compiler/turboshaft/build-graph-phase.h", "src/compiler/turboshaft/builtin-call-descriptors.h", + "src/compiler/turboshaft/builtin-compiler.cc", + "src/compiler/turboshaft/builtin-compiler.h", "src/compiler/turboshaft/csa-optimize-phase.cc", "src/compiler/turboshaft/csa-optimize-phase.h", 
"src/compiler/turboshaft/dataview-lowering-reducer.h", @@ -3303,6 +3305,7 @@ filegroup( "src/compiler/turboshaft/explicit-truncation-reducer.h", "src/compiler/turboshaft/fast-api-call-lowering-reducer.h", "src/compiler/turboshaft/fast-hash.h", + "src/compiler/turboshaft/field-macro.inc", "src/compiler/turboshaft/graph.cc", "src/compiler/turboshaft/graph.h", "src/compiler/turboshaft/graph-builder.cc", @@ -3484,6 +3487,9 @@ filegroup( "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc", "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h", "src/compiler/turboshaft/wasm-load-elimination-reducer.h", + "src/compiler/turboshaft/wasm-in-js-inlining-phase.cc", + "src/compiler/turboshaft/wasm-in-js-inlining-phase.h", + "src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h", "src/compiler/turboshaft/wasm-lowering-phase.cc", "src/compiler/turboshaft/wasm-lowering-phase.h", "src/compiler/turboshaft/wasm-lowering-reducer.h", @@ -3588,6 +3594,7 @@ filegroup( "src/builtins/builtins-lazy-gen.h", "src/builtins/builtins-microtask-queue-gen.cc", "src/builtins/builtins-number-gen.cc", + "src/builtins/builtins-number-tsa.cc", "src/builtins/builtins-object-gen.cc", "src/builtins/builtins-object-gen.h", "src/builtins/builtins-promise-gen.cc", @@ -3607,6 +3614,7 @@ filegroup( "src/builtins/builtins-utils-gen.h", "src/builtins/growable-fixed-array-gen.cc", "src/builtins/growable-fixed-array-gen.h", + "src/builtins/number-builtins-reducer-inl.h", "src/builtins/profile-data-reader.cc", "src/builtins/profile-data-reader.h", "src/builtins/setup-builtins-internal.cc", @@ -3616,6 +3624,8 @@ filegroup( "third_party/v8/codegen/fp16-inl.h", "src/codegen/code-stub-assembler-inl.h", "src/codegen/code-stub-assembler.h", + "src/codegen/define-code-stub-assembler-macros.inc", + "src/codegen/undef-code-stub-assembler-macros.inc", "src/heap/setup-heap-internal.cc", "src/ic/accessor-assembler.cc", "src/ic/accessor-assembler.h", @@ -3629,6 +3639,8 @@ filegroup( "src/interpreter/interpreter-assembler.h", "src/interpreter/interpreter-generator.cc", "src/interpreter/interpreter-generator.h", + "src/interpreter/interpreter-generator-tsa.cc", + "src/interpreter/interpreter-generator-tsa.h", "src/interpreter/interpreter-intrinsics-generator.cc", "src/interpreter/interpreter-intrinsics-generator.h", "src/numbers/integer-literal.h", @@ -3796,6 +3808,25 @@ filegroup( }), ) +v8_library( + name = "lib_fast_float", + srcs = [ + "third_party/fast_float/src/include/fast_float/ascii_number.h", + "third_party/fast_float/src/include/fast_float/bigint.h", + "third_party/fast_float/src/include/fast_float/constexpr_feature_detect.h", + "third_party/fast_float/src/include/fast_float/decimal_to_binary.h", + "third_party/fast_float/src/include/fast_float/digit_comparison.h", + "third_party/fast_float/src/include/fast_float/fast_float.h", + "third_party/fast_float/src/include/fast_float/fast_table.h", + "third_party/fast_float/src/include/fast_float/float_common.h", + "third_party/fast_float/src/include/fast_float/parse_number.h", + ], + hdrs = [ "third_party/fast_float/src/include/fast_float/fast_float.h" ], + includes = [ + "third_party/fast_float/src/include", + ], +) + v8_library( name = "lib_fp16", srcs = ["third_party/fp16/src/include/fp16.h"], @@ -4299,6 +4330,7 @@ v8_library( ":noicu/generated_torque_definitions", ], deps = [ + ":lib_fast_float", ":lib_fp16", ":v8_libbase", "//external:absl_btree", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 19e0d2b584790e..87b212921bb8d5 100644 --- a/deps/v8/BUILD.gn +++ 
b/deps/v8/BUILD.gn @@ -266,15 +266,6 @@ declare_args() { # Enable lazy source positions by default. v8_enable_lazy_source_positions = true - # Enable third party HEAP library - v8_enable_third_party_heap = false - - # Libaries used by third party heap - v8_third_party_heap_libs = [] - - # Source code used by third party heap - v8_third_party_heap_files = [] - # Disable write barriers when GCs are non-incremental and # heap has single generation. v8_disable_write_barriers = false @@ -451,6 +442,12 @@ declare_args() { v8_enable_experimental_tsa_builtins = false v8_dcheck_always_on = dcheck_always_on + + # Remote builds require an explicit dependency on icudat, but + # this breaks locally building V8 with ICU support when the file + # isn't present, which some embedders rely on. This option controls + # the explicit dependency and allows the build to complete. + v8_depend_on_icu_data_file = icu_use_data_file } # Derived defaults. @@ -556,14 +553,6 @@ if (v8_enable_single_generation == "") { if (v8_enable_atomic_object_field_writes == "") { v8_enable_atomic_object_field_writes = v8_enable_concurrent_marking } -if (v8_enable_third_party_heap) { - v8_disable_write_barriers = true - v8_enable_single_generation = true - v8_enable_shared_ro_heap = false - v8_enable_pointer_compression = false - v8_enable_pointer_compression_shared_cage = false - v8_enable_allocation_folding = false -} if (v8_enable_single_generation) { v8_allocation_site_tracking = false } @@ -710,9 +699,6 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage, assert(!v8_enable_sandbox || v8_enable_external_code_space, "The sandbox requires the external code space") -assert(!v8_enable_sandbox || !v8_enable_third_party_heap, - "The sandbox is incompatible with the third-party heap") - assert(!v8_enable_memory_corruption_api || v8_enable_sandbox, "The Memory Corruption API requires the sandbox") @@ -750,6 +736,10 @@ if (v8_enable_single_generation == true) { assert(!v8_enable_snapshot_compression || v8_use_zlib, "Snapshot compression requires zlib") +assert(!v8_enable_cet_shadow_stack || + (v8_target_cpu == "x64" && target_os == "win"), + "CET shadow stack is supported only on x64 Windows") + if (v8_expose_public_symbols == "") { v8_expose_public_symbols = v8_expose_symbols } @@ -1174,9 +1164,6 @@ config("features") { if (v8_disable_write_barriers) { defines += [ "V8_DISABLE_WRITE_BARRIERS" ] } - if (v8_enable_third_party_heap) { - defines += [ "V8_ENABLE_THIRD_PARTY_HEAP" ] - } if (v8_use_external_startup_data) { defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] } @@ -1213,6 +1200,9 @@ config("features") { if (v8_enable_cet_ibt) { defines += [ "V8_ENABLE_CET_IBT" ] } + if (v8_enable_memory_sealing) { + defines += [ "V8_ENABLE_MEMORY_SEALING" ] + } if (v8_enable_wasm_gdb_remote_debugging) { defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ] } @@ -1438,27 +1428,18 @@ config("toolchain") { cflags += [ "-march=z196" ] } } - if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { - if (v8_current_cpu == "ppc") { - defines += [ "V8_TARGET_ARCH_PPC" ] - } else if (v8_current_cpu == "ppc64") { - defines += [ "V8_TARGET_ARCH_PPC64" ] - cflags += [ "-ffp-contract=off" ] - } - if (host_byteorder == "little") { - defines += [ "V8_TARGET_ARCH_PPC_LE" ] - } else if (host_byteorder == "big") { - defines += [ "V8_TARGET_ARCH_PPC_BE" ] - if (current_os == "aix") { - cflags += [ - # Work around AIX ceil, trunc and round oddities. - "-mcpu=power5+", - "-mfprnd", - - # Work around AIX assembler popcntb bug. 
- "-mno-popcntb", - ] - } + if (v8_current_cpu == "ppc64") { + defines += [ "V8_TARGET_ARCH_PPC64" ] + cflags += [ "-ffp-contract=off" ] + if (current_os == "aix") { + cflags += [ + # Work around AIX ceil, trunc and round oddities. + "-mcpu=power5+", + "-mfprnd", + + # Work around AIX assembler popcntb bug. + "-mno-popcntb", + ] } } @@ -2448,12 +2429,17 @@ template("run_mksnapshot") { } action("run_mksnapshot_" + name) { deps = [ ":mksnapshot($v8_snapshot_toolchain)" ] + if (v8_verify_deterministic_mksnapshot) { + # We archive the snapshot executable when verifying snapshot + # determinism to ease debugging. + data_deps = [ ":mksnapshot($v8_snapshot_toolchain)" ] + } script = "tools/run.py" sources = [] - if (icu_use_data_file) { + if (v8_depend_on_icu_data_file) { deps += [ "//third_party/icu:copy_icudata" ] if (host_byteorder == "big") { sources += [ "$root_out_dir/icudtb.dat" ] @@ -2472,7 +2458,20 @@ template("run_mksnapshot") { ext = "s" } - args = [ + args = [] + + if (v8_verify_deterministic_mksnapshot) { + # Output redirection must be the first argument to run.py. We capture + # output when verifying snapshot determinism for debugging. + args += [ + "--redirect-stdout", + rebase_path("$root_out_dir/mksnapshot_output${suffix}.log", + root_build_dir), + ] + data += [ "$root_out_dir/mksnapshot_output${suffix}.log" ] + } + + args += [ "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)", "root_out_dir") + "/mksnapshot", root_build_dir), @@ -2488,8 +2487,25 @@ template("run_mksnapshot") { "--embedded_src", rebase_path("$target_gen_dir/embedded${suffix}.${ext}", root_build_dir), + + # mksnapshot runs in predictable mode to create deterministic snapshots. + # Note this flag is also set implicitly by mksnapshot itself (see + # mksnapshot.cc). We set it here as well for clarity. + "--predictable", + + # Disable ICs globally in mksnapshot to avoid problems with Code handlers. + # See https://crbug.com/345280736. + # TODO(jgruber): Re-enable once a better fix is available. + # Note this flag is also set implicitly by mksnapshot itself (see + # mksnapshot.cc). We set it here as well for clarity. + "--no-use-ic", ] + if (v8_verify_deterministic_mksnapshot) { + # Flags that help debugging snapshot determinism. + args += [ "--trace-read-only-promotion" ] + } + if (v8_log_builtins_block_count_input != "") { args += [ "--trace-turbo", @@ -2635,11 +2651,15 @@ if (v8_verify_deterministic_mksnapshot) { } } - action("verify_deterministic_mksnapshot") { - deps = [] + group("snapshot_set") { + data_deps = [] foreach(i, runs) { - deps += [ ":run_mksnapshot_$i" ] + data_deps += [ ":run_mksnapshot_$i" ] } + } + + action("verify_deterministic_mksnapshot") { + deps = [ ":snapshot_set" ] report_file = "$target_gen_dir/mksnapshot_comparison.txt" script = "tools/snapshot/compare_mksnapshot_output.py" args = [ @@ -2736,9 +2756,13 @@ action("v8_dump_build_config") { simulator_run = target_cpu != v8_target_cpu use_sanitizer = is_asan || is_cfi || is_msan || is_tsan || is_ubsan + use_leaptiering = v8_enable_sandbox && !v8_disable_leaptiering + # This lists all build-time switches consumed by the test framework. All # switches can be used automatically in the status files as is - no # further files need to be modified. + # However, the switch also has to be entered in `build_config_content` in + # `bazel/defs.bzl` so that the switch also works for tests triggered by bazel. # # Naming conventions: Keep switch names short and remove unnecessary # qualifiers. Drop v8_enable_, v8_, is_ where possible. 
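For context on the verify_deterministic_mksnapshot changes above: the build runs mksnapshot several times and compares the resulting outputs via tools/snapshot/compare_mksnapshot_output.py. A rough, self-contained C++ sketch of that comparison step, with invented file names, follows; it illustrates the idea only and is not the actual tool:

#include <cstddef>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

static std::vector<char> ReadAll(const std::string& path) {
  std::ifstream in(path, std::ios::binary);
  return {std::istreambuf_iterator<char>(in), std::istreambuf_iterator<char>()};
}

int main() {
  // Hypothetical outputs of two mksnapshot runs of the same build.
  const std::vector<char> a = ReadAll("mksnapshot_output_run1.bin");
  const std::vector<char> b = ReadAll("mksnapshot_output_run2.bin");
  if (a.size() != b.size()) {
    std::cerr << "size mismatch: " << a.size() << " vs " << b.size() << "\n";
    return 1;
  }
  for (std::size_t i = 0; i < a.size(); ++i) {
    if (a[i] != b[i]) {
      std::cerr << "first mismatch at byte offset " << i << "\n";
      return 1;
    }
  }
  std::cout << "outputs are identical\n";
  return 0;
}

This is also why the hunks above force --predictable and --no-use-ic: any source of nondeterminism in mksnapshot would make the byte-for-byte comparison fail.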
@@ -2794,7 +2818,6 @@ action("v8_dump_build_config") { "single_generation=$v8_enable_single_generation", "slow_dchecks=$v8_enable_slow_dchecks", "target_cpu=\"$target_cpu\"", - "third_party_heap=$v8_enable_third_party_heap", "tsan=$is_tsan", "ubsan=$is_ubsan", "use_sanitizer=$use_sanitizer", @@ -2805,6 +2828,10 @@ action("v8_dump_build_config") { "verify_heap=$v8_enable_verify_heap", "verify_predictable=$v8_enable_verify_predictable", "memory_corruption_api=$v8_enable_memory_corruption_api", + "leaptiering=$use_leaptiering", + + # Please add new switches also in `build_config_content` in `bazel/defs.bzl` + # so that the switches also work for tests triggered by bazel. ] } @@ -2952,6 +2979,7 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-lazy-gen.h", "src/builtins/builtins-microtask-queue-gen.cc", "src/builtins/builtins-number-gen.cc", + "src/builtins/builtins-number-tsa.cc", "src/builtins/builtins-object-gen.cc", "src/builtins/builtins-object-gen.h", "src/builtins/builtins-promise-gen.cc", @@ -2971,6 +2999,7 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-utils-gen.h", "src/builtins/growable-fixed-array-gen.cc", "src/builtins/growable-fixed-array-gen.h", + "src/builtins/number-builtins-reducer-inl.h", "src/builtins/profile-data-reader.cc", "src/builtins/profile-data-reader.h", "src/builtins/setup-builtins-internal.cc", @@ -2978,8 +3007,12 @@ v8_source_set("v8_initializers") { "src/codegen/code-stub-assembler-inl.h", "src/codegen/code-stub-assembler.cc", "src/codegen/code-stub-assembler.h", + "src/codegen/define-code-stub-assembler-macros.inc", "src/codegen/heap-object-list.h", "src/codegen/turboshaft-builtins-assembler-inl.h", + "src/codegen/undef-code-stub-assembler-macros.inc", + "src/compiler/turboshaft/builtin-compiler.cc", + "src/compiler/turboshaft/builtin-compiler.h", "src/heap/setup-heap-internal.cc", "src/ic/accessor-assembler.cc", "src/ic/accessor-assembler.h", @@ -2991,6 +3024,8 @@ v8_source_set("v8_initializers") { "src/ic/unary-op-assembler.h", "src/interpreter/interpreter-assembler.cc", "src/interpreter/interpreter-assembler.h", + "src/interpreter/interpreter-generator-tsa.cc", + "src/interpreter/interpreter-generator-tsa.h", "src/interpreter/interpreter-generator.cc", "src/interpreter/interpreter-generator.h", "src/interpreter/interpreter-intrinsics-generator.cc", @@ -3046,11 +3081,6 @@ v8_source_set("v8_initializers") { ### gcmole(loong64) ### "src/builtins/loong64/builtins-loong64.cc", ] - } else if (v8_current_cpu == "ppc") { - sources += [ - ### gcmole(ppc) ### - "src/builtins/ppc/builtins-ppc.cc", - ] } else if (v8_current_cpu == "ppc64") { sources += [ ### gcmole(ppc64) ### @@ -3531,6 +3561,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/explicit-truncation-reducer.h", "src/compiler/turboshaft/fast-api-call-lowering-reducer.h", "src/compiler/turboshaft/fast-hash.h", + "src/compiler/turboshaft/field-macro.inc", "src/compiler/turboshaft/graph-builder.h", "src/compiler/turboshaft/graph-visualizer.h", "src/compiler/turboshaft/graph.h", @@ -4354,6 +4385,8 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/wasm-assembler-helpers.h", "src/compiler/turboshaft/wasm-gc-optimize-phase.h", "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h", + "src/compiler/turboshaft/wasm-in-js-inlining-phase.h", + "src/compiler/turboshaft/wasm-in-js-inlining-reducer-inl.h", "src/compiler/turboshaft/wasm-js-lowering-reducer.h", "src/compiler/turboshaft/wasm-load-elimination-reducer.h", 
"src/compiler/turboshaft/wasm-lowering-phase.h", @@ -4426,6 +4459,8 @@ v8_header_set("v8_internal_headers") { "src/wasm/wasm-arguments.h", "src/wasm/wasm-builtin-list.h", "src/wasm/wasm-code-manager.h", + "src/wasm/wasm-code-pointer-table-inl.h", + "src/wasm/wasm-code-pointer-table.h", "src/wasm/wasm-debug.h", "src/wasm/wasm-deopt-data.h", "src/wasm/wasm-disassembler-impl.h", @@ -4475,12 +4510,6 @@ v8_header_set("v8_internal_headers") { ] } - if (!v8_enable_third_party_heap) { - sources += filter_include(v8_third_party_heap_files, [ "*.h" ]) - } else { - sources += [ "src/heap/third-party/heap-api.h" ] - } - if (v8_enable_i18n_support) { sources += [ "src/objects/intl-objects.h", @@ -4567,6 +4596,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", "src/codegen/x64/assembler-x64-inl.h", "src/codegen/x64/assembler-x64.h", + "src/codegen/x64/builtin-jump-table-info-x64.h", "src/codegen/x64/constants-x64.h", "src/codegen/x64/fma-instr.h", "src/codegen/x64/interface-descriptors-x64-inl.h", @@ -4728,23 +4758,6 @@ v8_header_set("v8_internal_headers") { sources += [ "src/trap-handler/trap-handler-simulator.h" ] } } - } else if (v8_current_cpu == "ppc") { - sources += [ - ### gcmole(ppc) ### - "src/codegen/ppc/assembler-ppc-inl.h", - "src/codegen/ppc/assembler-ppc.h", - "src/codegen/ppc/constants-ppc.h", - "src/codegen/ppc/interface-descriptors-ppc-inl.h", - "src/codegen/ppc/macro-assembler-ppc.h", - "src/codegen/ppc/register-ppc.h", - "src/codegen/ppc/reglist-ppc.h", - "src/compiler/backend/ppc/instruction-codes-ppc.h", - "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", - "src/execution/ppc/frame-constants-ppc.h", - "src/execution/ppc/simulator-ppc.h", - "src/regexp/ppc/regexp-macro-assembler-ppc.h", - "src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h", - ] } else if (v8_current_cpu == "ppc64") { sources += [ ### gcmole(ppc64) ### @@ -5082,14 +5095,6 @@ if (v8_current_cpu == "x86") { "src/compiler/backend/loong64/instruction-scheduler-loong64.cc", "src/compiler/backend/loong64/instruction-selector-loong64.cc", ] -} else if (v8_current_cpu == "ppc") { - v8_compiler_sources += [ - ### gcmole(ppc) ### - "src/compiler/backend/ppc/code-generator-ppc.cc", - "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", - "src/compiler/backend/ppc/instruction-selector-ppc.cc", - "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", - ] } else if (v8_current_cpu == "ppc64") { v8_compiler_sources += [ ### gcmole(ppc64) ### @@ -5128,6 +5133,7 @@ if (v8_enable_webassembly) { "src/compiler/turboshaft/int64-lowering-phase.cc", "src/compiler/turboshaft/wasm-gc-optimize-phase.cc", "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc", + "src/compiler/turboshaft/wasm-in-js-inlining-phase.cc", "src/compiler/turboshaft/wasm-lowering-phase.cc", "src/compiler/turboshaft/wasm-optimize-phase.cc", "src/compiler/turboshaft/wasm-turboshaft-compiler.cc", @@ -5747,6 +5753,7 @@ v8_source_set("v8_base_without_compiler") { "src/runtime/runtime-test.cc", "src/runtime/runtime-trace.cc", "src/runtime/runtime-typedarray.cc", + "src/runtime/runtime-utils.cc", "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", "src/sandbox/code-pointer-table.cc", @@ -5895,7 +5902,6 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/graph-builder-interface.cc", "src/wasm/jump-table-assembler.cc", "src/wasm/local-decl-encoder.cc", - "src/wasm/memory-tracing.cc", "src/wasm/module-compiler.cc", "src/wasm/module-decoder.cc", "src/wasm/module-instantiate.cc", @@ 
-5908,6 +5914,7 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/turboshaft-graph-interface.cc", "src/wasm/value-type.cc", "src/wasm/wasm-code-manager.cc", + "src/wasm/wasm-code-pointer-table.cc", "src/wasm/wasm-debug.cc", "src/wasm/wasm-deopt-data.cc", "src/wasm/wasm-disassembler.cc", @@ -5944,12 +5951,6 @@ v8_source_set("v8_base_without_compiler") { } } - if (v8_enable_third_party_heap) { - sources += filter_exclude(v8_third_party_heap_files, [ "*.h" ]) - } else { - sources += [ "src/heap/third-party/heap-api-stub.cc" ] - } - if (v8_enable_conservative_stack_scanning) { sources += [ "src/heap/conservative-stack-visitor.cc" ] } @@ -5989,6 +5990,7 @@ v8_source_set("v8_base_without_compiler") { ### gcmole(x64) ### "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc", "src/codegen/x64/assembler-x64.cc", + "src/codegen/x64/builtin-jump-table-info-x64.cc", "src/codegen/x64/cpu-x64.cc", "src/codegen/x64/macro-assembler-x64.cc", "src/deoptimizer/x64/deoptimizer-x64.cc", @@ -6122,21 +6124,6 @@ v8_source_set("v8_base_without_compiler") { sources += [ "src/trap-handler/handler-outside-simulator.cc" ] } } - } else if (v8_current_cpu == "ppc") { - sources += [ - ### gcmole(ppc) ### - "src/codegen/ppc/assembler-ppc.cc", - "src/codegen/ppc/constants-ppc.cc", - "src/codegen/ppc/cpu-ppc.cc", - "src/codegen/ppc/macro-assembler-ppc.cc", - "src/deoptimizer/ppc/deoptimizer-ppc.cc", - "src/diagnostics/ppc/disasm-ppc.cc", - "src/diagnostics/ppc/eh-frame-ppc.cc", - "src/diagnostics/ppc/unwinder-ppc.cc", - "src/execution/ppc/frame-constants-ppc.cc", - "src/execution/ppc/simulator-ppc.cc", - "src/regexp/ppc/regexp-macro-assembler-ppc.cc", - ] } else if (v8_current_cpu == "ppc64") { sources += [ ### gcmole(ppc64) ### @@ -6260,6 +6247,7 @@ v8_source_set("v8_base_without_compiler") { ":v8_tracing", ":v8_version", "src/inspector:inspector", + "//third_party/fast_float", ] public_deps = [ @@ -6323,14 +6311,10 @@ v8_source_set("v8_base_without_compiler") { libs = [] - if (v8_enable_third_party_heap) { - libs += v8_third_party_heap_libs - } - # Platforms that don't have CAS support need to link atomic library # to implement atomic memory access if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" || - v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" || + v8_current_cpu == "ppc64" || (current_os != "zos" && (v8_current_cpu == "s390" || v8_current_cpu == "s390x")) || v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS index 5d4e99dec543bd..acd008e3360eff 100644 --- a/deps/v8/COMMON_OWNERS +++ b/deps/v8/COMMON_OWNERS @@ -11,8 +11,8 @@ dlehmann@chromium.org dmercadier@chromium.org ecmziegler@chromium.org evih@chromium.org +fgm@chromium.org gdeepti@chromium.org -hablich@chromium.org hpayer@chromium.org ishell@chromium.org jgruber@chromium.org @@ -22,7 +22,6 @@ liviurau@chromium.org machenbach@chromium.org manoskouk@chromium.org marja@chromium.org -mathias@chromium.org mliedtke@chromium.org mlippautz@chromium.org nicohartmann@chromium.org diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 4208189f466a67..f4a74a3515ceae 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -60,7 +60,7 @@ vars = { 'checkout_fuchsia_no_hooks': False, # reclient CIPD package version - 'reclient_version': 're_client_version:0.157.0.d2566cec-gomaip', + 'reclient_version': 're_client_version:0.163.0.d27158ab-gomaip', # Fetch configuration files required for the 'use_remoteexec' gn arg 'download_remoteexec_cfg': False, @@ -76,22 +76,22 @@ vars = { 
'build_with_chromium': False, # GN CIPD package version. - 'gn_version': 'git_revision:54f5b539df8c4e460b18c62a11132d77b5601136', + 'gn_version': 'git_revision:20806f79c6b4ba295274e3a589d85db41a02fdaa', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:2@1.12.1.chromium.4', # siso CIPD package version - 'siso_version': 'git_revision:87262779ecc3482c8c60b070404b225107212d0d', + 'siso_version': 'git_revision:eaee19cf51478b64614e2e8daad77378238a3c6c', # luci-go CIPD package version. - 'luci_go': 'git_revision:ad7b787aa0ee53a81bc88fb4f7fee7a3ff1e8c29', + 'luci_go': 'git_revision:1aca70b6bf116c1bd8fbf0526c9a89e9be308718', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:23.20240815.4.1', + 'fuchsia_version': 'version:24.20240913.4.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -124,14 +124,14 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. - 'android_sdk_cmdline-tools_version': 'fv6JzkTqfxfIbmsRC8u1b2y0EQO7yQDMDzY3-g0NVu4C', + 'android_sdk_cmdline-tools_version': 'B4p95sDPpm34K8Cf4JcfTM-iYSglWko9qjWgbT9dxWQC', } deps = { 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '7a468ed1849454cffabf4a64110c24e6f1da2c51', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '4bd877395d215d47c694a8383147eb158fafbbd6', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '60a590902cf146c282f15242401bd8543256e2a2', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a7a84ac61eae5a8946807265a2fd8bd812daf384', 'buildtools/linux64': { 'packages': [ { @@ -177,7 +177,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'bcb42e339dbac06f2f9902046b1fbf62562e0cd3', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd62fa93c8f9ce5e687c0bbaa5d2b59670ab2ff60', 'third_party/android_platform': { 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '6337c445f9963ec3914e7e0c5787941d07b46509', 'condition': 'checkout_android', @@ -231,15 +231,15 @@ deps = { 'dep_type': 'cipd', }, 'third_party/boringssl': { - 'url': Var('chromium_url') + '/chromium/src/third_party/boringssl.git' + '@' + '4d98a91cde88f349b96f4018c00053b6699ffd88', + 'url': Var('chromium_url') + '/chromium/src/third_party/boringssl.git' + '@' + 'c79987a83ceaf2cf911f7d21bec621ddc90c45cc', 'condition': "checkout_centipede_deps", }, 'third_party/boringssl/src': { - 'url': Var('boringssl_url') + '/boringssl.git' + '@' + '11f334121fd0d13830fefdf08041183da2d30ef3', + 'url': Var('boringssl_url') + '/boringssl.git' + '@' + '58f3bc83230d2958bb9710bc910972c4f5d382dc', 'condition': "checkout_centipede_deps", }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '799e21b232f23f6c1391abfd44fe8ab1dd95bd9b', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '296226a4a0067c8cffeb8831fb87526a8035f3cc', 'condition': 'checkout_android', }, 'third_party/clang-format/script': @@ -253,11 +253,13 @@ deps = { 'condition': 
'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0bc7c4832e4f2d453e4826c9a2e1197e11bd6ec7', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '22df6f8e622dc3e8df8dc8b5d3e3503b169af78e', 'third_party/fp16/src': Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '0a92994d729ff76a58f692d3028ca1b64b145d91', + 'third_party/fast_float/src': + Var('chromium_url') + '/external/github.com/fastfloat/fast_float.git' + '@' + '3e57d8dcfb0a04b5a8a26b486b54490a2e9b310f', 'third_party/fuchsia-gn-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '30fee7b68b3675e351fa47303c3b6ef322941ccd', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '5086f6c9e4c6d3295a76fdb5d27209f2d6449c6a', 'condition': 'checkout_fuchsia', }, # Exists for rolling the Fuchsia SDK. Check out of the SDK should always @@ -283,7 +285,7 @@ deps = { 'third_party/fuzztest/src': Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '32eb84a95951fa3a0148fb3e6a1a02f830ded136', 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ff233bdd4cac0a0bf6e5cd45bda3406814cb2796', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '0953a17a4281fc26831da647ad3fcd5e21e6473b', 'third_party/highway/src': Var('chromium_url') + '/external/github.com/google/highway.git' + '@' + '8295336dd70f1201d42c22ab5b0861de38cf8fbf', 'third_party/icu': @@ -303,153 +305,153 @@ deps = { 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', 'third_party/libc++/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'f801c947082a3e0a4b48780303526b73905f6ecd', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '50ab693ecb611942ce4440d8c9ed707ee65ed5e8', 'third_party/libc++abi/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'eb6567388e89d9730c76dee71d68ac82e4a1abf6', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '29b2e9a0f48688da116692cb04758393053d269c', 'third_party/libunwind/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '116c20dae60d84a77005697cf29f72783f81b0f9', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'dc70138c3e68e2f946585f134e20815851e26263', 'third_party/llvm-build/Release+Asserts': { 'dep_type': 'gcs', 'bucket': 'chromium-browser-clang', 'objects': [ { - 'object_name': 'Linux_x64/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '32ac9d9864a6bd99242f1a97778b3a074ac1151ce3eca369903f2ef5337c787a', - 'size_bytes': 52250752, - 'generation': 1723267014378582, + 'object_name': 'Linux_x64/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '6a30f7bc7c5f0eac02a40a4ec9a1ab906ddff2adacf2c9ff065916047c79f0fb', + 'size_bytes': 52892060, + 'generation': 1726118358890940, 'condition': 'host_os == "linux"', }, { - 'object_name': 'Linux_x64/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '5fd3fb73ceef73593fa09f8228283aec9f7798b648bf450e87f071a097be213b', - 'size_bytes': 13217676, - 'generation': 1723267014627839, + 'object_name': 'Linux_x64/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 
'35e00fc8f58cf7cd30f0ad27c2fdef56b677e287030072c46c0f024d23363ae4', + 'size_bytes': 13283180, + 'generation': 1726118359291453, 'condition': 'host_os == "linux" and checkout_clang_tidy', }, { - 'object_name': 'Linux_x64/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '6e1fe97f8b7131591233d8a2df1ff289ffb878c3fc6834c978a86273f7c00b6b', - 'size_bytes': 26125984, - 'generation': 1723267014767012, + 'object_name': 'Linux_x64/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '33e2276976dfeaf387f5ea16651ea591eebe3570a12469f3884c74f8079e88bf', + 'size_bytes': 26305668, + 'generation': 1726118359489734, 'condition': 'host_os == "linux" and checkout_clangd', }, { - 'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'ef317481472926d3e2a82e2d2a02cde78685002b4c9923df476108906022d792', - 'size_bytes': 2374748, - 'generation': 1723267015213805, + 'object_name': 'Linux_x64/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '426c6bd378848de0817a7695fee821bece9efb51e3ed1d7b750a75bc17bf00eb', + 'size_bytes': 2370472, + 'generation': 1726118360237343, 'condition': 'host_os == "linux" and checkout_clang_coverage_tools', }, { - 'object_name': 'Linux_x64/llvmobjdump-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '02be68f7c7c7bf679e1abff2745306b8385275017c89b2b13f638a941785f8c5', - 'size_bytes': 5386480, - 'generation': 1723267014930087, + 'object_name': 'Linux_x64/llvmobjdump-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'e11c3043e76c7c79fe7905861a11c78433c6d796d049f837eda0a2ce118f0793', + 'size_bytes': 5410724, + 'generation': 1726118359908897, 'condition': '(checkout_linux or checkout_mac or checkout_android and host_os != "mac")', }, { - 'object_name': 'Mac/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '5df8a609a7d2511343fbc20af1de3ed1682c3703fc074f21af1bf8bc2f58e491', - 'size_bytes': 47200408, - 'generation': 1723267016534642, + 'object_name': 'Mac/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'cabfc7ca792ef13d3e665c3a7811f9a76cc39094059c11606cea1724f0394bbc', + 'size_bytes': 47551968, + 'generation': 1726118361528729, 'condition': 'host_os == "mac" and host_cpu == "x64"', }, { - 'object_name': 'Mac/clang-mac-runtime-library-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'ffc72ff3fca85f31340c164aab480bd2babfaa6219ff12e93b81f0056309da55', - 'size_bytes': 869616, - 'generation': 1723267034708598, + 'object_name': 'Mac/clang-mac-runtime-library-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '50a618246d7fd23645640fc50ccb0d4684c1895def378b90963a289f920ea88a', + 'size_bytes': 879508, + 'generation': 1726118377526206, 'condition': 'checkout_mac and not host_os == "mac"', }, { - 'object_name': 'Mac/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'd02b9a39491d2ff3a291778de87b2a72f5885d01a8093518cb5612b97353ac2a', - 'size_bytes': 12805336, - 'generation': 1723267016822831, + 'object_name': 'Mac/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '2c325505ea43a8d8a14770890d62aba9af37b397e3063e3fb622cfd51d4706f6', + 'size_bytes': 12884412, + 'generation': 1726118361811669, 'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_tidy', }, { - 'object_name': 'Mac/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'ce09141be75350f0f932fcda14d8b906d2869674d79ef5a10a3e60a9a8d3ccee', - 'size_bytes': 26372428, - 'generation': 1723267016957114, + 'object_name': 
'Mac/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '1c1a0965cc95053dec8c649a7b3bb627ad2300ad230eed97b52ee70a8a8edd85', + 'size_bytes': 26553148, + 'generation': 1726118361978146, 'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clangd', }, { - 'object_name': 'Mac/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'c0ac62ff01f1ce6e5d30134cb0f83fd8eabf858dfb33d07209a6b40d8f1ae789', - 'size_bytes': 2248664, - 'generation': 1723267017743181, + 'object_name': 'Mac/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '9259bd27c19ca9662c70ffc2b42c10afb584e7c584470d6e656e164643614b50', + 'size_bytes': 2247028, + 'generation': 1726118362377026, 'condition': 'host_os == "mac" and host_cpu == "x64" and checkout_clang_coverage_tools', }, { - 'object_name': 'Mac_arm64/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'f4a384ecdaa051ba4786c9e6c46f9101a751b1a5c5ad4bf0d217c4ba71e0ff30', - 'size_bytes': 42737720, - 'generation': 1723267036349494, + 'object_name': 'Mac_arm64/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'e87eb4caa95c98ef36c40aec5b8cd07a6c4fb8959d3c7e7d452f6ed860c8c2bf', + 'size_bytes': 41352592, + 'generation': 1726118378868177, 'condition': 'host_os == "mac" and host_cpu == "arm64"', }, { - 'object_name': 'Mac_arm64/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '2769378fd2891af945f8d29b5eaf05b4ab0450b2d086539df1c78c684e64be14', - 'size_bytes': 11740656, - 'generation': 1723267036269250, + 'object_name': 'Mac_arm64/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'fcf8f25a8461db90686d0455bd8f195750a2cdc425cb03c48debe4d3e8bb9299', + 'size_bytes': 11476316, + 'generation': 1726118379144738, 'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_tidy', }, { - 'object_name': 'Mac_arm64/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'a35dfb99903a487e06d685712e461125978c76ba8eaa99e9f5742e63d3e67444', - 'size_bytes': 23470088, - 'generation': 1723267036383208, + 'object_name': 'Mac_arm64/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'cca4049d3362528511ebc603db05189c9bef406a80ae4fead22b1db0a4de83e6', + 'size_bytes': 22679568, + 'generation': 1726118379283835, 'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clangd', }, { - 'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '574a0d7c560aae964d8bdcd85f0145077b1324e79eee4a3dd1636ab7aefc59e5', - 'size_bytes': 2010540, - 'generation': 1723267036758678, + 'object_name': 'Mac_arm64/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '001e8582de4bc7c434f321b5bacd2b0b45e553f3134cb7d78e1a4f62e2b97ac6', + 'size_bytes': 1969844, + 'generation': 1726118379757221, 'condition': 'host_os == "mac" and host_cpu == "arm64" and checkout_clang_coverage_tools', }, { - 'object_name': 'Win/clang-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'e255af29c29a741cf39c3000b612466ff805a99766d26ac86ec2afcb4ca0c922', - 'size_bytes': 44019080, - 'generation': 1723267056892790, + 'object_name': 'Win/clang-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'cb416511e6379b7fd3f362f637ebb8a28957d0d2ff2dc6e2d9f4484a381f2885', + 'size_bytes': 44655000, + 'generation': 1726118399720986, 'condition': 'host_os == "win"', }, { - 'object_name': 'Win/clang-tidy-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 
'a86aa579fc90a053b94874a8c79daeb7f3bbd77107fb460c6d0a7959cefb7e61', - 'size_bytes': 13055812, - 'generation': 1723267057185720, + 'object_name': 'Win/clang-tidy-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '15af2ae61dabdfe0ddbdd48f467b996855ba51d0ef633c5c0ac3c74cdc0d8f2c', + 'size_bytes': 13114928, + 'generation': 1726118400057660, 'condition': 'host_os == "win" and checkout_clang_tidy', }, { - 'object_name': 'Win/clang-win-runtime-library-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '6d89f358769ef50d008194e0ab9e8d4d80b8d6ffc0095ed44aef925d900aa743', - 'size_bytes': 2873772, - 'generation': 1723267074433695, + 'object_name': 'Win/clang-win-runtime-library-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '81d66840357d83ca1a2c85ebca5259a7a86d9e99c77b37727fbaee87ccacf675', + 'size_bytes': 2897452, + 'generation': 1726118416326356, 'condition': 'checkout_win and not host_os == "win"', }, { - 'object_name': 'Win/clangd-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'e2b69a726f794005a333ae66a0ef5c0258872a19bc4506eff23f23fdee75ba5c', - 'size_bytes': 25053884, - 'generation': 1723267057351794, + 'object_name': 'Win/clangd-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '1304718c221543b16465a4b6108572fa1ba9f2b75c4e4398bdb01fb983428c10', + 'size_bytes': 25169688, + 'generation': 1726118400193324, 'condition': 'host_os == "win" and checkout_clangd', }, { - 'object_name': 'Win/llvm-code-coverage-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': 'e68e7c7ecbc7b2fc4f7ec3e97565a7f12bab1d195d22bc76959f3a88b1462ac1', - 'size_bytes': 2376020, - 'generation': 1723267057803475, + 'object_name': 'Win/llvm-code-coverage-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': 'e01b8fbca72fc1cca6988e359d9a0eea8fa5ccbaff8d41deffd62970c7f4fed5', + 'size_bytes': 2382756, + 'generation': 1726118400642803, 'condition': 'host_os == "win" and checkout_clang_coverage_tools', }, { - 'object_name': 'Win/llvmobjdump-llvmorg-20-init-1009-g7088a5ed-10.tar.xz', - 'sha256sum': '801714415847b8efea7252b1072b8647f92ba0e946480b3db9b156900e42ab55', - 'size_bytes': 5392812, - 'generation': 1723267057506056, + 'object_name': 'Win/llvmobjdump-llvmorg-20-init-3847-g69c43468-28.tar.xz', + 'sha256sum': '2f837a21d910ad748666282d0c1da15a438d9aae4fc1bc85dab7313da6dfeb7b', + 'size_bytes': 5439736, + 'generation': 1726118400404099, 'condition': 'checkout_linux or checkout_mac or checkout_android and host_os == "win"', }, ], @@ -471,7 +473,7 @@ deps = { 'third_party/perfetto': Var('android_url') + '/platform/external/perfetto.git' + '@' + '6fc824d618d2f06b5d9cd8655ba0419b6b3b366e', 'third_party/protobuf': - Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + 'da2fe725b80ac0ba646fbf77d0ce5b4ac236f823', + Var('chromium_url') + '/chromium/src/third_party/protobuf.git' + '@' + '37bbf271c62d6c01c58c66505b17c7dcf086371a', 'third_party/re2/src': Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + '6dcd83d60f7944926bfd308cc13979fc53dd69ca', 'third_party/requests': { @@ -489,9 +491,9 @@ deps = { 'condition': 'not build_with_chromium and host_cpu != "s390" and host_os != "zos" and host_cpu != "ppc"', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'd3aea2341cdeaf7e717bc257a59aa7a9407d318a', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'fa9f14143c7938e6a1d18443900efee7a1e5e669', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 
'63b7be17f8981d716ea9a0d65bb04654d79548a8', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'e47c184ec52d50c7aa2a99cd3bd26ebcafaa94b9', 'tools/luci-go': { 'packages': [ { @@ -509,7 +511,7 @@ deps = { 'tools/protoc_wrapper': Var('chromium_url') + '/chromium/src/tools/protoc_wrapper.git' + '@' + 'dbcbea90c20ae1ece442d8ef64e61c7b10e2b013', 'third_party/abseil-cpp': { - 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'ed3733b91e472a1e7a641c1f0c1e6c0ea698e958', + 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '1f7e21e34c3807a8841c9562cfc8b3213eb50bfc', 'condition': 'not build_with_chromium', }, 'third_party/zoslib': { @@ -524,6 +526,7 @@ include_rules = [ '+unicode', '+third_party/fdlibm', '+third_party/ittapi/include', + '+third_party/fast_float/src/include', '+third_party/fp16/src/include', '+third_party/v8/codegen', '+third_party/fuzztest', diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS index 6850defb79dd1e..efde42b36b00c1 100644 --- a/deps/v8/WATCHLISTS +++ b/deps/v8/WATCHLISTS @@ -129,7 +129,6 @@ 'verwaest+watch@chromium.org', ], 'feature_shipping_status': [ - 'hablich@chromium.org', 'saelo+watch@chromium.org', ], 'heap_changes': [ diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index 6a3c868b002142..ba1716d9ad466a 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -561,6 +561,7 @@ def build_config_content(cpu, icu): ("is_android", "false"), ("is_ios", "false"), ("js_shared_memory", "false"), + ("leaptiering", "true"), ("lite_mode", "false"), ("local_off_stack_check", "false"), ("memory_corruption_api", "false"), @@ -578,7 +579,6 @@ def build_config_content(cpu, icu): ("single_generation", "false"), ("slow_dchecks", "false"), ("target_cpu", cpu), - ("third_party_heap", "false"), ("tsan", "false"), ("ubsan", "false"), ("use_sanitizer", "false"), diff --git a/deps/v8/docs/OWNERS b/deps/v8/docs/OWNERS index 39b706f0cc54e9..07428b275bbbc4 100644 --- a/deps/v8/docs/OWNERS +++ b/deps/v8/docs/OWNERS @@ -1,2 +1 @@ -hablich@chromium.org -mathias@chromium.org +hpayer@chromium.org diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index b606c1b85b06a4..09c3e7adec40a9 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -174,6 +174,11 @@ declare_args() { # Emit CET IBT landing pad instructions in JIT generated code (experimental). v8_enable_cet_ibt = false + + # Use memory sealing to protect various global memory mappings for CFI + # (experimental). + # TODO(sroettger): enable by default once we have bot support for testing. 
+ v8_enable_memory_sealing = false } if (v8_use_external_startup_data == "") { diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS index 535040c539a732..9a2d99e8758559 100644 --- a/deps/v8/include/OWNERS +++ b/deps/v8/include/OWNERS @@ -19,5 +19,4 @@ per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gservice # For branch updates: per-file v8-version.h=file:../INFRA_OWNERS -per-file v8-version.h=hablich@chromium.org per-file v8-version.h=vahl@chromium.org diff --git a/deps/v8/include/cppgc/default-platform.h b/deps/v8/include/cppgc/default-platform.h index a27871cc37ee47..07ce55bbf42f88 100644 --- a/deps/v8/include/cppgc/default-platform.h +++ b/deps/v8/include/cppgc/default-platform.h @@ -37,11 +37,12 @@ class V8_EXPORT DefaultPlatform : public Platform { return v8_platform_->MonotonicallyIncreasingTime(); } - std::shared_ptr GetForegroundTaskRunner() override { + std::shared_ptr GetForegroundTaskRunner( + TaskPriority priority) override { // V8's default platform creates a new task runner when passed the // `v8::Isolate` pointer the first time. For non-default platforms this will // require getting the appropriate task runner. - return v8_platform_->GetForegroundTaskRunner(kNoIsolate); + return v8_platform_->GetForegroundTaskRunner(kNoIsolate, priority); } std::unique_ptr PostJob( diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h index eb7fdaee8c3c02..23b5d909945b96 100644 --- a/deps/v8/include/cppgc/heap-consistency.h +++ b/deps/v8/include/cppgc/heap-consistency.h @@ -114,7 +114,7 @@ class HeapConsistency final { * has not yet been processed. * * \param params The parameters retrieved from `GetWriteBarrierType()`. - * \param object The pointer to the object. May be an interior pointer to a + * \param object The pointer to the object. May be an interior pointer to * an interface of the actual object. */ static V8_INLINE void DijkstraWriteBarrier(const WriteBarrierParams& params, diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h index ae96579dd64e44..a5eccfa80fe812 100644 --- a/deps/v8/include/cppgc/platform.h +++ b/deps/v8/include/cppgc/platform.h @@ -52,6 +52,15 @@ class V8_EXPORT Platform { * Foreground task runner that should be used by a Heap. */ virtual std::shared_ptr GetForegroundTaskRunner() { + return GetForegroundTaskRunner(TaskPriority::kUserBlocking); + } + + /** + * Returns a TaskRunner with a specific |priority| which can be used to post a + * task on the foreground thread. + */ + virtual std::shared_ptr GetForegroundTaskRunner( + TaskPriority priority) { return nullptr; } diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h index 7a8f8abeb402d9..567b36faf63ddb 100644 --- a/deps/v8/include/v8-callbacks.h +++ b/deps/v8/include/v8-callbacks.h @@ -254,15 +254,7 @@ using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value); using BeforeCallEnteredCallback = void (*)(Isolate*); using CallCompletedCallback = void (*)(Isolate*); -// --- AllowCodeGenerationFromStrings callbacks --- - -/** - * Callback to check if code generation from strings is allowed. See - * Context::AllowCodeGenerationFromStrings. - */ -using AllowCodeGenerationFromStringsCallback = bool (*)(Local context, - Local source); - +// --- Modify Code Generation From Strings Callback --- struct ModifyCodeGenerationFromStringsResult { // If true, proceed with the codegen algorithm. Otherwise, block it. 
bool codegen_allowed = false; @@ -272,6 +264,20 @@ struct ModifyCodeGenerationFromStringsResult { MaybeLocal modified_source; }; +/** + * Callback to check if codegen is allowed from a source object, and convert + * the source to string if necessary. See: ModifyCodeGenerationFromStrings. + */ +using ModifyCodeGenerationFromStringsCallback = + ModifyCodeGenerationFromStringsResult (*)(Local context, + Local source); +using ModifyCodeGenerationFromStringsCallback2 = + ModifyCodeGenerationFromStringsResult (*)(Local context, + Local source, + bool is_code_like); + +// --- Failed Access Check Callback --- + /** * Access type specification. */ @@ -283,23 +289,9 @@ enum AccessType { ACCESS_KEYS }; -// --- Failed Access Check Callback --- - using FailedAccessCheckCallback = void (*)(Local target, AccessType type, Local data); -/** - * Callback to check if codegen is allowed from a source object, and convert - * the source to string if necessary. See: ModifyCodeGenerationFromStrings. - */ -using ModifyCodeGenerationFromStringsCallback = - ModifyCodeGenerationFromStringsResult (*)(Local context, - Local source); -using ModifyCodeGenerationFromStringsCallback2 = - ModifyCodeGenerationFromStringsResult (*)(Local context, - Local source, - bool is_code_like); - // --- WebAssembly compilation callbacks --- using ExtensionCallback = bool (*)(const FunctionCallbackInfo&); diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h index f432ff67f7053a..0861d66493bd2d 100644 --- a/deps/v8/include/v8-context.h +++ b/deps/v8/include/v8-context.h @@ -320,7 +320,7 @@ class V8_EXPORT Context : public Data { * 'Function' constructor are used an exception will be thrown. * * If code generation from strings is not allowed the - * V8::AllowCodeGenerationFromStrings callback will be invoked if + * V8::ModifyCodeGenerationFromStringsCallback callback will be invoked if * set before blocking the call to 'eval' or the 'Function' * constructor. If that callback returns true, the call will be * allowed, otherwise an exception will be thrown. If no callback is diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index e53c20253b2aff..a13db2bd74ad4b 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -18,6 +18,22 @@ #include "v8config.h" // NOLINT(build/include_directory) +// TODO(pkasting): Use /spaceship unconditionally after dropping +// support for old libstdc++ versions. +#if __has_include() +#include +#endif +#if defined(__cpp_lib_three_way_comparison) && \ + __cpp_lib_three_way_comparison >= 201711L && \ + defined(__cpp_lib_concepts) && __cpp_lib_concepts >= 202002L +#include +#include + +#define V8_HAVE_SPACESHIP_OPERATOR 1 +#else +#define V8_HAVE_SPACESHIP_OPERATOR 0 +#endif + namespace v8 { class Array; @@ -295,7 +311,8 @@ constexpr size_t kExternalPointerTableReservationSize = 256 * MB; // The external pointer table indices stored in HeapObjects as external // pointers are shifted to the left by this amount to guarantee that they are -// smaller than the maximum table size. +// smaller than the maximum table size even after the C++ compiler multiplies +// them by 8 to be used as indexes into a table of 64 bit pointers. 
constexpr uint32_t kExternalPointerIndexShift = 7; #else constexpr size_t kExternalPointerTableReservationSize = 512 * MB; @@ -1351,11 +1368,11 @@ class V8_EXPORT StrongRootAllocatorBase { public: Heap* heap() const { return heap_; } - bool operator==(const StrongRootAllocatorBase& other) const { - return heap_ == other.heap_; - } - bool operator!=(const StrongRootAllocatorBase& other) const { - return heap_ != other.heap_; + friend bool operator==(const StrongRootAllocatorBase& a, + const StrongRootAllocatorBase& b) { + // TODO(pkasting): Replace this body with `= default` after dropping support + // for old gcc versions. + return a.heap_ == b.heap_; } protected: @@ -1390,22 +1407,60 @@ class StrongRootAllocator : private std::allocator { using std::allocator::deallocate; }; +// TODO(pkasting): Replace with `requires` clauses after dropping support for +// old gcc versions. +template +inline constexpr bool kHaveIteratorConcept = false; +template +inline constexpr bool kHaveIteratorConcept< + Iterator, std::void_t> = true; + +template +inline constexpr bool kHaveIteratorCategory = false; +template +inline constexpr bool kHaveIteratorCategory< + Iterator, std::void_t> = true; + +// Helper struct that contains an `iterator_concept` type alias only when either +// `Iterator` or `std::iterator_traits` do. +// Default: no alias. +template +struct MaybeDefineIteratorConcept {}; +// Use `Iterator::iterator_concept` if available. +template +struct MaybeDefineIteratorConcept< + Iterator, std::enable_if_t>> { + using iterator_concept = Iterator::iterator_concept; +}; +// Otherwise fall back to `std::iterator_traits` if possible. +template +struct MaybeDefineIteratorConcept< + Iterator, std::enable_if_t && + !kHaveIteratorConcept>> { + // There seems to be no feature-test macro covering this, so use the + // presence of `` as a crude proxy, since it was added to the + // standard as part of the Ranges papers. + // TODO(pkasting): Add this unconditionally after dropping support for old + // libstdc++ versions. +#if __has_include() + using iterator_concept = std::iterator_traits::iterator_concept; +#endif +}; + // A class of iterators that wrap some different iterator type. // If specified, ElementType is the type of element accessed by the wrapper // iterator; in this case, the actual reference and pointer types of Iterator // must be convertible to ElementType& and ElementType*, respectively. 
template -class WrappedIterator { +class WrappedIterator : public MaybeDefineIteratorConcept { public: static_assert( - !std::is_void_v || + std::is_void_v || (std::is_convertible_v::pointer, - ElementType*> && + std::add_pointer_t> && std::is_convertible_v::reference, - ElementType&>)); + std::add_lvalue_reference_t>)); - using iterator_category = - typename std::iterator_traits::iterator_category; using difference_type = typename std::iterator_traits::difference_type; using value_type = @@ -1415,24 +1470,96 @@ class WrappedIterator { using pointer = std::conditional_t, typename std::iterator_traits::pointer, - ElementType*>; + std::add_pointer_t>; using reference = std::conditional_t, typename std::iterator_traits::reference, - ElementType&>; + std::add_lvalue_reference_t>; + using iterator_category = + typename std::iterator_traits::iterator_category; - constexpr WrappedIterator() noexcept : it_() {} + constexpr WrappedIterator() noexcept = default; constexpr explicit WrappedIterator(Iterator it) noexcept : it_(it) {} + // TODO(pkasting): Switch to `requires` and concepts after dropping support + // for old gcc and libstdc++ versions. template , - bool> = true> + typename = std::enable_if_t< + std::is_convertible_v>> constexpr WrappedIterator( - const WrappedIterator& it) noexcept - : it_(it.base()) {} + const WrappedIterator& other) noexcept + : it_(other.base()) {} - constexpr reference operator*() const noexcept { return *it_; } - constexpr pointer operator->() const noexcept { return it_.operator->(); } + [[nodiscard]] constexpr reference operator*() const noexcept { return *it_; } + [[nodiscard]] constexpr pointer operator->() const noexcept { + return it_.operator->(); + } + + template + [[nodiscard]] constexpr bool operator==( + const WrappedIterator& other) + const noexcept { + return it_ == other.base(); + } +#if V8_HAVE_SPACESHIP_OPERATOR + template + [[nodiscard]] constexpr auto operator<=>( + const WrappedIterator& other) + const noexcept { + if constexpr (std::three_way_comparable_with) { + return it_ <=> other.base(); + } else if constexpr (std::totally_ordered_with) { + if (it_ < other.base()) { + return std::strong_ordering::less; + } + return (it_ > other.base()) ? std::strong_ordering::greater + : std::strong_ordering::equal; + } else { + if (it_ < other.base()) { + return std::partial_ordering::less; + } + if (other.base() < it_) { + return std::partial_ordering::greater; + } + return (it_ == other.base()) ? std::partial_ordering::equivalent + : std::partial_ordering::unordered; + } + } +#else + // Assume that if spaceship isn't present, operator rewriting might not be + // either. 
+ template + [[nodiscard]] constexpr bool operator!=( + const WrappedIterator& other) + const noexcept { + return it_ != other.base(); + } + + template + [[nodiscard]] constexpr bool operator<( + const WrappedIterator& other) + const noexcept { + return it_ < other.base(); + } + template + [[nodiscard]] constexpr bool operator<=( + const WrappedIterator& other) + const noexcept { + return it_ <= other.base(); + } + template + [[nodiscard]] constexpr bool operator>( + const WrappedIterator& other) + const noexcept { + return it_ > other.base(); + } + template + [[nodiscard]] constexpr bool operator>=( + const WrappedIterator& other) + const noexcept { + return it_ >= other.base(); + } +#endif constexpr WrappedIterator& operator++() noexcept { ++it_; @@ -1453,101 +1580,44 @@ class WrappedIterator { --(*this); return result; } - constexpr WrappedIterator operator+(difference_type n) const noexcept { + [[nodiscard]] constexpr WrappedIterator operator+( + difference_type n) const noexcept { WrappedIterator result(*this); result += n; return result; } + [[nodiscard]] friend constexpr WrappedIterator operator+( + difference_type n, const WrappedIterator& x) noexcept { + return x + n; + } constexpr WrappedIterator& operator+=(difference_type n) noexcept { it_ += n; return *this; } - constexpr WrappedIterator operator-(difference_type n) const noexcept { - return *this + (-n); + [[nodiscard]] constexpr WrappedIterator operator-( + difference_type n) const noexcept { + return *this + -n; } constexpr WrappedIterator& operator-=(difference_type n) noexcept { - *this += -n; - return *this; + return *this += -n; } - constexpr reference operator[](difference_type n) const noexcept { + template + [[nodiscard]] constexpr auto operator-( + const WrappedIterator& other) + const noexcept { + return it_ - other.base(); + } + [[nodiscard]] constexpr reference operator[]( + difference_type n) const noexcept { return it_[n]; } - constexpr Iterator base() const noexcept { return it_; } - - private: - template - friend class WrappedIterator; + [[nodiscard]] constexpr const Iterator& base() const noexcept { return it_; } private: Iterator it_; }; -template -constexpr bool operator==( - const WrappedIterator& x, - const WrappedIterator& y) noexcept { - return x.base() == y.base(); -} - -template -constexpr bool operator<( - const WrappedIterator& x, - const WrappedIterator& y) noexcept { - return x.base() < y.base(); -} - -template -constexpr bool operator!=( - const WrappedIterator& x, - const WrappedIterator& y) noexcept { - return !(x == y); -} - -template -constexpr bool operator>( - const WrappedIterator& x, - const WrappedIterator& y) noexcept { - return y < x; -} - -template -constexpr bool operator>=( - const WrappedIterator& x, - const WrappedIterator& y) noexcept { - return !(x < y); -} - -template -constexpr bool operator<=( - const WrappedIterator& x, - const WrappedIterator& y) noexcept { - return !(y < x); -} - -template -constexpr auto operator-( - const WrappedIterator& x, - const WrappedIterator& y) noexcept - -> decltype(x.base() - y.base()) { - return x.base() - y.base(); -} - -template -constexpr WrappedIterator operator+( - typename WrappedIterator::difference_type n, - const WrappedIterator& x) noexcept { - x += n; - return x; -} - // Helper functions about values contained in handles. // A value is either an indirect pointer or a direct pointer, depending on // whether direct local support is enabled. 
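Two asides on the v8-internal.h changes above. First, the expanded kExternalPointerIndexShift comment can be sanity-checked with the constants from this header. A standalone sketch, with the values copied from the 256 MB configuration above:

#include <cstdint>

constexpr uint64_t MB = 1024 * 1024;
// From the configuration above: a 256 MB table reservation and an index
// shift of 7 (the 512 MB configuration pairs with a smaller shift).
constexpr uint64_t kReservationSize = 256 * MB;
constexpr uint32_t kIndexShift = 7;
// A 32-bit handle shifted right by kIndexShift yields the table index...
constexpr uint64_t kMaxIndex = (uint64_t{1} << 32) >> kIndexShift;
// ...and each entry is a 64-bit (8-byte) pointer, so the largest possible
// byte offset exactly fills the reservation, never overrunning it.
static_assert(kMaxIndex * sizeof(uint64_t) == kReservationSize,
              "shifted indexes cannot address past the table");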
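Second, WrappedIterator's new operator<=> picks the strongest comparison category the wrapped iterator supports. The same cascade, reduced to a trivial int wrapper invented here so it compiles in isolation (this is an illustration, not the V8 type):

#include <compare>
#include <concepts>

struct Wrapped {
  int value;

  friend constexpr bool operator==(const Wrapped&, const Wrapped&) = default;

  // Mirrors the cascade above: prefer the wrapped type's own spaceship
  // operator, otherwise synthesize a strong ordering from < and >.
  friend constexpr auto operator<=>(const Wrapped& a, const Wrapped& b) {
    if constexpr (std::three_way_comparable<int>) {
      return a.value <=> b.value;
    } else if constexpr (std::totally_ordered<int>) {
      if (a.value < b.value) return std::strong_ordering::less;
      return (a.value > b.value) ? std::strong_ordering::greater
                                 : std::strong_ordering::equal;
    }
  }
};

// The compiler rewrites <, <=, > and >= in terms of <=>, which is why the
// #else branch above has to spell them all out for pre-spaceship toolchains.
static_assert(Wrapped{1} < Wrapped{2});
static_assert(Wrapped{2} >= Wrapped{2});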
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
index 08fcfc40f1f46e..54571391fb8227 100644
--- a/deps/v8/include/v8-isolate.h
+++ b/deps/v8/include/v8-isolate.h
@@ -548,6 +548,7 @@ class V8_EXPORT Isolate {
     kDocumentAllLegacyCall = 141,
     kDocumentAllLegacyConstruct = 142,
     kConsoleContext = 143,
+    kWasmImportedStringsUtf8 = 144,
 
     // If you add new values here, you'll also need to update Chromium's:
     // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -1746,7 +1747,7 @@ class V8_EXPORT Isolate {
   template <class K, class V, class Traits>
   friend class PersistentValueMapBase;
 
   internal::Address* GetDataFromSnapshotOnce(size_t index);
-  void ReportExternalAllocationLimitReached();
+  void HandleExternalMemoryInterrupt();
 };
 
 void Isolate::SetData(uint32_t slot, void* data) {
diff --git a/deps/v8/include/v8-memory-span.h b/deps/v8/include/v8-memory-span.h
index a7614cf7987ed6..7556b20fa949cb 100644
--- a/deps/v8/include/v8-memory-span.h
+++ b/deps/v8/include/v8-memory-span.h
@@ -13,6 +13,40 @@
 
 #include "v8config.h"  // NOLINT(build/include_directory)
 
+// TODO(pkasting): Use <compare>/spaceship unconditionally after dropping
+// support for old libstdc++ versions.
+#if __has_include(<version>)
+#include <version>
+#endif
+#if defined(__cpp_lib_three_way_comparison) && \
+    __cpp_lib_three_way_comparison >= 201711L
+#define V8_HAVE_SPACESHIP_OPERATOR 1
+#else
+#define V8_HAVE_SPACESHIP_OPERATOR 0
+#endif
+
+// TODO(pkasting): Make this block unconditional after dropping support for old
+// libstdc++ versions.
+#if __has_include(<ranges>)
+#include <ranges>
+
+namespace v8 {
+
+template <typename T>
+class V8_EXPORT MemorySpan;
+
+}  // namespace v8
+
+// Mark `MemorySpan` as satisfying the `view` and `borrowed_range` concepts.
+// This should be done before the definition of `MemorySpan`, so that any
+// inlined calls to range functionality use the correct specializations.
+template <typename T>
+inline constexpr bool std::ranges::enable_view<v8::MemorySpan<T>> = true;
+template <typename T>
+inline constexpr bool std::ranges::enable_borrowed_range<v8::MemorySpan<T>> =
+    true;
+#endif
+
 namespace v8 {
 
 /**
@@ -53,13 +87,13 @@ class V8_EXPORT MemorySpan {
       is_compatible_iterator<It>::value;
 
   template <typename U>
-  static constexpr U* to_address(U* p) noexcept {
+  [[nodiscard]] static constexpr U* to_address(U* p) noexcept {
     return p;
   }
 
   template <typename It,
             typename = std::void_t<decltype(std::declval<It>().operator->())>>
-  static constexpr auto to_address(It it) noexcept {
+  [[nodiscard]] static constexpr auto to_address(It it) noexcept {
     return it.operator->();
   }
 
@@ -108,50 +142,139 @@ class V8_EXPORT MemorySpan {
       : data_(a.data()), size_{N} {}
 
   /** Returns a pointer to the beginning of the buffer. */
-  constexpr T* data() const { return data_; }
+  [[nodiscard]] constexpr T* data() const { return data_; }
   /** Returns the number of elements that the buffer holds. */
-  constexpr size_t size() const { return size_; }
+  [[nodiscard]] constexpr size_t size() const { return size_; }
 
-  constexpr T& operator[](size_t i) const { return data_[i]; }
+  [[nodiscard]] constexpr T& operator[](size_t i) const { return data_[i]; }
 
   /**
    * Returns true if the buffer is empty.
   */
-  constexpr bool empty() const { return size() == 0; }
+  [[nodiscard]] constexpr bool empty() const { return size() == 0; }
 
   class Iterator {
    public:
-    using iterator_category = std::forward_iterator_tag;
-    using value_type = T;
     using difference_type = std::ptrdiff_t;
+    using value_type = T;
     using pointer = value_type*;
     using reference = value_type&;
+    using iterator_category = std::random_access_iterator_tag;
+    // There seems to be no feature-test macro covering this, so use the
+    // presence of `<ranges>` as a crude proxy, since it was added to the
+    // standard as part of the Ranges papers.
+    // TODO(pkasting): Add this unconditionally after dropping support for old
+    // libstdc++ versions.
+#if __has_include(<ranges>)
+    using iterator_concept = std::contiguous_iterator_tag;
+#endif
+
+    // Required to satisfy `std::semiregular<>`.
+    constexpr Iterator() = default;
+
+    [[nodiscard]] friend constexpr bool operator==(const Iterator& a,
+                                                   const Iterator& b) {
+      // TODO(pkasting): Replace this body with `= default` after dropping
+      // support for old gcc versions.
+      return a.ptr_ == b.ptr_;
+    }
+#if V8_HAVE_SPACESHIP_OPERATOR
+    [[nodiscard]] friend constexpr auto operator<=>(const Iterator&,
+                                                    const Iterator&) = default;
+#else
+    // Assume that if spaceship isn't present, operator rewriting might not be
+    // either.
+    [[nodiscard]] friend constexpr bool operator!=(const Iterator& a,
+                                                   const Iterator& b) {
+      return a.ptr_ != b.ptr_;
+    }
 
-    T& operator*() const { return *ptr_; }
-    T* operator->() const { return ptr_; }
-
-    bool operator==(Iterator other) const { return ptr_ == other.ptr_; }
-    bool operator!=(Iterator other) const { return !(*this == other); }
+    [[nodiscard]] friend constexpr bool operator<(const Iterator& a,
+                                                  const Iterator& b) {
+      return a.ptr_ < b.ptr_;
+    }
+    [[nodiscard]] friend constexpr bool operator<=(const Iterator& a,
+                                                   const Iterator& b) {
+      return a.ptr_ <= b.ptr_;
+    }
+    [[nodiscard]] friend constexpr bool operator>(const Iterator& a,
+                                                  const Iterator& b) {
+      return a.ptr_ > b.ptr_;
+    }
+    [[nodiscard]] friend constexpr bool operator>=(const Iterator& a,
+                                                   const Iterator& b) {
+      return a.ptr_ >= b.ptr_;
+    }
+#endif
 
-    Iterator& operator++() {
+    constexpr Iterator& operator++() {
       ++ptr_;
       return *this;
     }
 
-    Iterator operator++(int) {
-      Iterator temp(*this);
-      ++(*this);
+    constexpr Iterator operator++(int) {
+      Iterator temp = *this;
+      ++*this;
       return temp;
     }
 
+    constexpr Iterator& operator--() {
+      --ptr_;
+      return *this;
+    }
+
+    constexpr Iterator operator--(int) {
+      Iterator temp = *this;
+      --*this;
+      return temp;
+    }
+
+    constexpr Iterator& operator+=(difference_type rhs) {
+      ptr_ += rhs;
+      return *this;
+    }
+
+    [[nodiscard]] friend constexpr Iterator operator+(Iterator lhs,
+                                                      difference_type rhs) {
+      lhs += rhs;
+      return lhs;
+    }
+
+    [[nodiscard]] friend constexpr Iterator operator+(difference_type lhs,
+                                                      const Iterator& rhs) {
+      return rhs + lhs;
+    }
+
+    constexpr Iterator& operator-=(difference_type rhs) {
+      ptr_ -= rhs;
+      return *this;
+    }
+
+    [[nodiscard]] friend constexpr Iterator operator-(Iterator lhs,
+                                                      difference_type rhs) {
+      lhs -= rhs;
+      return lhs;
+    }
+
+    [[nodiscard]] friend constexpr difference_type operator-(
+        const Iterator& lhs, const Iterator& rhs) {
+      return lhs.ptr_ - rhs.ptr_;
+    }
+
+    [[nodiscard]] constexpr reference operator*() const { return *ptr_; }
+    [[nodiscard]] constexpr pointer operator->() const { return ptr_; }
+    [[nodiscard]] constexpr reference operator[](size_t offset) const {
+      return ptr_[offset];
+    }
+
    private:
     friend class MemorySpan<T>;
 
-    explicit Iterator(T* ptr) : ptr_(ptr) {}
+    constexpr explicit Iterator(T* ptr) : ptr_(ptr) {}
 
     T* ptr_ = nullptr;
   };
 
-  Iterator begin() const { return Iterator(data_); }
-  Iterator end() const { return Iterator(data_ + size_); }
+  [[nodiscard]] Iterator begin() const { return Iterator(data_); }
+  [[nodiscard]] Iterator end() const { return Iterator(data_ + size_); }
 
  private:
   T* data_ = nullptr;
@@ -171,25 +294,26 @@ class V8_EXPORT MemorySpan {
 namespace detail {
 
 template <typename T, size_t N, size_t... I>
-constexpr std::array<std::remove_cv_t<T>, N> to_array_lvalue_impl(
+[[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array_lvalue_impl(
     T (&a)[N], std::index_sequence<I...>) {
   return {{a[I]...}};
 }
 
 template <typename T, size_t N, size_t... I>
-constexpr std::array<std::remove_cv_t<T>, N> to_array_rvalue_impl(
+[[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array_rvalue_impl(
     T (&&a)[N], std::index_sequence<I...>) {
   return {{std::move(a[I])...}};
 }
 
 }  // namespace detail
 
 template <typename T, size_t N>
-constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
+[[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
   return detail::to_array_lvalue_impl(a, std::make_index_sequence<N>{});
 }
 
 template <typename T, size_t N>
-constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&&a)[N]) {
+[[nodiscard]] constexpr std::array<std::remove_cv_t<T>, N> to_array(
+    T (&&a)[N]) {
   return detail::to_array_rvalue_impl(std::move(a),
                                       std::make_index_sequence<N>{});
 }
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 5110f432077dce..a3a939729c9b14 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -14,7 +14,7 @@
 #include <memory>
 
 #include "v8-source-location.h"  // NOLINT(build/include_directory)
-#include "v8config.h"  // NOLINT(build/include_directory)
+#include "v8config.h"            // NOLINT(build/include_directory)
 
 namespace v8 {
 
@@ -79,9 +79,8 @@ class TaskRunner {
    *
    * Embedders should override PostTaskImpl instead of this.
    */
-  void PostTask(
-      std::unique_ptr<Task> task,
-      const SourceLocation& location = SourceLocation::Current()) {
+  void PostTask(std::unique_ptr<Task> task,
+                const SourceLocation& location = SourceLocation::Current()) {
     PostTaskImpl(std::move(task), location);
   }
 
@@ -553,6 +552,19 @@ class PageAllocator {
    */
   virtual bool DecommitPages(void* address, size_t size) = 0;
 
+  /**
+   * Block any modifications to the given mapping such as changing permissions
+   * or unmapping the pages on supported platforms.
+   * The address space reservation will exist until the process ends, but it's
+   * possible to release the memory using DiscardSystemPages. Note that this
+   * might require write permissions to the page as e.g. on Linux, mseal will
+   * block discarding sealed anonymous memory.
+   */
+  virtual bool SealPages(void* address, size_t length) {
+    // TODO(360048056): make it pure once it's implemented on Chromium side.
+    return false;
+  }
+
   /**
    * INTERNAL ONLY: This interface has not been stabilised and may change
    * without notice from one release to another without being deprecated first.
@@ -1086,11 +1098,8 @@ class Platform {
    * Returns a TaskRunner which can be used to post a task on the foreground.
    * The TaskRunner's NonNestableTasksEnabled() must be true. This function
    * should only be called from a foreground thread.
-   * TODO(chromium:1448758): Deprecate once |GetForegroundTaskRunner(Isolate*,
-   * TaskPriority)| is ready.
    */
-  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
-      Isolate* isolate) {
+  std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(Isolate* isolate) {
     return GetForegroundTaskRunner(isolate, TaskPriority::kUserBlocking);
   }
 
  /**
   * Returns a TaskRunner with a specific |priority| which can be used to post a
   * task on the foreground thread.
   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
   * should only be called from a foreground thread.
-   * TODO(chromium:1448758): Make pure virtual once embedders implement it.
   */
   virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
-      Isolate* isolate, TaskPriority priority) {
-    return nullptr;
-  }
+      Isolate* isolate, TaskPriority priority) = 0;
 
  /**
   * Schedules a task to be invoked on a worker thread.
diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h
index c30d42502769c1..9d3556e222253a 100644
--- a/deps/v8/include/v8-script.h
+++ b/deps/v8/include/v8-script.h
@@ -129,6 +129,11 @@ class V8_EXPORT ModuleRequest : public Data {
   */
  Local<String> GetSpecifier() const;
 
+  /**
+   * Returns the module import phase for this ModuleRequest.
+   */
+  ModuleImportPhase GetPhase() const;
+
  /**
   * Returns the source code offset of this module request.
   * Use Module::SourceOffsetToLocation to convert this to line/column numbers.
@@ -211,6 +216,9 @@ class V8_EXPORT Module : public Data {
  using ResolveModuleCallback = MaybeLocal<Module> (*)(
      Local<Context> context, Local<String> specifier,
      Local<FixedArray> import_attributes, Local<Module> referrer);
+  using ResolveSourceCallback = MaybeLocal<Object> (*)(
+      Local<Context> context, Local<String> specifier,
+      Local<FixedArray> import_attributes, Local<Module> referrer);
 
  /**
   * Instantiates the module and its dependencies.
@@ -220,7 +228,8 @@ class V8_EXPORT Module : public Data {
   * exception is propagated.)
   */
  V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
-      Local<Context> context, ResolveModuleCallback callback);
+      Local<Context> context, ResolveModuleCallback module_callback,
+      ResolveSourceCallback source_callback = nullptr);
 
  /**
   * Evaluates the module and its dependencies.
diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h
index 7b47c3b6282d24..93b9428572641c 100644
--- a/deps/v8/include/v8-template.h
+++ b/deps/v8/include/v8-template.h
@@ -72,8 +72,6 @@ class V8_EXPORT Template : public Data {
   * \param name The name of the property for which an accessor is added.
   * \param getter The callback to invoke when getting the property.
   * \param setter The callback to invoke when setting the property.
-   * \param data A piece of data that will be passed to the getter and setter
-   * callbacks whenever they are invoked.
   * \param attribute The attributes of the property for which an accessor
   * is added.
   */
diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h
index 04752c4f2aeed0..28fe6cf5ae3ad0 100644
--- a/deps/v8/include/v8-traced-handle.h
+++ b/deps/v8/include/v8-traced-handle.h
@@ -37,15 +37,13 @@ enum class TracedReferenceHandling {
   kDroppable
 };
 
-V8_EXPORT internal::Address* GlobalizeTracedReference(
-    internal::Isolate* isolate, internal::Address value,
-    internal::Address* slot, TracedReferenceStoreMode store_mode,
-    internal::TracedReferenceHandling reference_handling);
-V8_EXPORT void MoveTracedReference(internal::Address** from,
-                                   internal::Address** to);
-V8_EXPORT void CopyTracedReference(const internal::Address* const* from,
-                                   internal::Address** to);
-V8_EXPORT void DisposeTracedReference(internal::Address* global_handle);
+V8_EXPORT Address* GlobalizeTracedReference(
+    Isolate* isolate, Address value, Address* slot,
+    TracedReferenceStoreMode store_mode,
+    TracedReferenceHandling reference_handling);
+V8_EXPORT void MoveTracedReference(Address** from, Address** to);
+V8_EXPORT void CopyTracedReference(const Address* const* from, Address** to);
+V8_EXPORT void DisposeTracedReference(Address* global_handle);
 
 }  // namespace internal
 
@@ -55,6 +53,9 @@ V8_EXPORT void DisposeTracedReference(internal::Address* global_handle);
  */
 class TracedReferenceBase : public api_internal::IndirectHandleBase {
  public:
+  static_assert(sizeof(std::atomic<internal::Address*>) ==
+                sizeof(internal::Address*));
+
   /**
    * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
    * true after this call.
@@ -73,9 +74,7 @@ class TracedReferenceBase : public api_internal::IndirectHandleBase {
    * Returns true if this TracedReference is empty, i.e., has not been
    * assigned an object. This version of IsEmpty is thread-safe.
    */
-  bool IsEmptyThreadSafe() const {
-    return this->GetSlotThreadSafe() == nullptr;
-  }
+  bool IsEmptyThreadSafe() const { return GetSlotThreadSafe() == nullptr; }
 
 protected:
  V8_INLINE TracedReferenceBase() = default;
 
  /**
   * Update this reference in a thread-safe way.
   */
-  void SetSlotThreadSafe(void* new_val) {
-    reinterpret_cast<std::atomic<void*>*>(&slot())->store(
+  void SetSlotThreadSafe(internal::Address* new_val) {
+    reinterpret_cast<std::atomic<internal::Address*>*>(&slot())->store(
         new_val, std::memory_order_relaxed);
   }
 
   /**
    * Get this reference in a thread-safe way
    */
-  const void* GetSlotThreadSafe() const {
-    return reinterpret_cast<std::atomic<const void*> const*>(&slot())->load(
-        std::memory_order_relaxed);
+  const internal::Address* GetSlotThreadSafe() const {
+    return reinterpret_cast<std::atomic<const internal::Address*>*>(&slot())
+        ->load(std::memory_order_relaxed);
   }
 
   V8_EXPORT void CheckValue() const;
diff --git a/deps/v8/include/v8-unwinder-state.h b/deps/v8/include/v8-unwinder-state.h
index 18bb410d2b163b..235211e3abeb2b 100644
--- a/deps/v8/include/v8-unwinder-state.h
+++ b/deps/v8/include/v8-unwinder-state.h
@@ -18,9 +18,8 @@ struct CalleeSavedRegisters {
   void* arm_r10;
 };
 #elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||   \
-    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 ||   \
-    V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || \
-    V8_TARGET_ARCH_RISCV32
+    V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || \
+    V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
 struct CalleeSavedRegisters {};
 #else
 #error Target architecture was not detected as supported by v8
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 2f4e47742f6a25..4750214ffe3f3d 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
 // These macros define the version number for the current version.
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 12
-#define V8_MINOR_VERSION 9
-#define V8_BUILD_NUMBER 202
-#define V8_PATCH_LEVEL 28
+#define V8_MAJOR_VERSION 13
+#define V8_MINOR_VERSION 0
+#define V8_BUILD_NUMBER 245
+#define V8_PATCH_LEVEL 25
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 73a6a91d49bf0e..68d8065ccda351 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -581,15 +581,11 @@ path. Add it with -I<path> to the command line
 // functions.
 // Use like:
 //   V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
-#if V8_OS_WIN
-# define V8_PRESERVE_MOST
-#else
 #if V8_HAS_ATTRIBUTE_PRESERVE_MOST
 # define V8_PRESERVE_MOST __attribute__((preserve_most))
 #else
 # define V8_PRESERVE_MOST /* NOT SUPPORTED */
 #endif
-#endif
 
 
 // A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
@@ -681,7 +677,7 @@ path. Add it with -I<path> to the command line
 //   V8_NODISCARD Foo() { ... };
 // [[nodiscard]] comes in C++17 but supported in clang with -std >= c++11.
 #if V8_HAS_CPP_ATTRIBUTE_NODISCARD
-#define V8_NODISCARD
+#define V8_NODISCARD [[nodiscard]]
 #else
 #define V8_NODISCARD /* NOT SUPPORTED */
 #endif
@@ -833,9 +829,6 @@ V8 shared library set USING_V8_SHARED.
 #elif defined(__PPC64__) || defined(_ARCH_PPC64)
 #define V8_HOST_ARCH_PPC64 1
 #define V8_HOST_ARCH_64_BIT 1
-#elif defined(__PPC__) || defined(_ARCH_PPC)
-#define V8_HOST_ARCH_PPC 1
-#define V8_HOST_ARCH_32_BIT 1
 #elif defined(__s390__) || defined(__s390x__)
 #define V8_HOST_ARCH_S390 1
 #if defined(__s390x__)
@@ -862,10 +855,10 @@ V8 shared library set USING_V8_SHARED.
 // The macros may be set externally.
If not, detect in the same way as the host // architecture, that is, target the native environment as presented by the // compiler. -#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ - !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && \ - !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ - !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ +#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ + !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ + !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ + !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ !V8_TARGET_ARCH_RISCV32 #if defined(_M_X64) || defined(__x86_64__) #define V8_TARGET_ARCH_X64 1 @@ -881,8 +874,6 @@ V8 shared library set USING_V8_SHARED. #define V8_TARGET_ARCH_LOONG64 1 #elif defined(_ARCH_PPC64) #define V8_TARGET_ARCH_PPC64 1 -#elif defined(_ARCH_PPC) -#define V8_TARGET_ARCH_PPC 1 #elif defined(__s390__) #define V8_TARGET_ARCH_S390 1 #if defined(__s390x__) @@ -920,8 +911,6 @@ V8 shared library set USING_V8_SHARED. #define V8_TARGET_ARCH_64_BIT 1 #elif V8_TARGET_ARCH_LOONG64 #define V8_TARGET_ARCH_64_BIT 1 -#elif V8_TARGET_ARCH_PPC -#define V8_TARGET_ARCH_32_BIT 1 #elif V8_TARGET_ARCH_PPC64 #define V8_TARGET_ARCH_64_BIT 1 #elif V8_TARGET_ARCH_S390 @@ -986,12 +975,12 @@ V8 shared library set USING_V8_SHARED. #else #define V8_TARGET_LITTLE_ENDIAN 1 #endif -#elif defined(__BIG_ENDIAN__) // FOR PPCGR on AIX +#elif V8_TARGET_ARCH_PPC64 +#if V8_OS_AIX #define V8_TARGET_BIG_ENDIAN 1 -#elif V8_TARGET_ARCH_PPC_LE +#else #define V8_TARGET_LITTLE_ENDIAN 1 -#elif V8_TARGET_ARCH_PPC_BE -#define V8_TARGET_BIG_ENDIAN 1 +#endif #elif V8_TARGET_ARCH_S390 #if V8_TARGET_ARCH_S390_LE_SIM #define V8_TARGET_LITTLE_ENDIAN 1 @@ -1017,5 +1006,10 @@ V8 shared library set USING_V8_SHARED. #else #define V8_STATIC_ROOTS_BOOL true #endif +#ifdef V8_TARGET_BIG_ENDIAN +#define V8_TARGET_BIG_ENDIAN_BOOL true +#else +#define V8_TARGET_BIG_ENDIAN_BOOL false +#endif #endif // V8CONFIG_H_ diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl index 40df0f013f7d49..924bba4adff027 100644 --- a/deps/v8/infra/mb/gn_isolate_map.pyl +++ b/deps/v8/infra/mb/gn_isolate_map.pyl @@ -71,6 +71,10 @@ "label": "//test:v8_run_num_fuzzer", "type": "script", }, + "snapshot_set": { + "label": "//:snapshot_set", + "type": "script", + }, "test262": { "label": "//test/test262:v8_test262", "type": "script", diff --git a/deps/v8/samples/OWNERS b/deps/v8/samples/OWNERS index 6df8720dc57afb..904e7d11f04881 100644 --- a/deps/v8/samples/OWNERS +++ b/deps/v8/samples/OWNERS @@ -1,2 +1,2 @@ -mathias@chromium.org +ahaas@chromium.org cbruni@chromium.org diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h index 193dceb74683a2..0f0c72cc8ffabe 100644 --- a/deps/v8/src/api/api-macros.h +++ b/deps/v8/src/api/api-macros.h @@ -16,7 +16,7 @@ * ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION. * * The latter two assume that no script is executed, and no exceptions are - * scheduled in addition (respectively). Creating a exception and + * scheduled in addition (respectively). Creating an exception and * removing it before returning is ok. 
 *
 * Exceptions should be handled either by invoking one of the
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index 74e44e6d0fd790..651073fa866571 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -85,7 +85,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
         isolate, getter,
         InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(getter)));
     DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
-    Cast<JSFunction>(getter)->set_code(*trampoline);
+    Cast<JSFunction>(getter)->UpdateCode(*trampoline);
   }
   if (IsFunctionTemplateInfo(*setter) &&
       Cast<FunctionTemplateInfo>(*setter)->BreakAtEntry(isolate)) {
@@ -93,7 +93,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
         isolate, setter,
         InstantiateFunction(isolate, Cast<FunctionTemplateInfo>(setter)));
     DirectHandle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
-    Cast<JSFunction>(setter)->set_code(*trampoline);
+    Cast<JSFunction>(setter)->UpdateCode(*trampoline);
   }
   RETURN_ON_EXCEPTION(isolate, JSObject::DefineOwnAccessorIgnoreAttributes(
                                    object, name, getter, setter, attributes));
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 5ab671c8c4168a..eddb472d560856 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -161,11 +161,6 @@
 #endif  // V8_OS_LINUX || V8_OS_DARWIN || V8_OS_FREEBSD
 
 #if V8_OS_WIN
-#include <windows.h>
-
-// This has to come after windows.h.
-#include <versionhelpers.h>
-
 #include "include/v8-wasm-trap-handler-win.h"
 #include "src/trap-handler/handler-inside-win.h"
 #if defined(V8_OS_WIN64)
@@ -414,7 +409,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
         backing_memory_base, backing_memory_size, kAllocationGranularity);
 
     end_of_accessible_region_ = region_alloc_->begin();
 
-    // Install a on-merge callback to discard or decommit unused pages.
+    // Install an on-merge callback to discard or decommit unused pages.
     region_alloc_->set_on_merge_callback([this](i::Address start,
                                                 size_t size) {
       mutex_.AssertHeld();
@@ -2297,6 +2292,11 @@ Local<String> ModuleRequest::GetSpecifier() const {
   return ToApiHandle<String>(i::direct_handle(self->specifier(), i_isolate));
 }
 
+ModuleImportPhase ModuleRequest::GetPhase() const {
+  auto self = Utils::OpenDirectHandle(this);
+  return self->phase();
+}
+
 int ModuleRequest::GetSourceOffset() const {
   return Utils::OpenDirectHandle(this)->position();
 }
@@ -2430,11 +2430,13 @@ int Module::GetIdentityHash() const {
 }
 
 Maybe<bool> Module::InstantiateModule(Local<Context> context,
-                                      Module::ResolveModuleCallback callback) {
+                                      ResolveModuleCallback module_callback,
+                                      ResolveSourceCallback source_callback) {
   auto i_isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
   ENTER_V8(i_isolate, context, Module, InstantiateModule, i::HandleScope);
-  has_exception = !i::Module::Instantiate(i_isolate, Utils::OpenHandle(this),
-                                          context, callback);
+  has_exception =
+      !i::Module::Instantiate(i_isolate, Utils::OpenHandle(this), context,
+                              module_callback, source_callback);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
   return Just(true);
 }
@@ -3717,11 +3719,11 @@ TYPED_ARRAYS_BASE(VALUE_IS_TYPED_ARRAY)
 #undef VALUE_IS_TYPED_ARRAY
 
 bool Value::IsFloat16Array() const {
-  Utils::ApiCheck(i::v8_flags.js_float16array, "Value::IsFloat16Array",
-                  "Float16Array is not supported");
   auto obj = *Utils::OpenDirectHandle(this);
   return i::IsJSTypedArray(obj) &&
-         i::Cast<i::JSTypedArray>(obj)->type() == i::kExternalFloat16Array;
+         i::Cast<i::JSTypedArray>(obj)->type() == i::kExternalFloat16Array &&
+         Utils::ApiCheck(i::v8_flags.js_float16array, "Value::IsFloat16Array",
+                         "Float16Array is not supported");
 }
 
 bool Value::IsDataView() const {
@@ -4184,7 +4186,7 @@ std::unique_ptr<v8::BackingStore> v8::BackingStore::Reallocate(
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   API_RCS_SCOPE(i_isolate, ArrayBuffer, BackingStore_Reallocate);
   Utils::ApiCheck(byte_length <= i::JSArrayBuffer::kMaxByteLength,
-                  "v8::BackingStore::Reallocate", "byte_lenght is too large");
+                  "v8::BackingStore::Reallocate", "byte_length is too large");
   ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::BackingStore* i_backing_store =
       reinterpret_cast<i::BackingStore*>(backing_store.get());
@@ -8197,7 +8199,7 @@ MaybeLocal<Array> v8::Array::New(
     Local<Context> context, size_t length,
     std::function<MaybeLocal<v8::Value>()> next_value_callback) {
   PREPARE_FOR_EXECUTION(context, Array, New);
-  // We should never see a exception here as V8 will not create an
+  // We should never see an exception here as V8 will not create an
   // exception and the callback is invoked by the embedder where the exception
   // is already scheduled.
   USE(has_exception);
@@ -9560,10 +9562,10 @@ void BigInt::ToWordsArray(int* sign_bit, int* word_count,
                           words);
 }
 
-void Isolate::ReportExternalAllocationLimitReached() {
+void Isolate::HandleExternalMemoryInterrupt() {
   i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
   if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
-  heap->ReportExternalMemoryPressure();
+  heap->HandleExternalMemoryInterrupt();
 }
 
 HeapProfiler* Isolate::GetHeapProfiler() {
@@ -10271,20 +10273,19 @@ void Isolate::GetStackSample(const RegisterState& state, void** frames,
 int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
     int64_t change_in_bytes) {
   // Try to check for unreasonably large or small values from the embedder.
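  // Illustrative embedder-side sketch (not part of this patch): an embedder
  // that hands V8 a 1 MiB external buffer reports it on allocation and
  // balances it on free, e.g.
  //   isolate->AdjustAmountOfExternalAllocatedMemory(1 << 20);
  //   ...
  //   isolate->AdjustAmountOfExternalAllocatedMemory(-(1 << 20));
  // With the change below, crossing the limit routes through the renamed
  // HandleExternalMemoryInterrupt() instead of the old
  // ReportExternalAllocationLimitReached() path.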
-  const int64_t kMaxReasonableBytes = int64_t(1) << 60;
-  const int64_t kMinReasonableBytes = -kMaxReasonableBytes;
+  static constexpr int64_t kMaxReasonableBytes = int64_t(1) << 60;
+  static constexpr int64_t kMinReasonableBytes = -kMaxReasonableBytes;
   static_assert(kMaxReasonableBytes >= i::JSArrayBuffer::kMaxByteLength);
-
   CHECK(kMinReasonableBytes <= change_in_bytes &&
         change_in_bytes < kMaxReasonableBytes);
 
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
-  int64_t amount = i_isolate->heap()->update_external_memory(change_in_bytes);
+  int64_t amount = i_isolate->heap()->UpdateExternalMemory(change_in_bytes);
 
   if (change_in_bytes <= 0) return amount;
 
-  if (amount > i_isolate->heap()->external_memory_limit()) {
-    ReportExternalAllocationLimitReached();
+  if (amount > i_isolate->heap()->external_memory_limit_for_interrupt()) {
+    HandleExternalMemoryInterrupt();
   }
   return amount;
 }
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 4e9992d6a06ea3..970bd4a6b1e29a 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -24,23 +24,23 @@ namespace internal {
 namespace wasm {
 
 #ifdef DEBUG
-#define FAIL_AND_RETURN(ret, msg)                                         \
-  failed_ = true;                                                         \
-  failure_message_ = msg;                                                 \
-  failure_location_ = static_cast<int>(scanner_.Position());              \
-  if (v8_flags.trace_asm_parser) {                                        \
-    PrintF("[asm.js failure: %s, token: '%s', see: %s:%d]\n", msg,        \
-           scanner_.Name(scanner_.Token()).c_str(), __FILE__, __LINE__);  \
-  }                                                                       \
-  return ret;
+#define TRACE_ASM_PARSER(...)      \
+  if (v8_flags.trace_asm_parser) { \
+    PrintF(__VA_ARGS__);           \
+  }
 #else
-#define FAIL_AND_RETURN(ret, msg)                                         \
-  failed_ = true;                                                         \
-  failure_message_ = msg;                                                 \
-  failure_location_ = static_cast<int>(scanner_.Position());              \
-  return ret;
+#define TRACE_ASM_PARSER(...)
 #endif
 
+#define FAIL_AND_RETURN(ret, msg)                                          \
+  failed_ = true;                                                          \
+  failure_message_ = msg;                                                  \
+  failure_location_ = static_cast<int>(scanner_.Position());               \
+  TRACE_ASM_PARSER("[asm.js failure: %s, token: '%s', see: %s:%d]\n", msg, \
+                   scanner_.Name(scanner_.Token()).c_str(), __FILE__,      \
+                   __LINE__);                                              \
+  return ret;
+
 #define FAIL(msg) FAIL_AND_RETURN(, msg)
 #define FAILn(msg) FAIL_AND_RETURN(nullptr, msg)
 
@@ -2572,8 +2572,18 @@ void AsmJsParser::GatherCases(ZoneVector<int32_t>* cases) {
   scanner_.Seek(start);
 }
 
+#undef TOK
+#undef RECURSEn
+#undef RECURSE
+#undef RECURSE_OR_RETURN
+#undef EXPECT_TOKENn
+#undef EXPECT_TOKEN
+#undef EXPECT_TOKEN_OR_RETURN
+#undef FAILn
+#undef FAIL
+#undef FAIL_AND_RETURN
+#undef TRACE_ASM_PARSER
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
-
-#undef RECURSE
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 64cba134ec81d2..558f5f9631d383 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -915,7 +915,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
     // so we don't care that it calls sloppy eval.
     if (is_script_scope()) return;
 
-    // Sloppy eval in a eval scope can only introduce variables into the outer
+    // Sloppy eval in an eval scope can only introduce variables into the outer
     // (non-eval) declaration scope, not into this eval scope.
     if (is_eval_scope()) {
 #ifdef DEBUG
diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h
index 59e786bdd1f23f..d64d361e0f59ea 100644
--- a/deps/v8/src/base/bit-field.h
+++ b/deps/v8/src/base/bit-field.h
@@ -41,21 +41,14 @@ class BitField final {
   static constexpr U kMask = ((U{1} << kShift) << kSize) - (U{1} << kShift);
   static constexpr int kLastUsedBit = kShift + kSize - 1;
   static constexpr U kNumValues = U{1} << kSize;
-
-  // Value for the field with all bits set.
-  // If clang complains
-  // "constexpr variable 'kMax' must be initialized by a constant expression"
-  // on this line, then you're creating a BitField for an enum with more bits
-  // than needed for the enum values. Either reduce the BitField size,
-  // or give the enum an explicit underlying type.
-  static constexpr T kMax = static_cast<T>(kNumValues - 1);
+  static constexpr U kMax = kNumValues - 1;
 
   template <class T2, int size2>
   using Next = BitField<T2, kShift + kSize, size2, U>;
 
   // Tells whether the provided value fits into the bit field.
   static constexpr bool is_valid(T value) {
-    return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
+    return (static_cast<U>(value) & ~kMax) == 0;
   }
 
   // Returns a type U with the bit field value encoded.
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index 42c3cc1cd6937c..e284b4b4d7aa6e 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -244,6 +244,10 @@ bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
   return page_allocator_->DecommitPages(address, size);
 }
 
+bool BoundedPageAllocator::SealPages(void* address, size_t size) {
+  return page_allocator_->SealPages(address, size);
+}
+
 const char* BoundedPageAllocator::AllocationStatusToString(
     AllocationStatus allocation_status) {
   switch (allocation_status) {
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index 0106f93c5d0d0a..d2f5bc3b80482a 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -119,6 +119,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
 
   bool DecommitPages(void* address, size_t size) override;
 
+  bool SealPages(void* address, size_t size) override;
+
   AllocationStatus get_last_allocation_status() const {
     return allocation_status_;
   }
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 5708ce20c8aab2..207f46b057d2be 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -58,7 +58,7 @@ constexpr int kReturnAddressStackSlotCount =
     V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
 
 // Number of bits to represent the page size for paged spaces.
-#if (defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_PPC64)) && !defined(_AIX)
+#if defined(V8_HOST_ARCH_PPC64) && !defined(V8_OS_AIX)
 // Native PPC linux has large (64KB) physical pages.
 // Simulator (and Aix) need to use the same value as x64.
 constexpr int kPageSizeBits = 19;
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 7c1c0a78b6410b..b2664e2cd88c36 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -98,10 +98,9 @@
 // do not support adding noexcept to default members.
 // Disabled on MSVC because constructors of standard containers are not noexcept
 // there.
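// Illustrative usage sketch (not from this patch): with the definition below,
// special members can be annotated portably, e.g.
//   class Buffer {
//    public:
//     Buffer(Buffer&& other) V8_NOEXCEPT;
//     Buffer& operator=(Buffer&& other) V8_NOEXCEPT;
//   };
// On toolchains where V8_NOEXCEPT expands to nothing, the declarations stay
// valid and merely drop the noexcept guarantee.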
-#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) &&                        \
-      !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) &&    \
-      !defined(V8_TARGET_ARCH_PPC64) && !defined(V8_TARGET_ARCH_RISCV64) && \
-      !defined(V8_TARGET_ARCH_RISCV32)) ||                                  \
+#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) &&                           \
+      !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC64) &&     \
+      !defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \
      defined(__clang__))
 #define V8_NOEXCEPT noexcept
 #else
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index df630a87a4fe13..e1f7cda0d72416 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -20,7 +20,7 @@
 #if V8_OS_QNX
 #include <sys/syspage.h>  // cpuinfo
 #endif
-#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)
+#if V8_OS_LINUX && V8_HOST_ARCH_PPC64
 #include <elf.h>
 #endif
 #if V8_OS_AIX
@@ -54,8 +54,6 @@
 #include "src/base/platform/wrappers.h"
 #if V8_OS_WIN
 #include <windows.h>
-
-#include "src/base/win32-headers.h"
 #endif
 
 namespace v8 {
@@ -430,6 +428,7 @@ CPU::CPU()
       has_avx_(false),
       has_avx2_(false),
       has_avx_vnni_(false),
+      has_avx_vnni_int8_(false),
       has_fma3_(false),
       has_f16c_(false),
       has_bmi1_(false),
@@ -512,6 +511,7 @@ CPU::CPU()
     has_avx_ = (cpu_info[2] & 0x10000000) != 0;
     has_avx2_ = (cpu_info70[1] & 0x00000020) != 0;
     has_avx_vnni_ = (cpu_info71[0] & 0x00000010) != 0;
+    has_avx_vnni_int8_ = (cpu_info71[3] & 0x00000020) != 0;
     has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
     has_f16c_ = (cpu_info[2] & 0x20000000) != 0;
     // CET shadow stack feature flag. See
@@ -900,7 +900,7 @@ CPU::CPU()
 #endif  // V8_OS_IOS
 #endif  // V8_OS_WIN
 
-#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
+#elif V8_HOST_ARCH_PPC64
 
 #ifndef USE_SIMULATOR
 #if V8_OS_LINUX
@@ -908,11 +908,7 @@ CPU::CPU()
   char* auxv_cpu_type = nullptr;
   FILE* fp = base::Fopen("/proc/self/auxv", "r");
   if (fp != nullptr) {
-#if V8_TARGET_ARCH_PPC64
     Elf64_auxv_t entry;
-#else
-    Elf32_auxv_t entry;
-#endif
     for (;;) {
       size_t n = fread(&entry, sizeof(entry), 1, fp);
       if (n == 0 || entry.a_type == AT_NULL) {
@@ -941,18 +937,6 @@ CPU::CPU()
       part_ = kPPCPower9;
     } else if (strcmp(auxv_cpu_type, "power8") == 0) {
       part_ = kPPCPower8;
-    } else if (strcmp(auxv_cpu_type, "power7") == 0) {
-      part_ = kPPCPower7;
-    } else if (strcmp(auxv_cpu_type, "power6") == 0) {
-      part_ = kPPCPower6;
-    } else if (strcmp(auxv_cpu_type, "power5") == 0) {
-      part_ = kPPCPower5;
-    } else if (strcmp(auxv_cpu_type, "ppc970") == 0) {
-      part_ = kPPCG5;
-    } else if (strcmp(auxv_cpu_type, "ppc7450") == 0) {
-      part_ = kPPCG4;
-    } else if (strcmp(auxv_cpu_type, "pa6t") == 0) {
-      part_ = kPPCPA6T;
     }
   }
 
@@ -967,15 +951,6 @@ CPU::CPU()
     case POWER_8:
       part_ = kPPCPower8;
       break;
-    case POWER_7:
-      part_ = kPPCPower7;
-      break;
-    case POWER_6:
-      part_ = kPPCPower6;
-      break;
-    case POWER_5:
-      part_ = kPPCPower5;
-      break;
   }
 #endif  // V8_OS_AIX
 #endif  // !USE_SIMULATOR
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 9f94782efdec51..66cbfcabfea736 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -64,17 +64,7 @@ class V8_BASE_EXPORT CPU final {
   static const int kNvidiaDenverV10 = 0x002;
 
   // PPC-specific part codes
-  enum {
-    kPPCPower5,
-    kPPCPower6,
-    kPPCPower7,
-    kPPCPower8,
-    kPPCPower9,
-    kPPCPower10,
-    kPPCG4,
-    kPPCG5,
-    kPPCPA6T
-  };
+  enum { kPPCPower8, kPPCPower9, kPPCPower10 };
 
   // General features
   bool has_fpu() const { return has_fpu_; }
@@ -96,6 +86,7 @@ class V8_BASE_EXPORT CPU final {
   bool has_avx() const { return has_avx_; }
   bool has_avx2() const { return has_avx2_; }
   bool has_avx_vnni() const { return has_avx_vnni_; }
+  bool has_avx_vnni_int8() const { return has_avx_vnni_int8_; }
   bool has_fma3() const { return has_fma3_; }
   bool has_f16c() const { return has_f16c_; }
   bool has_bmi1() const { return has_bmi1_; }
@@ -180,6 +171,7 @@ class V8_BASE_EXPORT CPU final {
   bool has_avx_;
   bool has_avx2_;
   bool has_avx_vnni_;
+  bool has_avx_vnni_int8_;
   bool has_fma3_;
   bool has_f16c_;
   bool has_bmi1_;
diff --git a/deps/v8/src/base/debug/stack_trace.h b/deps/v8/src/base/debug/stack_trace.h
index 2acd0029ec5b45..fc71a5631aaa87 100644
--- a/deps/v8/src/base/debug/stack_trace.h
+++ b/deps/v8/src/base/debug/stack_trace.h
@@ -36,7 +36,7 @@ V8_BASE_EXPORT bool EnableInProcessStackDumping();
 V8_BASE_EXPORT void DisableSignalStackDump();
 
 // A stacktrace can be helpful in debugging. For example, you can include a
-// stacktrace member in a object (probably around #ifndef NDEBUG) so that you
+// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
 // can later see where the given object was created from.
 class V8_BASE_EXPORT StackTrace {
  public:
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index e71b63fd7c1771..276db1ee25f212 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -707,7 +707,7 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
  * Algorithm
  *      1. Since tan(-x) = -tan(x), we need only to consider positive x.
  *      2. if x < 2^-28 (hx<0x3E300000 0), return x with inexact if x!=0.
- *      3. tan(x) is approximated by a odd polynomial of degree 27 on
+ *      3. tan(x) is approximated by an odd polynomial of degree 27 on
  *         [0,0.67434]
  *                               3             27
  *              tan(x) ~ x + T1*x + ... + T13*x
diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h
index a7fa0de2c1477e..164b55eb58a3f4 100644
--- a/deps/v8/src/base/iterator.h
+++ b/deps/v8/src/base/iterator.h
@@ -139,6 +139,22 @@ auto IterateWithoutLast(const iterator_range<T>& t) {
   return IterateWithoutLast(range_copy);
 }
 
+// {IterateWithoutFirst} returns a container adapter usable in a range-based
+// "for" statement for iterating all elements without the first in a forward
+// order. It performs a check whether the container is empty.
+template <typename T>
+auto IterateWithoutFirst(T& t) {
+  DCHECK_NE(std::begin(t), std::end(t));
+  auto new_begin = std::begin(t);
+  return make_iterator_range(++new_begin, std::end(t));
+}
+
+template <typename T>
+auto IterateWithoutFirst(const iterator_range<T>& t) {
+  iterator_range<T> range_copy = {t.begin(), t.end()};
+  return IterateWithoutFirst(range_copy);
+}
+
 // TupleIterator is an iterator wrapping around multiple iterators. It is used
 // by the `zip` function below to iterate over multiple containers at once.
 template <typename... Iterators>
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 868ed15fb60302..488729dd5a9d08 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -423,9 +423,9 @@ bool is_inbounds(float_t v) {
 // Setup for Windows shared library export.
 #define V8_EXPORT_ENUM
 #ifdef BUILDING_V8_SHARED_PRIVATE
-#define V8_EXPORT_PRIVATE
+#define V8_EXPORT_PRIVATE __declspec(dllexport)
 #elif USING_V8_SHARED_PRIVATE
-#define V8_EXPORT_PRIVATE
+#define V8_EXPORT_PRIVATE __declspec(dllimport)
 #else
 #define V8_EXPORT_PRIVATE
 #endif  // BUILDING_V8_SHARED
@@ -435,8 +435,8 @@ bool is_inbounds(float_t v) {
 
 // Setup for Linux shared library export.
 #if V8_HAS_ATTRIBUTE_VISIBILITY
 #ifdef BUILDING_V8_SHARED_PRIVATE
-#define V8_EXPORT_PRIVATE
-#define V8_EXPORT_ENUM
+#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
+#define V8_EXPORT_ENUM V8_EXPORT_PRIVATE
 #else
 #define V8_EXPORT_PRIVATE
 #define V8_EXPORT_ENUM
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 97ce70360bf9cf..8a0ee3cf4f3ef9 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -162,5 +162,9 @@ bool PageAllocator::DecommitPages(void* address, size_t size) {
   return base::OS::DecommitPages(address, size);
 }
 
+bool PageAllocator::SealPages(void* address, size_t size) {
+  return base::OS::SealPages(address, size);
+}
+
 }  // namespace base
 }  // namespace v8
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index 8d43376f91d82e..a6084d725d1ec0 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -52,6 +52,8 @@ class V8_BASE_EXPORT PageAllocator
 
   bool DecommitPages(void* address, size_t size) override;
 
+  bool SealPages(void* address, size_t size) override;
+
  private:
   friend class v8::base::SharedMemory;
 
diff --git a/deps/v8/src/base/platform/memory.h b/deps/v8/src/base/platform/memory.h
index e1b7dc3a8af0ce..4d3e1876c85789 100644
--- a/deps/v8/src/base/platform/memory.h
+++ b/deps/v8/src/base/platform/memory.h
@@ -19,13 +19,15 @@
 
 #if V8_OS_DARWIN
 #include <malloc/malloc.h>
+#elif V8_OS_OPENBSD
+#include <sys/malloc.h>
 #elif V8_OS_ZOS
 #include <stdlib.h>
 #else
 #include <malloc.h>
 #endif
 
-#if (V8_OS_POSIX && !V8_OS_AIX && !V8_OS_SOLARIS && !V8_OS_ZOS) || V8_OS_WIN
+#if (V8_OS_POSIX && !V8_OS_AIX && !V8_OS_SOLARIS && !V8_OS_ZOS && !V8_OS_OPENBSD) || V8_OS_WIN
 #define V8_HAS_MALLOC_USABLE_SIZE 1
 #endif
 
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index f472e299f938c1..1cafc3608773e2 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -9,10 +9,10 @@
 #include <pthread.h>
 #include <semaphore.h>
 #include <stdarg.h>
-#include <strings.h>  // index
-#include <sys/mman.h>  // mmap & munmap
+#include <strings.h>   // index
+#include <sys/mman.h>  // mmap & munmap
 #include <sys/time.h>
-#include <unistd.h>  // sysconf
+#include <unistd.h>    // sysconf
 
 #include <cmath>
@@ -76,7 +76,7 @@ class CygwinTimezoneCache : public PosixTimezoneCache {
 const char* CygwinTimezoneCache::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
   struct tm tm;
   struct tm* t = localtime_r(&tv, &tm);
   if (nullptr == t) return "";
@@ -204,6 +204,9 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
   return ptr;
 }
 
+// static
+bool OS::SealPages(void* address, size_t size) { return false; }
+
 // static
 bool OS::HasLazyCommits() {
   // TODO(alph): implement for the platform.
@@ -252,8 +255,8 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
           lib_name[strlen(lib_name) - 1] = '\0';
         } else {
           // No library name found, just record the raw address range.
-          snprintf(lib_name, kLibNameLen,
-                   "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+          snprintf(lib_name, kLibNameLen, "%08" V8PRIxPTR "-%08" V8PRIxPTR, start,
+                   end);
         }
         result.push_back(SharedLibraryAddress(lib_name, start, end));
       } else {
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index a50dde14a2b298..2135ab709048a5 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -320,6 +320,9 @@ bool OS::DecommitPages(void* address, size_t size) {
          DiscardSystemPages(address, size);
 }
 
+// static
+bool OS::SealPages(void* address, size_t size) { return false; }
+
 // static
 bool OS::CanReserveAddressSpace() { return true; }
 
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index a0f43374fa982d..802dfeb0e65dcc 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -55,6 +55,8 @@
 #if V8_OS_DARWIN
 #include <mach/mach.h>
 #include <malloc/malloc.h>
+#elif V8_OS_OPENBSD
+#include <pthread_np.h>
 #elif !V8_OS_ZOS
 #include <malloc.h>
 #endif
@@ -336,21 +338,27 @@ void* OS::GetRandomMmapAddr() {
   raw_addr &= 0x007fffff0000ULL;
   raw_addr += 0x7e8000000000ULL;
 #else
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_X64
   // Currently available CPUs have 48 bits of virtual addressing. Truncate
   // the hint address to 46 bits to give the kernel a fighting chance of
   // fulfilling our placement request.
   raw_addr &= uint64_t{0x3FFFFFFFF000};
+#elif V8_TARGET_ARCH_ARM64
+#if defined(V8_TARGET_OS_LINUX) || defined(V8_TARGET_OS_ANDROID)
+  // On Linux, the default virtual address space is limited to 39 bits when
+  // using 4KB pages, see arch/arm64/Kconfig. We truncate to 38 bits.
+  raw_addr &= uint64_t{0x3FFFFFF000};
+#else
+  // On macOS and elsewhere, we use 46 bits, same as on x64.
+  raw_addr &= uint64_t{0x3FFFFFFFF000};
+#endif
 #elif V8_TARGET_ARCH_PPC64
 #if V8_OS_AIX
-  // AIX: 64 bits of virtual addressing, but we limit address range to:
-  //   a) minimize Segment Lookaside Buffer (SLB) misses and
+  // AIX: 64 bits of virtual addressing, but we limit address range to minimize
+  // Segment Lookaside Buffer (SLB) misses.
   raw_addr &= uint64_t{0x3FFFF000};
   // Use extra address space to isolate the mmap regions.
   raw_addr += uint64_t{0x400000000000};
-#elif V8_TARGET_BIG_ENDIAN
-  // Big-endian Linux: 42 bits of virtual addressing.
-  raw_addr &= uint64_t{0x03FFFFFFF000};
 #else
   // Little-endian Linux: 46 bits of virtual addressing.
   raw_addr &= uint64_t{0x3FFFFFFF0000};
@@ -613,6 +621,20 @@ bool OS::DecommitPages(void* address, size_t size) {
 #endif  // !defined(_AIX)
 #endif  // !V8_OS_ZOS
 
+// static
+bool OS::SealPages(void* address, size_t size) {
+#ifdef V8_ENABLE_MEMORY_SEALING
+#if V8_OS_LINUX && defined(__NR_mseal)
+  long ret = syscall(__NR_mseal, address, size, 0);
+  return ret == 0;
+#else
+  return false;
+#endif
+#else  // V8_ENABLE_MEMORY_SEALING
+  return false;
+#endif
+}
+
 // static
 bool OS::CanReserveAddressSpace() { return true; }
 
@@ -729,7 +751,7 @@ void OS::DebugBreak() {
   asm("break");
 #elif V8_HOST_ARCH_LOONG64
   asm("break 0");
-#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
+#elif V8_HOST_ARCH_PPC64
   asm("twge 2,2");
 #elif V8_HOST_ARCH_IA32
   asm("int $3");
@@ -1349,6 +1371,15 @@ bool MainThreadIsCurrentThread() {
 Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
 #if V8_OS_ZOS
   return __get_stack_start();
+#elif V8_OS_OPENBSD
+  stack_t stack;
+  int error = pthread_stackseg_np(pthread_self(), &stack);
+  if (error) {
+    DCHECK(MainThreadIsCurrentThread());
+    return nullptr;
+  }
+  void* stack_start = reinterpret_cast<uint8_t*>(stack.ss_sp) + stack.ss_size;
+  return stack_start;
 #else
   pthread_attr_t attr;
   int error = pthread_getattr_np(pthread_self(), &attr);
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index fa03a80ef3d95e..5fb76f460bef4b 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -4,6 +4,8 @@
 
 // Platform-specific code for Win32.
 
+#include "src/base/platform/platform-win32.h"
+
 // Secure API functions are not available using MinGW with msvcrt.dll
 // on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
 // disable definition of secure API functions in standard headers that
@@ -32,12 +34,10 @@
 #include "src/base/bits.h"
 #include "src/base/lazy-instance.h"
 #include "src/base/macros.h"
-#include "src/base/platform/platform-win32.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
 #include "src/base/timezone-cache.h"
 #include "src/base/utils/random-number-generator.h"
-#include "src/base/win32-headers.h"
 
 #if defined(_MSC_VER)
 #include <crtdbg.h>
@@ -980,7 +980,7 @@ void* AllocateInternal(void* hint, size_t size, size_t alignment,
 
 void CheckIsOOMError(int error) {
   // We expect one of ERROR_NOT_ENOUGH_MEMORY or ERROR_COMMITMENT_LIMIT. We'd
-  // still like to get the actual error code when its not one of the expected
+  // still like to get the actual error code when it's not one of the expected
   // errors, so use the construct below to achieve that.
   if (error != ERROR_NOT_ENOUGH_MEMORY) CHECK_EQ(ERROR_COMMITMENT_LIMIT, error);
 }
@@ -1121,6 +1121,9 @@ bool OS::DecommitPages(void* address, size_t size) {
   return VirtualFree(address, size, MEM_DECOMMIT) != 0;
 }
 
+// static
+bool OS::SealPages(void* address, size_t size) { return false; }
+
 // static
 bool OS::CanReserveAddressSpace() {
   return VirtualAlloc2 != nullptr && MapViewOfFile3 != nullptr &&
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 8f4901f501f684..a7b57d9fc806d7 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -64,6 +64,12 @@ extern "C" unsigned long __readfsdword(unsigned long);  // NOLINT(runtime/int)
 #endif  // V8_CC_MSVC && V8_HOST_ARCH_IA32
 #endif  // V8_NO_FAST_TLS
 
+#if V8_OS_OPENBSD
+#define PERMISSION_MUTABLE_SECTION __attribute__((section(".openbsd.mutable")))
+#else
+#define PERMISSION_MUTABLE_SECTION
+#endif
+
 namespace heap::base {
 class Stack;
 }
@@ -402,6 +408,8 @@ class V8_BASE_EXPORT OS {
 
   V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
 
+  V8_WARN_UNUSED_RESULT static bool SealPages(void* address, size_t size);
+
   V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace();
 
   V8_WARN_UNUSED_RESULT static std::optional<AddressSpaceReservation>
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index e04e5e44204ae5..4bede53691f51a 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -38,7 +38,6 @@
 #include <windows.h>
 
 #include "src/base/lazy-instance.h"
-#include "src/base/win32-headers.h"
 #endif
 #include "src/base/cpu.h"
 #include "src/base/logging.h"
@@ -888,8 +887,8 @@ double ThreadTicks::TSCTicksPerSecond() {
   static const uint64_t tsc_initial = __rdtsc();
   static const uint64_t perf_counter_initial = QPCNowRaw();
 
-  // Make a another reading of the TSC and the performance counter every time
-  // that this function is called.
+  // Make another reading of the TSC and the performance counter every time
+  // this function is called.
   uint64_t tsc_now = __rdtsc();
   uint64_t perf_counter_now = QPCNowRaw();
 
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index e5d38953ee19fc..4bab9166baa1e3 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -24,8 +24,6 @@
 #include "src/base/macros.h"
 #if V8_OS_WIN
 #include <windows.h>
-
-#include "src/base/win32-headers.h"
 #endif
 
 #if V8_OS_STARBOARD
diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.cc b/deps/v8/src/base/virtual-address-space-page-allocator.cc
index 5e0b5beb149d98..21e2d0eb2f0fd9 100644
--- a/deps/v8/src/base/virtual-address-space-page-allocator.cc
+++ b/deps/v8/src/base/virtual-address-space-page-allocator.cc
@@ -73,5 +73,9 @@ bool VirtualAddressSpacePageAllocator::DecommitPages(void* address,
   return vas_->DecommitPages(reinterpret_cast<Address>(address), size);
 }
+bool VirtualAddressSpacePageAllocator::SealPages(void* address, size_t size) {
+  return false;
+}
+
 }  // namespace base
 }  // namespace v8
diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.h b/deps/v8/src/base/virtual-address-space-page-allocator.h
index 1b9990e0d9ea57..0e5cb5bba63cd5 100644
--- a/deps/v8/src/base/virtual-address-space-page-allocator.h
+++ b/deps/v8/src/base/virtual-address-space-page-allocator.h
@@ -55,6 +55,8 @@ class V8_BASE_EXPORT VirtualAddressSpacePageAllocator
 
   bool DecommitPages(void* address, size_t size) override;
 
+  bool SealPages(void* address, size_t size) override;
+
  private:
   // Client of this class must keep the VirtualAddressSpace alive during the
   // lifetime of this instance.
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index c721156bf7e1ba..e30e0036a15e27 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -60,6 +60,14 @@ namespace v8 {
 namespace internal {
 namespace baseline {
 
+#define __ basm_.
+
+#define RCS_BASELINE_SCOPE(rcs)                               \
+  RCS_SCOPE(stats_,                                           \
+            local_isolate_->is_main_thread()                  \
+                ? RuntimeCallCounterId::kCompileBaseline##rcs \
+                : RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
+
 template <typename IsolateT>
 Handle<TrustedByteArray> BytecodeOffsetTableBuilder::ToBytecodeOffsetTable(
     IsolateT* isolate) {
@@ -294,14 +302,6 @@ BaselineCompiler::BaselineCompiler(
       base::bits::RoundUpToPowerOfTwo(16 + bytecode_->Size() / 4));
 }
 
-#define __ basm_.
-
-#define RCS_BASELINE_SCOPE(rcs)                               \
-  RCS_SCOPE(stats_,                                           \
-            local_isolate_->is_main_thread()                  \
-                ? RuntimeCallCounterId::kCompileBaseline##rcs \
-                : RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
-
 void BaselineCompiler::GenerateCode() {
   {
     RCS_BASELINE_SCOPE(PreVisit);
@@ -521,7 +521,7 @@ void BaselineCompiler::VisitSingleBytecode() {
   case interpreter::Bytecode::k##name: \
     Visit##name();                     \
     break;
-    BYTECODE_LIST(BYTECODE_CASE)
+    BYTECODE_LIST(BYTECODE_CASE, BYTECODE_CASE)
 #undef BYTECODE_CASE
   }
 }
@@ -570,7 +570,7 @@ void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
 #endif
 
 #define DECLARE_VISITOR(name, ...) void Visit##name();
-BYTECODE_LIST(DECLARE_VISITOR)
+BYTECODE_LIST(DECLARE_VISITOR, DECLARE_VISITOR)
 #undef DECLARE_VISITOR
 
 #define DECLARE_VISITOR(name, ...) \
@@ -2413,6 +2413,9 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
   assembler_->Pop(kInterpreterAccumulatorRegister);
 }
 
+#undef RCS_BASELINE_SCOPE
+#undef __
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index e83c27e8fabcf9..22da5d4c453b34 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -147,7 +147,7 @@ class BaselineCompiler {
 
   // Single bytecode visitors.
 #define DECLARE_VISITOR(name, ...) void Visit##name();
-  BYTECODE_LIST(DECLARE_VISITOR)
+  BYTECODE_LIST(DECLARE_VISITOR, DECLARE_VISITOR)
 #undef DECLARE_VISITOR
 
   // Intrinsic call visitors.
diff --git a/deps/v8/src/builtins/DEPS b/deps/v8/src/builtins/DEPS index b0a17ca69a8ca2..c10e7fe882151c 100644 --- a/deps/v8/src/builtins/DEPS +++ b/deps/v8/src/builtins/DEPS @@ -5,8 +5,12 @@ specific_include_rules = { "setup-builtins-internal.cc": [ "+src/compiler/pipeline.h", + "+src/compiler/turboshaft/builtin-compiler.h", "+src/compiler/turboshaft/phase.h", ], + "number-builtins-reducer-inl.h": [ + "+src/compiler", + ], ".*-tsa.cc": [ "+src/compiler", ], diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 6751da7cb0463e..456936b3e295aa 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -1935,8 +1935,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ cmp(maybe_target_code, Operand(Smi::zero())); __ b(ne, &jump_to_optimized_code); } @@ -2802,10 +2803,10 @@ struct SaveWasmParamsScope { // This builtin creates the following stack frame: // -// [ feedback vector ] <-- sp // Added by this builtin. -// [ Wasm instance ] // Added by this builtin. -// [ WASM frame marker ] // Already there on entry. -// [ saved fp ] <-- fp // Already there on entry. +// [ feedback vector ] <-- sp // Added by this builtin. +// [ Wasm instance data ] // Added by this builtin. +// [ WASM frame marker ] // Already there on entry. +// [ saved fp ] <-- fp // Already there on entry. void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register func_index = wasm::kLiftoffFrameSetupFunctionReg; Register vector = r5; @@ -2813,13 +2814,13 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ ldr(vector, - FieldMemOperand(kWasmInstanceRegister, + FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2)); __ ldr(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); - __ push(kWasmInstanceRegister); + __ push(kWasmImplicitArgRegister); __ push(vector); __ Ret(); @@ -2833,8 +2834,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ str(scratch, MemOperand(sp)); { SaveWasmParamsScope save_params(masm); - // Arguments to the runtime function: instance, func_index. - __ push(kWasmInstanceRegister); + // Arguments to the runtime function: instance data, func_index. + __ push(kWasmImplicitArgRegister); __ SmiTag(func_index); __ push(func_index); // Allocate a stack slot where the runtime function can spill a pointer @@ -2861,8 +2862,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { { SaveWasmParamsScope save_params(masm); - // Push the Wasm instance as an explicit argument to the runtime function. - __ push(kWasmInstanceRegister); + // Push the instance data as an explicit argument to the runtime function. + __ push(kWasmImplicitArgRegister); // Push the function index as second argument. __ push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. 
CEntry will use it to @@ -2876,9 +2877,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Saved parameters are restored at the end of this block. } - // After the instance register has been restored, we can add the jump table - // start to the jump table offset already stored in r8. - __ ldr(r9, FieldMemOperand(kWasmInstanceRegister, + // After the instance data register has been restored, we can add the jump + // table start to the jump table offset already stored in r8. + __ ldr(r9, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset)); __ add(r8, r8, r9); } @@ -3107,7 +3108,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { __ Zero(MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset), - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } // TODO(irezvov): Consolidate with arm64 RegisterAllocator. @@ -3241,19 +3242,21 @@ class RegisterAllocator { #define FREE_REG(Name) regs.Free(&Name); // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { - __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset)); +// depending on the data's type, and places the result in the input register. +void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { + __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset)); __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE); Label instance; Label end; __ b(eq, &instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset)); + data, FieldMemOperand(data, WasmImportData::kNativeContextOffset)); __ jmp(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + data, + FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3274,14 +3277,7 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { // arbitrarily. __ Push(r6, wasm::kGpParamRegisters[3], wasm::kGpParamRegisters[2], wasm::kGpParamRegisters[1]); - // Reserve fixed slots for the CSA wrapper. - // Two slots for stack-switching (central stack pointer and secondary stack - // limit): - Register scratch = r1; - __ mov(scratch, Operand::Zero()); - __ Push(scratch); - __ Push(scratch); - // One slot for the signature: + // Reserve a slot for the signature. 
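+  // (The pushed value is a placeholder; the push only reserves the slot.)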
__ Push(r0); __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA); } @@ -3649,8 +3645,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs, promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ ldr(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister, tmp); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp, tmp2, tmp3); @@ -3698,8 +3694,8 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm, DEFINE_SCOPED(tmp2); DEFINE_SCOPED(tmp3); __ ldr(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister, tmp); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2, tmp3); RestoreParentSuspender(masm, tmp, tmp2); @@ -3727,8 +3723,10 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ AllocateStackSpace(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize); - DEFINE_PINNED(ref, kWasmInstanceRegister); - __ ldr(ref, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + // Load the implicit argument (instance data or import data) from the frame. + DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister); + __ ldr(implicit_arg, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); DEFINE_PINNED(wrapper_buffer, WasmJSToWasmWrapperDescriptor::WrapperBufferRegister()); @@ -3737,20 +3735,22 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register original_fp = no_reg; Register new_wrapper_buffer = no_reg; if (stack_switch) { - SwitchToAllocatedStack(masm, regs, ref, wrapper_buffer, original_fp, - new_wrapper_buffer, &suspend); + SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer, + original_fp, new_wrapper_buffer, &suspend); } else { original_fp = fp; new_wrapper_buffer = wrapper_buffer; } - regs.ResetExcept(original_fp, wrapper_buffer, ref, new_wrapper_buffer); + regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg, + new_wrapper_buffer); { __ str(new_wrapper_buffer, MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset)); if (stack_switch) { - __ str(ref, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ str(implicit_arg, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); DEFINE_SCOPED(scratch) __ ldr( scratch, @@ -3777,12 +3777,13 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { if (stack_switch) { FREE_REG(new_wrapper_buffer) } - FREE_REG(ref) + FREE_REG(implicit_arg) for (auto reg : wasm::kGpParamRegisters) { regs.Reserve(reg); } - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -3896,14 +3897,15 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // r2: pointer to the byte buffer which contains all parameters. 
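  // r0 (the context) and r1 (the result array) are reloaded below, right
  // before the kJSToWasmHandleReturns builtin call.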
if (stack_switch) { __ ldr(r1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ ldr(r0, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ ldr(r0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } else { __ ldr(r1, MemOperand( fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); - __ ldr(r0, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + __ ldr(r0, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); } Register scratch = r3; - GetContextFromRef(masm, r0, scratch); + GetContextFromImplicitArg(masm, r0, scratch); __ CallBuiltin(Builtin::kJSToWasmHandleReturns); @@ -4202,6 +4204,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Jump(scratch); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + __ Trap(); +} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label negate, done; diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 63ea6e63f9cbf7..64acf944ee2e83 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -570,6 +570,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. { + // TODO(40931165): use parameter count from JSDispatchTable and validate + // that it matches the number of values in the JSGeneratorObject. __ LoadTaggedField( x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset)); __ Ldrh(w0, FieldMemOperand( @@ -1519,12 +1521,20 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing); +#ifndef V8_ENABLE_LEAPTIERING + // TODO(olivf, 42204201): This fastcase is difficult to support with the + // sandbox as it requires getting write access to the dispatch table. See + // `JSFunction::UpdateCode`. We might want to remove it for all + // configurations as it does not seem to be performance sensitive. + // Load the baseline code into the closure. __ Move(x2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(x2, closure); __ JumpCodeObject(x2, kJSEntrypointTag); +#endif // V8_ENABLE_LEAPTIERING + __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); } @@ -2184,8 +2194,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code); } @@ -2837,9 +2848,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- cp : the function context. 
// ----------------------------------- +#ifdef V8_ENABLE_LEAPTIERING + __ InvokeFunctionCode(x1, no_reg, x0, InvokeType::kJump); +#else __ Ldrh(x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump); +#endif // V8_ENABLE_LEAPTIERING } namespace { @@ -3194,8 +3209,8 @@ constexpr RegList kSavedGpRegs = ([]() constexpr { for (Register gp_param_reg : wasm::kGpParamRegisters) { saved_gp_regs.set(gp_param_reg); } - // The instance has already been stored in the fixed part of the frame. - saved_gp_regs.clear(kWasmInstanceRegister); + // The instance data has already been stored in the fixed part of the frame. + saved_gp_regs.clear(kWasmImplicitArgRegister); // All set registers were unique. The instance is skipped. CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1); // We push a multiple of 16 bytes. @@ -3219,19 +3234,19 @@ constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { // When entering this builtin, we have just created a Wasm stack frame: // -// [ Wasm instance ] <-- sp -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ Wasm instance data ] <-- sp +// [ WASM frame marker ] +// [ saved fp ] <-- fp // // Due to stack alignment restrictions, this builtin adds the feedback vector // plus a filler to the stack. The stack pointer will be // moved an appropriate distance by {PatchPrepareStackFrame}. // -// [ (unused) ] <-- sp -// [ feedback vector ] -// [ Wasm instance ] -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ (unused) ] <-- sp +// [ feedback vector ] +// [ Wasm instance data ] +// [ WASM frame marker ] +// [ saved fp ] <-- fp void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register func_index = wasm::kLiftoffFrameSetupFunctionReg; Register vector = x9; @@ -3239,7 +3254,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ LoadTaggedField( - vector, FieldMemOperand(kWasmInstanceRegister, + vector, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2)); __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); @@ -3259,11 +3274,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ PushQRegList(kSavedFpRegs); __ Push(lr, xzr); // xzr is for alignment. - // Arguments to the runtime function: instance, func_index, and an + // Arguments to the runtime function: instance data, func_index, and an // additional stack slot for the NativeModule. The first pushed register // is for alignment. {x0} and {x1} are picked arbitrarily. __ SmiTag(func_index); - __ Push(x0, kWasmInstanceRegister, func_index, x1); + __ Push(x0, kWasmImplicitArgRegister, func_index, x1); __ Mov(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3); __ Mov(vector, kReturnRegister0); @@ -3272,9 +3287,9 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Pop(xzr, lr); __ PopQRegList(kSavedFpRegs); __ PopXRegList(kSavedGpRegs); - // Restore the instance from the frame. - __ Ldr(kWasmInstanceRegister, - MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset)); + // Restore the instance data from the frame. 
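+  // (It is not in kSavedGpRegs, so the runtime call may have clobbered it;
+  // the fixed frame slot survives.)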
+ __ Ldr(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM)); __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); __ B(&done); @@ -3292,19 +3307,19 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::INTERNAL); - // Manually save the instance (which kSavedGpRegs skips because its + // Manually save the instance data (which kSavedGpRegs skips because its // other use puts it into the fixed frame anyway). The stack slot is valid // because the {FrameScope} (via {EnterFrame}) always reserves it (for stack // alignment reasons). The instance is needed because once this builtin is // done, we'll call a regular Wasm function. - __ Str(kWasmInstanceRegister, - MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset)); + __ Str(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); // Save registers that we need to keep alive across the runtime call. __ PushXRegList(kSavedGpRegs); __ PushQRegList(kSavedFpRegs); - __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); + __ Push(kWasmImplicitArgRegister, kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Mov(cp, Smi::zero()); @@ -3317,9 +3332,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Restore registers. __ PopQRegList(kSavedFpRegs); __ PopXRegList(kSavedGpRegs); - // Restore the instance from the frame. - __ Ldr(kWasmInstanceRegister, - MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset)); + // Restore the instance data from the frame. + __ Ldr(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); } // The runtime function returned the jump table slot offset as a Smi (now in @@ -3327,7 +3342,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // target, to be compliant with CFI. constexpr Register temp = x8; static_assert(!kSavedGpRegs.has(temp)); - __ ldr(temp, FieldMemOperand(kWasmInstanceRegister, + __ ldr(temp, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset)); __ add(x17, temp, Operand(x17)); // Finally, jump to the jump table slot for the function. @@ -3552,7 +3567,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ Str(xzr, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } // TODO(irezvov): Consolidate with arm RegisterAllocator. @@ -3685,19 +3700,21 @@ class RegisterAllocator { #define FREE_REG(Name) regs.Free(&Name); // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { - __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset)); +// depending on the data's type, and places the result in the input register. 
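+// Equivalent logic, ignoring the handle plumbing: when the map's instance
+// type is WASM_TRUSTED_INSTANCE_DATA_TYPE, the native context is loaded from
+// the WasmTrustedInstanceData; otherwise it comes from the WasmImportData.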
+void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { + __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset)); __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE); Label instance; Label end; __ B(eq, &instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset)); + data, FieldMemOperand(data, WasmImportData::kNativeContextOffset)); __ jmp(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + data, + FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3714,10 +3731,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { __ Push(wasm::kGpParamRegisters[6], wasm::kGpParamRegisters[5], wasm::kGpParamRegisters[4], wasm::kGpParamRegisters[3]); __ Push(wasm::kGpParamRegisters[2], wasm::kGpParamRegisters[1]); - // Push four more slots that will be used as fixed spill slots in the torque - // wrapper. Two slots for stack-switching (central stack pointer and secondary - // stack limit), one for the signature, and one for stack alignment. - __ Push(xzr, xzr, xzr, xzr); + // Reserve a slot for the signature, and one for stack alignment. + __ Push(xzr, xzr); __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA); } @@ -4068,8 +4083,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs, promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ Ldr(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister, tmp); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp, tmp2, tmp3); @@ -4114,12 +4129,12 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm, promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ Ldr(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); DEFINE_SCOPED(tmp); DEFINE_SCOPED(tmp2); DEFINE_SCOPED(tmp3); - GetContextFromRef(masm, kContextRegister, tmp); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2, tmp3); RestoreParentSuspender(masm, tmp, tmp2); @@ -4148,8 +4163,10 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Immediate(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize)); - DEFINE_PINNED(ref, kWasmInstanceRegister); - __ Ldr(ref, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + // Load the implicit argument (instance data or import data) from the frame. 
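+  // (GetContextFromImplicitArg is used later to pick the right native
+  // context for either case.)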
+ DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister); + __ Ldr(implicit_arg, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); DEFINE_PINNED(wrapper_buffer, WasmJSToWasmWrapperDescriptor::WrapperBufferRegister()); @@ -4158,20 +4175,22 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register original_fp = no_reg; Register new_wrapper_buffer = no_reg; if (stack_switch) { - SwitchToAllocatedStack(masm, regs, ref, wrapper_buffer, original_fp, - new_wrapper_buffer, &suspend); + SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer, + original_fp, new_wrapper_buffer, &suspend); } else { original_fp = fp; new_wrapper_buffer = wrapper_buffer; } - regs.ResetExcept(original_fp, wrapper_buffer, ref, new_wrapper_buffer); + regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg, + new_wrapper_buffer); { __ Str(new_wrapper_buffer, MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset)); if (stack_switch) { - __ Str(ref, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ Str(implicit_arg, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); DEFINE_SCOPED(scratch) __ Ldr( scratch, @@ -4203,12 +4222,13 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { if (stack_switch) { FREE_REG(new_wrapper_buffer) } - FREE_REG(ref) + FREE_REG(implicit_arg) for (auto reg : wasm::kGpParamRegisters) { regs.Reserve(reg); } - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -4333,14 +4353,15 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // x2: pointer to the byte buffer which contains all parameters. if (stack_switch) { __ Ldr(x1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ Ldr(x0, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ Ldr(x0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } else { __ Ldr(x1, MemOperand( fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); - __ Ldr(x0, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + __ Ldr(x0, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); } Register scratch = x3; - GetContextFromRef(masm, x0, scratch); + GetContextFromImplicitArg(masm, x0, scratch); __ CallBuiltin(Builtin::kJSToWasmHandleReturns); Label return_promise; @@ -4668,6 +4689,67 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Br(x17); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + using ER = ExternalReference; + Register frame_base = WasmHandleStackOverflowDescriptor::FrameBaseRegister(); + Register gap = WasmHandleStackOverflowDescriptor::GapRegister(); + { + DCHECK_NE(kCArgRegs[1], frame_base); + DCHECK_NE(kCArgRegs[3], frame_base); + __ Mov(kCArgRegs[3], gap); + __ Mov(kCArgRegs[1], sp); + __ Sub(kCArgRegs[2], frame_base, kCArgRegs[1]); + __ Mov(kCArgRegs[4], fp); + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kCArgRegs[3], padreg); + __ Mov(kCArgRegs[0], ER::isolate_address()); + __ CallCFunction(ER::wasm_grow_stack(), 5); + __ Pop(padreg, gap); + DCHECK_NE(kReturnRegister0, gap); + } + Label call_runtime; + // wasm_grow_stack returns zero if it cannot grow a stack. 
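+  // On success it returns the new stack top: sp is moved there and fp is
+  // re-based to preserve the frame distance, i.e.
+  // new_fp = new_sp + (old_fp - old_sp).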
+ __ Cbz(kReturnRegister0, &call_runtime); + { + UseScratchRegisterScope temps(masm); + Register new_fp = temps.AcquireX(); + // Calculate old FP - SP offset to adjust FP accordingly to new SP. + __ Mov(new_fp, sp); + __ Sub(new_fp, fp, new_fp); + __ Add(new_fp, kReturnRegister0, new_fp); + __ Mov(fp, new_fp); + } + SwitchSimulatorStackLimit(masm); + __ Mov(sp, kReturnRegister0); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START)); + __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); + } + __ Ret(); + + __ bind(&call_runtime); + // If wasm_grow_stack returns zero interruption or stack overflow + // should be handled by runtime call. + { + __ Ldr(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); + __ LoadTaggedField( + cp, FieldMemOperand(kWasmImplicitArgRegister, + WasmTrustedInstanceData::kNativeContextOffset)); + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ SmiTag(gap); + __ PushArgument(gap); + __ CallRuntime(Runtime::kWasmStackGuard); + __ LeaveFrame(StackFrame::INTERNAL); + __ Ret(); + } +} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label done; Register result = x7; @@ -4853,7 +4935,24 @@ void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, __ EnterExitFrame(scratch, FC::getExtraSlotsCountFrom(), StackFrame::API_CALLBACK_EXIT); - MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset); + // This is a workaround for performance regression observed on Apple Silicon + // (https://crbug.com/347741609): reading argc value after the call via + // MemOperand argc_operand = MemOperand(fp, FC::kFCIArgcOffset); + // is noticeably slower than using sp-based access: + MemOperand argc_operand = ExitFrameStackSlotOperand(FCA::kLengthOffset); + if (v8_flags.debug_code) { + // Ensure sp-based calculation of FC::length_'s address matches the + // fp-based one. + Label ok; + // +kSystemPointerSize is for the slot at [sp] which is reserved in all + // ExitFrames for storing the return PC. + __ Add(scratch, sp, + FCA::kLengthOffset + kSystemPointerSize - FC::kFCIArgcOffset); + __ cmp(scratch, fp); + __ B(eq, &ok); + __ DebugBreak(); + __ Bind(&ok); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize v8::FunctionCallbackInfo"); // FunctionCallbackInfo::length_. @@ -5205,7 +5304,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ Mov(x5, unwind_limit); __ CopyDoubleWords(x3, x1, x5); // Since {unwind_limit} is the frame size up to the parameter count, we might - // end up with a unaligned stack pointer. This is later recovered when + // end up with an unaligned stack pointer. This is later recovered when // setting the stack pointer to {caller_frame_top_offset}. __ Bic(unwind_limit, unwind_limit, 1); __ Drop(unwind_limit); @@ -5497,8 +5596,13 @@ void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) { // The arguments are already in the stack (including any necessary padding), // we should not try to massage the arguments again. 
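  // Hence the invoke below skips argument adaption: via
  // ArgumentAdaptionMode::kDontAdapt with leaptiering, via
  // kDontAdaptArgumentsSentinel otherwise.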
+#ifdef V8_ENABLE_LEAPTIERING + __ InvokeFunction(x1, x0, InvokeType::kJump, + ArgumentAdaptionMode::kDontAdapt); +#else __ Mov(x2, kDontAdaptArgumentsSentinel); __ InvokeFunction(x1, x2, x0, InvokeType::kJump); +#endif } #undef __ diff --git a/deps/v8/src/builtins/array-from-async.tq b/deps/v8/src/builtins/array-from-async.tq index a6b534d1028a1b..52c8e59fa92446 100644 --- a/deps/v8/src/builtins/array-from-async.tq +++ b/deps/v8/src/builtins/array-from-async.tq @@ -92,7 +92,7 @@ extern enum ArrayFromAsyncIterableResolveContextSlots extends intptr } extern macro AllocateRootFunctionWithContext( - constexpr intptr, FunctionContext): JSFunction; + constexpr intptr, FunctionContext, NativeContext): JSFunction; const kArrayFromAsyncIterableOnFulfilledSharedFun: constexpr intptr generates 'RootIndex::kArrayFromAsyncIterableOnFulfilledSharedFun'; @@ -139,13 +139,15 @@ macro CreateArrayFromAsyncIterableResolveContext( ArrayFromAsyncIterableResolveContextSlots:: kArrayFromAsyncIterableResolveOnFulfilledFunctionSlot, AllocateRootFunctionWithContext( - kArrayFromAsyncIterableOnFulfilledSharedFun, resolveContext)); + kArrayFromAsyncIterableOnFulfilledSharedFun, resolveContext, + nativeContext)); InitContextSlot( resolveContext, ArrayFromAsyncIterableResolveContextSlots:: kArrayFromAsyncIterableResolveOnRejectedFunctionSlot, AllocateRootFunctionWithContext( - kArrayFromAsyncIterableOnRejectedSharedFun, resolveContext)); + kArrayFromAsyncIterableOnRejectedSharedFun, resolveContext, + nativeContext)); InitContextSlot( resolveContext, ArrayFromAsyncIterableResolveContextSlots:: @@ -559,13 +561,15 @@ macro CreateArrayFromAsyncArrayLikeResolveContext( ArrayFromAsyncArrayLikeResolveContextSlots:: kArrayFromAsyncArrayLikeResolveOnFulfilledFunctionSlot, AllocateRootFunctionWithContext( - kArrayFromAsyncArrayLikeOnFulfilledSharedFun, resolveContext)); + kArrayFromAsyncArrayLikeOnFulfilledSharedFun, resolveContext, + nativeContext)); InitContextSlot( resolveContext, ArrayFromAsyncArrayLikeResolveContextSlots:: kArrayFromAsyncArrayLikeResolveOnRejectedFunctionSlot, AllocateRootFunctionWithContext( - kArrayFromAsyncArrayLikeOnRejectedSharedFun, resolveContext)); + kArrayFromAsyncArrayLikeOnRejectedSharedFun, resolveContext, + nativeContext)); InitContextSlot( resolveContext, ArrayFromAsyncArrayLikeResolveContextSlots:: diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index d18bfe09778185..5a50e440490be6 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -26,6 +26,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + ArrayBuiltinsAssembler::ArrayBuiltinsAssembler( compiler::CodeAssemblerState* state) : CodeStubAssembler(state), @@ -2244,5 +2246,7 @@ TF_BUILTIN(CreateObjectFromSlowBoilerplateHelper, } } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index d7f19f2ad994f8..62c01fa3c59580 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -699,7 +699,7 @@ class ArrayConcatVisitor { set_exceeds_array_limit(true); // Exception hasn't been thrown at this point. Return true to // break out, and caller will throw. !visit would imply that - // there is already a exception. + // there is already an exception. 
return true; } diff --git a/deps/v8/src/builtins/builtins-async-disposable-stack.cc b/deps/v8/src/builtins/builtins-async-disposable-stack.cc index daabc511135504..3154f80b5e0caa 100644 --- a/deps/v8/src/builtins/builtins-async-disposable-stack.cc +++ b/deps/v8/src/builtins/builtins-async-disposable-stack.cc @@ -2,17 +2,67 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/api/api.h" #include "src/base/logging.h" +#include "src/base/macros.h" #include "src/builtins/builtins-utils-inl.h" +#include "src/builtins/builtins.h" +#include "src/execution/isolate.h" #include "src/handles/maybe-handles.h" +#include "src/objects/heap-object.h" #include "src/objects/js-disposable-stack-inl.h" #include "src/objects/js-disposable-stack.h" +#include "src/objects/js-objects.h" +#include "src/objects/js-promise-inl.h" #include "src/objects/js-promise.h" +#include "src/objects/objects.h" #include "src/roots/roots.h" namespace v8 { namespace internal { +BUILTIN(AsyncDisposableStackOnFulfilled) { + HandleScope scope(isolate); + + DirectHandle stack( + Cast(isolate->context()->get(static_cast( + JSDisposableStackBase::AsyncDisposableStackContextSlots::kStack))), + isolate); + Handle promise( + Cast(isolate->context()->get(static_cast( + JSDisposableStackBase::AsyncDisposableStackContextSlots:: + kOuterPromise))), + isolate); + + MAYBE_RETURN(JSAsyncDisposableStack::NextDisposeAsyncIteration(isolate, stack, + promise), + ReadOnlyRoots(isolate).exception()); + return ReadOnlyRoots(isolate).undefined_value(); +} + +BUILTIN(AsyncDisposableStackOnRejected) { + HandleScope scope(isolate); + + Handle stack( + Cast(isolate->context()->get(static_cast( + JSDisposableStackBase::AsyncDisposableStackContextSlots::kStack))), + isolate); + Handle promise( + Cast(isolate->context()->get(static_cast( + JSDisposableStackBase::AsyncDisposableStackContextSlots:: + kOuterPromise))), + isolate); + + Handle rejection_error = args.at(1); + DCHECK(isolate->is_catchable_by_javascript(*rejection_error)); + JSDisposableStackBase::HandleErrorInDisposal(isolate, stack, rejection_error); + + MAYBE_RETURN(JSAsyncDisposableStack::NextDisposeAsyncIteration(isolate, stack, + promise), + ReadOnlyRoots(isolate).exception()); + return ReadOnlyRoots(isolate).undefined_value(); +} + // Part of // https://tc39.es/proposal-explicit-resource-management/#sec-getdisposemethod BUILTIN(AsyncDisposeFromSyncDispose) { @@ -32,6 +82,11 @@ BUILTIN(AsyncDisposeFromSyncDispose) { JSDisposableStackBase::AsyncDisposeFromSyncDisposeContextSlots:: kMethod))), isolate); + + v8::TryCatch try_catch(reinterpret_cast(isolate)); + try_catch.SetVerbose(false); + try_catch.SetCaptureMessage(false); + MaybeHandle result = Execution::Call( isolate, sync_method, ReadOnlyRoots(isolate).undefined_value_handle(), 0, nullptr); @@ -43,13 +98,309 @@ BUILTIN(AsyncDisposeFromSyncDispose) { // undefined »). JSPromise::Resolve(promise, result_handle).ToHandleChecked(); } else { + Tagged exception = isolate->exception(); + if (!isolate->is_catchable_by_javascript(exception)) { + return {}; + } // d. IfAbruptRejectPromise(result, promiseCapability). - UNIMPLEMENTED(); + DCHECK(try_catch.HasCaught()); + JSPromise::Reject(promise, handle(exception, isolate)); } // f. Return promiseCapability.[[Promise]]. 
return *promise; } +// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack +BUILTIN(AsyncDisposableStackConstructor) { + const char kMethodName[] = "AsyncDisposableStack"; + HandleScope scope(isolate); + + // 1. If NewTarget is undefined, throw a TypeError exception. + if (!IsJSReceiver(*args.new_target(), isolate)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + + // 2. Let asyncDisposableStack be ? OrdinaryCreateFromConstructor(NewTarget, + // "%AsyncDisposableStack.prototype%", « [[AsyncDisposableState]], + // [[DisposeCapability]] »). + DirectHandle map; + Handle target = args.target(); + Handle new_target = Cast(args.new_target()); + + DCHECK_EQ(*target, + target->native_context()->js_async_disposable_stack_function()); + + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); + + DirectHandle async_disposable_stack = + isolate->factory()->NewJSAsyncDisposableStack(map); + // 3. Set asyncDisposableStack.[[AsyncDisposableState]] to pending. + // 4. Set asyncDisposableStack.[[DisposeCapability]] to + // NewDisposeCapability(). + JSDisposableStackBase::InitializeJSDisposableStackBase( + isolate, async_disposable_stack); + // 5. Return asyncDisposableStack. + return *async_disposable_stack; +} + +// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.use +BUILTIN(AsyncDisposableStackPrototypeUse) { + const char kMethodName[] = "AsyncDisposableStack.prototype.use"; + HandleScope scope(isolate); + + // 1. Let asyncDisposableStack be the this value. + // 2. Perform ? RequireInternalSlot(asyncDisposableStack, + // [[AsyncDisposableState]]). + CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName); + Handle value = args.at(1); + + // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a + // ReferenceError exception. + if (async_disposable_stack->state() == DisposableStackState::kDisposed) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewReferenceError( + MessageTemplate::kDisposableStackIsDisposed, + isolate->factory()->NewStringFromAsciiChecked(kMethodName))); + } + + // 4. Perform ? + // AddDisposableResource(asyncDisposableStack.[[DisposeCapability]], + // value, async-dispose). + Handle method; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, method, + JSDisposableStackBase::CheckValueAndGetDisposeMethod( + isolate, value, DisposeMethodHint::kAsyncDispose)); + + JSDisposableStackBase::Add( + isolate, async_disposable_stack, + (IsNullOrUndefined(*value) + ? ReadOnlyRoots(isolate).undefined_value_handle() + : value), + method, DisposeMethodCallType::kValueIsReceiver, + DisposeMethodHint::kAsyncDispose); + + // 5. Return value. + return *value; +} + +// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.disposeAsync +BUILTIN(AsyncDisposableStackPrototypeDisposeAsync) { + HandleScope scope(isolate); + + // 1. Let asyncDisposableStack be the this value. + Handle receiver = args.receiver(); + + // 2. Let promiseCapability be ! NewPromiseCapability(%Promise%). + Handle promise = isolate->factory()->NewJSPromise(); + + // 3. If asyncDisposableStack does not have an [[AsyncDisposableState]] + // internal slot, then + if (!IsJSAsyncDisposableStack(*receiver)) { + // a. Perform ! Call(promiseCapability.[[Reject]], undefined, « a newly + // created TypeError object »). 
+ JSPromise::Reject(promise, + isolate->factory()->NewTypeError( + MessageTemplate::kNotAnAsyncDisposableStack)); + // b. Return promiseCapability.[[Promise]]. + return *promise; + } + + Handle async_disposable_stack = + Cast(receiver); + + // 4. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, then + if (async_disposable_stack->state() == DisposableStackState::kDisposed) { + // a. Perform ! Call(promiseCapability.[[Resolve]], undefined, « + // undefined »). + JSPromise::Resolve( + promise, handle(ReadOnlyRoots(isolate).undefined_value(), isolate)) + .ToHandleChecked(); + // b. Return promiseCapability.[[Promise]]. + return *promise; + } + + // 5. Set asyncDisposableStack.[[AsyncDisposableState]] to disposed. + async_disposable_stack->set_state(DisposableStackState::kDisposed); + + // 6. Let result be + // DisposeResources(asyncDisposableStack.[[DisposeCapability]], + // NormalCompletion(undefined)). + // 7. IfAbruptRejectPromise(result, promiseCapability). + // 8. Perform ! Call(promiseCapability.[[Resolve]], undefined, « result + // »). + // 9. Return promiseCapability.[[Promise]]. + MAYBE_RETURN(JSAsyncDisposableStack::NextDisposeAsyncIteration( + isolate, async_disposable_stack, promise), + ReadOnlyRoots(isolate).exception()); + return *promise; +} + +// https://tc39.es/proposal-explicit-resource-management/#sec-get-asyncdisposablestack.prototype.disposed +BUILTIN(AsyncDisposableStackPrototypeGetDisposed) { + const char kMethodName[] = "get AsyncDisposableStack.prototype.disposed"; + HandleScope scope(isolate); + + // 1. Let AsyncdisposableStack be the this value. + // 2. Perform ? RequireInternalSlot(asyncDisposableStack, + // [[AsyncDisposableState]]). + CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName); + + // 3. If AsyncdisposableStack.[[AsyncDisposableState]] is disposed, return + // true. + // 4. Otherwise, return false. + return *(isolate->factory()->ToBoolean(async_disposable_stack->state() == + DisposableStackState::kDisposed)); +} + +// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.adopt +BUILTIN(AsyncDisposableStackPrototypeAdopt) { + const char kMethodName[] = "AsyncDisposableStack.prototype.adopt"; + HandleScope scope(isolate); + Handle value = args.at(1); + Handle on_dispose_async = args.at(2); + + // 1. Let asyncDisposableStack be the this value. + // 2. Perform ? RequireInternalSlot(asyncDisposableStack, + // [[AsyncDisposableState]]). + CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName); + + // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a + // ReferenceError exception. + if (async_disposable_stack->state() == DisposableStackState::kDisposed) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewReferenceError( + MessageTemplate::kDisposableStackIsDisposed, + isolate->factory()->NewStringFromAsciiChecked(kMethodName))); + } + + // 4. If IsCallable(onDisposeAsync) is false, throw a TypeError exception. + if (!IsCallable(*on_dispose_async)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kNotCallable, on_dispose_async)); + } + + // 5. Let closure be a new Abstract Closure with no parameters that captures + // value and onDisposeAsync and performs the following steps when called: + // a. Return ? Call(onDisposeAsync, undefined, « value »). + // 6. Let F be CreateBuiltinFunction(closure, 0, "", « »). + // 7. Perform ? 
+ // AddDisposableResource(asyncDisposableStack.[[DisposeCapability]], + // undefined, async-dispose, F). + // Instead of creating an abstract closure and a function, we pass + // DisposeMethodCallType::kArgument so at the time of disposal, the value will + // be passed as the argument to the method. + JSDisposableStackBase::Add(isolate, async_disposable_stack, value, + on_dispose_async, + DisposeMethodCallType::kValueIsArgument, + DisposeMethodHint::kAsyncDispose); + + // 8. Return value. + return *value; +} + +// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.defer +BUILTIN(AsyncDisposableStackPrototypeDefer) { + const char kMethodName[] = "AsyncDisposableStack.prototype.defer"; + HandleScope scope(isolate); + Handle on_dispose_async = args.at(1); + + // 1. Let asyncDisposableStack be the this value. + // 2. Perform ? RequireInternalSlot(asyncDisposableStack, + // [[AsyncDisposableState]]). + CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName); + + // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a + // ReferenceError exception. + if (async_disposable_stack->state() == DisposableStackState::kDisposed) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewReferenceError( + MessageTemplate::kDisposableStackIsDisposed, + isolate->factory()->NewStringFromAsciiChecked(kMethodName))); + } + + // 4. If IsCallable(onDisposeAsync) is false, throw a TypeError exception. + if (!IsCallable(*on_dispose_async)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kNotCallable, on_dispose_async)); + } + + // 5. Perform ? + // AddDisposableResource(asyncDisposableStack.[[DisposeCapability]], + // undefined, async-dispose, onDisposeAsync). + JSDisposableStackBase::Add(isolate, async_disposable_stack, + ReadOnlyRoots(isolate).undefined_value_handle(), + on_dispose_async, + DisposeMethodCallType::kValueIsReceiver, + DisposeMethodHint::kAsyncDispose); + + // 6. Return undefined. + return ReadOnlyRoots(isolate).undefined_value(); +} + +// https://tc39.es/proposal-explicit-resource-management/#sec-asyncdisposablestack.prototype.move +BUILTIN(AsyncDisposableStackPrototypeMove) { + const char kMethodName[] = "AsyncDisposableStack.prototype.move"; + HandleScope scope(isolate); + + // 1. Let asyncDisposableStack be the this value. + // 2. Perform ? RequireInternalSlot(asyncDisposableStack, + // [[AsyncDisposableState]]). + CHECK_RECEIVER(JSAsyncDisposableStack, async_disposable_stack, kMethodName); + + // 3. If asyncDisposableStack.[[AsyncDisposableState]] is disposed, throw a + // ReferenceError exception. + if (async_disposable_stack->state() == DisposableStackState::kDisposed) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewReferenceError( + MessageTemplate::kDisposableStackIsDisposed, + isolate->factory()->NewStringFromAsciiChecked(kMethodName))); + } + + // 4. Let newAsyncDisposableStack be ? + // OrdinaryCreateFromConstructor(%AsyncDisposableStack%, + // "%AsyncDisposableStack.prototype%", « [[AsyncDisposableState]], + // [[DisposeCapability]] »). + // 5. Set newAsyncDisposableStack.[[AsyncDisposableState]] to pending. + + Tagged constructor_function = + Cast(isolate->native_context()->get( + Context::JS_ASYNC_DISPOSABLE_STACK_FUNCTION_INDEX)); + DirectHandle map(constructor_function->initial_map(), isolate); + + DirectHandle new_async_disposable_stack = + isolate->factory()->NewJSAsyncDisposableStack(map); + + // 6. 
Set newAsyncDisposableStack.[[DisposeCapability]] to + // asyncDisposableStack.[[DisposeCapability]]. + new_async_disposable_stack->set_stack(async_disposable_stack->stack()); + new_async_disposable_stack->set_length(async_disposable_stack->length()); + new_async_disposable_stack->set_state(DisposableStackState::kPending); + new_async_disposable_stack->set_error( + *(isolate->factory()->uninitialized_value())); + + // 7. Set asyncDisposableStack.[[DisposeCapability]] to + // NewDisposeCapability(). + async_disposable_stack->set_stack(ReadOnlyRoots(isolate).empty_fixed_array()); + async_disposable_stack->set_length(0); + async_disposable_stack->set_error( + *(isolate->factory()->uninitialized_value())); + + // 8. Set disposableStack.[[DisposableState]] to disposed. + async_disposable_stack->set_state(DisposableStackState::kDisposed); + + // 9. Return newDisposableStack. + return *new_async_disposable_stack; +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc index cd1f4f29c4f146..663c0b930944aa 100644 --- a/deps/v8/src/builtins/builtins-async-function-gen.cc +++ b/deps/v8/src/builtins/builtins-async-function-gen.cc @@ -13,6 +13,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler { public: explicit AsyncFunctionBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -200,14 +202,11 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait() { auto value = Parameter(Descriptor::kValue); auto context = Parameter(Descriptor::kContext); - TNode on_resolve_sfi = - AsyncFunctionAwaitResolveSharedFunConstant(); - TNode on_reject_sfi = - AsyncFunctionAwaitRejectSharedFunConstant(); TNode outer_promise = LoadObjectField( async_function_object, JSAsyncFunctionObject::kPromiseOffset); - Await(context, async_function_object, value, outer_promise, on_resolve_sfi, - on_reject_sfi); + Await(context, async_function_object, value, outer_promise, + RootIndex::kAsyncFunctionAwaitResolveClosureSharedFun, + RootIndex::kAsyncFunctionAwaitRejectClosureSharedFun); // Return outer promise to avoid adding an load of the outer promise before // suspending in BytecodeGenerator. @@ -219,5 +218,7 @@ TF_BUILTIN(AsyncFunctionAwait, AsyncFunctionBuiltinsAssembler) { AsyncFunctionAwait(); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc index 9b4daad85629ae..9a4237b1740f75 100644 --- a/deps/v8/src/builtins/builtins-async-gen.cc +++ b/deps/v8/src/builtins/builtins-async-gen.cc @@ -13,6 +13,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + namespace { // Describe fields of Context associated with the AsyncIterator unwrap closure. 
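// (CreateUnwrapClosure below stores the `done` boolean into such a context.)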
class ValueUnwrapContext { @@ -22,11 +24,27 @@ class ValueUnwrapContext { } // namespace +TNode AsyncBuiltinsAssembler::Await(TNode context, + TNode generator, + TNode value, + TNode outer_promise, + RootIndex on_resolve_sfi, + RootIndex on_reject_sfi) { + return Await( + context, generator, value, outer_promise, + [&](TNode context, TNode native_context) { + auto on_resolve = AllocateRootFunctionWithContext( + on_resolve_sfi, context, native_context); + auto on_reject = AllocateRootFunctionWithContext(on_reject_sfi, context, + native_context); + return std::make_pair(on_resolve, on_reject); + }); +} + TNode AsyncBuiltinsAssembler::Await( TNode context, TNode generator, TNode value, TNode outer_promise, - TNode on_resolve_sfi, - TNode on_reject_sfi) { + const CreateClosures& CreateClosures) { const TNode native_context = LoadNativeContext(context); // We do the `PromiseResolve(%Promise%,value)` avoiding to unnecessarily @@ -103,17 +121,9 @@ TNode AsyncBuiltinsAssembler::Await( generator); } - // Allocate and initialize resolve handler - TNode on_resolve = - AllocateInNewSpace(JSFunction::kSizeWithoutPrototype); - InitializeNativeClosure(closure_context, native_context, on_resolve, - on_resolve_sfi); - - // Allocate and initialize reject handler - TNode on_reject = - AllocateInNewSpace(JSFunction::kSizeWithoutPrototype); - InitializeNativeClosure(closure_context, native_context, on_reject, - on_reject_sfi); + // Allocate and initialize resolve and reject handlers + auto [on_resolve, on_reject] = + CreateClosures(closure_context, native_context); // Deal with PromiseHooks and debug support in the runtime. This // also allocates the throwaway promise, which is only needed in @@ -148,52 +158,13 @@ TNode AsyncBuiltinsAssembler::Await( on_resolve, on_reject, var_throwaway.value()); } -void AsyncBuiltinsAssembler::InitializeNativeClosure( - TNode context, TNode native_context, - TNode function, TNode shared_info) { - TNode function_map = CAST(LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); - // Ensure that we don't have to initialize prototype_or_initial_map field of - // JSFunction. - CSA_DCHECK(this, - IntPtrEqual(LoadMapInstanceSizeInWords(function_map), - IntPtrConstant(JSFunction::kSizeWithoutPrototype / - kTaggedSize))); - static_assert(JSFunction::kSizeWithoutPrototype == - (7 + V8_ENABLE_LEAPTIERING_BOOL) * kTaggedSize); - StoreMapNoWriteBarrier(function, function_map); - StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset, - RootIndex::kEmptyFixedArray); - StoreObjectFieldRoot(function, JSObject::kElementsOffset, - RootIndex::kEmptyFixedArray); - StoreObjectFieldRoot(function, JSFunction::kFeedbackCellOffset, - RootIndex::kManyClosuresCell); -#ifdef V8_ENABLE_LEAPTIERING - // TODO(saelo): obtain an appropriate dispatch handle here. - StoreObjectFieldNoWriteBarrier(function, JSFunction::kDispatchHandleOffset, - Int32Constant(kNullJSDispatchHandle)); -#endif // V8_ENABLE_LEAPTIERING - - StoreObjectFieldNoWriteBarrier( - function, JSFunction::kSharedFunctionInfoOffset, shared_info); - StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context); - - // For the native closures that are initialized here (for `await`) - // we know that their SharedFunctionInfo::function_data(kAcquireLoad) slot - // contains a builtin index (as Smi), so there's no need to use - // CodeStubAssembler::GetSharedFunctionInfoCode() helper here, - // which almost doubles the size of `await` builtins (unnecessarily). 
- TNode builtin_id = LoadSharedFunctionInfoBuiltinId(shared_info); - TNode code = LoadBuiltin(builtin_id); - StoreCodePointerFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code); -} - TNode AsyncBuiltinsAssembler::CreateUnwrapClosure( TNode native_context, TNode done) { const TNode closure_context = AllocateAsyncIteratorValueUnwrapContext(native_context, done); return AllocateRootFunctionWithContext( - RootIndex::kAsyncIteratorValueUnwrapSharedFun, closure_context); + RootIndex::kAsyncIteratorValueUnwrapSharedFun, closure_context, + native_context); } TNode AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( @@ -221,5 +192,7 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) { Return(unwrapped_value); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h index c62c4128d154c1..61968bd559e2ae 100644 --- a/deps/v8/src/builtins/builtins-async-gen.h +++ b/deps/v8/src/builtins/builtins-async-gen.h @@ -21,11 +21,17 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler { // `on_reject` is the SharedFunctioninfo instance used to create the reject // closure. `on_resolve` is the SharedFunctioninfo instance used to create the // resolve closure. Returns the Promise-wrapped `value`. + using CreateClosures = + std::function, TNode>( + TNode, TNode)>; TNode Await(TNode context, TNode generator, TNode value, TNode outer_promise, - TNode on_resolve_sfi, - TNode on_reject_sfi); + const CreateClosures& CreateClosures); + TNode Await(TNode context, + TNode generator, TNode value, + TNode outer_promise, RootIndex on_resolve_sfi, + RootIndex on_reject_sfi); // Return a new built-in function object as defined in // Async Iterator Value Unwrap Functions @@ -33,10 +39,6 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler { TNode done); private: - void InitializeNativeClosure(TNode context, - TNode native_context, - TNode function, - TNode shared_info); TNode AllocateAsyncIteratorValueUnwrapContext( TNode native_context, TNode done); }; diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index e465a14898a9e1..002f7453382be0 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -13,6 +13,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + namespace { class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler { @@ -258,8 +260,8 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait() { request, AsyncGeneratorRequest::kPromiseOffset); Await(context, async_generator_object, value, outer_promise, - AsyncGeneratorAwaitResolveSharedFunConstant(), - AsyncGeneratorAwaitRejectSharedFunConstant()); + RootIndex::kAsyncGeneratorAwaitResolveClosureSharedFun, + RootIndex::kAsyncGeneratorAwaitRejectClosureSharedFun); SetGeneratorAwaiting(async_generator_object); Return(UndefinedConstant()); } @@ -591,8 +593,8 @@ TF_BUILTIN(AsyncGeneratorYieldWithAwait, AsyncGeneratorBuiltinsAssembler) { LoadPromiseFromAsyncGeneratorRequest(request); Await(context, generator, value, outer_promise, - AsyncGeneratorYieldWithAwaitResolveSharedFunConstant(), - AsyncGeneratorAwaitRejectSharedFunConstant()); + RootIndex::kAsyncGeneratorYieldWithAwaitResolveClosureSharedFun, + RootIndex::kAsyncGeneratorAwaitRejectClosureSharedFun); 
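+  // (This Await overload allocates the resolve/reject closures from the two
+  // root SharedFunctionInfos itself.)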
SetGeneratorAwaiting(generator); Return(UndefinedConstant()); } @@ -637,21 +639,35 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) { const TNode req = CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator)); - Label perform_await(this); - TVARIABLE(SharedFunctionInfo, var_on_resolve, - AsyncGeneratorReturnClosedResolveSharedFunConstant()); - - TVARIABLE(SharedFunctionInfo, var_on_reject, - AsyncGeneratorReturnClosedRejectSharedFunConstant()); - const TNode state = LoadGeneratorState(generator); - GotoIf(IsGeneratorStateClosed(state), &perform_await); - var_on_resolve = AsyncGeneratorReturnResolveSharedFunConstant(); - var_on_reject = AsyncGeneratorAwaitRejectSharedFunConstant(); + auto MakeClosures = [&](TNode context, + TNode native_context) { + TVARIABLE(JSFunction, var_on_resolve); + TVARIABLE(JSFunction, var_on_reject); + Label closed(this), not_closed(this), done(this); + Branch(IsGeneratorStateClosed(state), &closed, ¬_closed); + + BIND(&closed); + var_on_resolve = AllocateRootFunctionWithContext( + RootIndex::kAsyncGeneratorReturnClosedResolveClosureSharedFun, context, + native_context); + var_on_reject = AllocateRootFunctionWithContext( + RootIndex::kAsyncGeneratorReturnClosedRejectClosureSharedFun, context, + native_context); + Goto(&done); - Goto(&perform_await); + BIND(¬_closed); + var_on_resolve = AllocateRootFunctionWithContext( + RootIndex::kAsyncGeneratorReturnResolveClosureSharedFun, context, + native_context); + var_on_reject = AllocateRootFunctionWithContext( + RootIndex::kAsyncGeneratorAwaitRejectClosureSharedFun, context, + native_context); + Goto(&done); - BIND(&perform_await); + BIND(&done); + return std::make_pair(var_on_resolve.value(), var_on_reject.value()); + }; SetGeneratorAwaiting(generator); auto context = Parameter(Descriptor::kContext); @@ -664,9 +680,7 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) { { compiler::ScopedExceptionHandler handler(this, &await_exception, &var_exception); - - Await(context, generator, value, outer_promise, var_on_resolve.value(), - var_on_reject.value()); + Await(context, generator, value, outer_promise, MakeClosures); } Goto(&done); @@ -730,5 +744,7 @@ TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure, AsyncGeneratorReturnClosedReject(context, generator, value); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc index cd5a3460f5f8fd..56b13850f6b1e2 100644 --- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc @@ -13,6 +13,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + namespace { class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { public: @@ -302,7 +304,7 @@ TNode AsyncFromSyncBuiltinsAssembler:: sync_iterator); return AllocateRootFunctionWithContext( RootIndex::kAsyncFromSyncIteratorCloseSyncAndRethrowSharedFun, - closure_context); + closure_context, native_context); } TNode AsyncFromSyncBuiltinsAssembler:: @@ -448,5 +450,7 @@ TF_BUILTIN(AsyncFromSyncIteratorCloseSyncAndRethrow, Return(CallRuntime(Runtime::kReThrow, context, error)); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc index cdd7ed07d1c6a1..4d166e883c1041 
100644 --- a/deps/v8/src/builtins/builtins-bigint-gen.cc +++ b/deps/v8/src/builtins/builtins-bigint-gen.cc @@ -12,6 +12,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // https://tc39.github.io/proposal-bigint/#sec-to-big-int64 TF_BUILTIN(BigIntToI64, CodeStubAssembler) { if (!Is64()) { @@ -73,5 +75,7 @@ TF_BUILTIN(I32PairToBigInt, CodeStubAssembler) { Return(BigIntFromInt32Pair(low, high)); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index f85434d7309ea1..1ad24b510036c1 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -20,6 +20,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined( MacroAssembler* masm) { Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined); @@ -907,5 +909,7 @@ TF_BUILTIN(HandleApiCallOrConstruct, CallOrConstructBuiltinsAssembler) { } } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index 8bf9f8031daa33..37ebb276583964 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -19,6 +19,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + template using TVariable = compiler::TypedCodeAssemblerVariable; @@ -3047,5 +3049,7 @@ TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) { Return(FalseConstant()); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index 8fef432d6f11f0..e71cfd46e0f23c 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -21,6 +21,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) { Generate_CallOrConstructVarargs(masm, Builtin::kConstruct); } @@ -276,8 +278,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { BIND(&done); } - static_assert(JSFunction::kSizeWithoutPrototype == - (7 + V8_ENABLE_LEAPTIERING_BOOL) * kTaggedSize); + static_assert(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize); StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset, feedback_cell); StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset, @@ -290,10 +291,11 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { Int32Constant(kNullJSDispatchHandle))); StoreObjectFieldNoWriteBarrier(result, JSFunction::kDispatchHandleOffset, dispatch_handle); -#endif // V8_ENABLE_LEAPTIERING +#else TNode lazy_builtin = HeapConstantNoHole(BUILTIN_CODE(isolate(), CompileLazy)); StoreCodePointerField(result, JSFunction::kCodeOffset, lazy_builtin); +#endif // V8_ENABLE_LEAPTIERING Return(result); } @@ -761,5 +763,7 @@ void ConstructorBuiltinsAssembler::CopyMutableHeapNumbersInObject( kTaggedSize, LoopUnrollingMode::kNo, IndexAdvanceMode::kPost); } +#include 
"src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index 1c5f873defbe31..0ddb6d7c347a26 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -12,6 +12,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // ES6 section 7.1.3 ToNumber ( argument ) TF_BUILTIN(ToNumber, CodeStubAssembler) { auto context = Parameter(Descriptor::kContext); @@ -127,5 +129,7 @@ TF_BUILTIN(Typeof_Baseline, CodeStubAssembler) { Return(Typeof(object, slot, feedback_vector)); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc index e72e4948b39026..44ad0ee953cb98 100644 --- a/deps/v8/src/builtins/builtins-date-gen.cc +++ b/deps/v8/src/builtins/builtins-date-gen.cc @@ -10,6 +10,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // ----------------------------------------------------------------------------- // ES6 section 20.3 Date Objects @@ -256,5 +258,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) { } } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 1eb05f060c0eca..c43f367a55329d 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -618,7 +618,16 @@ namespace internal { CPP(DisposableStackPrototypeMove) \ \ /* Async DisposabeStack*/ \ + CPP(AsyncDisposableStackOnFulfilled) \ + CPP(AsyncDisposableStackOnRejected) \ CPP(AsyncDisposeFromSyncDispose) \ + CPP(AsyncDisposableStackConstructor) \ + CPP(AsyncDisposableStackPrototypeUse) \ + CPP(AsyncDisposableStackPrototypeDisposeAsync) \ + CPP(AsyncDisposableStackPrototypeGetDisposed) \ + CPP(AsyncDisposableStackPrototypeAdopt) \ + CPP(AsyncDisposableStackPrototypeDefer) \ + CPP(AsyncDisposableStackPrototypeMove) \ \ /* Error */ \ CPP(ErrorConstructor) \ @@ -836,7 +845,7 @@ namespace internal { TFC(Decrement_Baseline, UnaryOp_Baseline) \ TFC(Increment_Baseline, UnaryOp_Baseline) \ TFC(Negate_Baseline, UnaryOp_Baseline) \ - TFC(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \ + IF_TSA(TSC, TFC)(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \ TFC(Decrement_WithFeedback, UnaryOp_WithFeedback) \ TFC(Increment_WithFeedback, UnaryOp_WithFeedback) \ TFC(Negate_WithFeedback, UnaryOp_WithFeedback) \ @@ -1155,6 +1164,7 @@ namespace internal { IF_WASM(ASM, WasmLiftoffFrameSetup, WasmDummy) \ IF_WASM(ASM, WasmDebugBreak, WasmDummy) \ IF_WASM(ASM, WasmOnStackReplace, WasmDummy) \ + IF_WASM(ASM, WasmHandleStackOverflow, WasmHandleStackOverflow) \ IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \ IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToTagged) \ IF_WASM(TFC, WasmFloat64ToString, WasmFloat64ToTagged) \ @@ -1876,7 +1886,8 @@ namespace internal { /* Temporal #sec-temporal.calendar.prototype.inleapyear */ \ CPP(TemporalCalendarPrototypeInLeapYear) \ /* Temporal #sec-temporal.calendar.prototype.fields */ \ - TFJ(TemporalCalendarPrototypeFields, kJSArgcReceiverSlots, kIterable) \ + TFJ(TemporalCalendarPrototypeFields, kJSArgcReceiverSlots + 1, kReceiver, \ + kIterable) \ /* 
Temporal #sec-temporal.calendar.prototype.mergefields */ \ CPP(TemporalCalendarPrototypeMergeFields) \ /* Temporal #sec-temporal.calendar.prototype.tostring */ \ @@ -1887,8 +1898,10 @@ CPP(DatePrototypeToTemporalInstant) \ \ /* "Private" (created but not exposed) Builtins needed by Temporal */ \ - TFJ(StringFixedArrayFromIterable, kJSArgcReceiverSlots, kIterable) \ - TFJ(TemporalInstantFixedArrayFromIterable, kJSArgcReceiverSlots, kIterable) + TFJ(StringFixedArrayFromIterable, kJSArgcReceiverSlots + 1, kReceiver, \ + kIterable) \ + TFJ(TemporalInstantFixedArrayFromIterable, kJSArgcReceiverSlots + 1, \ + kReceiver, kIterable) #define BUILTIN_LIST_BASE(CPP, TSJ, TFJ, TSC, TFC, TFS, TFH, ASM) \ BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \ diff --git a/deps/v8/src/builtins/builtins-disposable-stack.cc b/deps/v8/src/builtins/builtins-disposable-stack.cc index a11129dcc84689..4f445c5cab9c34 100644 --- a/deps/v8/src/builtins/builtins-disposable-stack.cc +++ b/deps/v8/src/builtins/builtins-disposable-stack.cc @@ -253,10 +253,12 @@ BUILTIN(DisposableStackPrototypeMove) { new_disposable_stack->set_stack(disposable_stack->stack()); new_disposable_stack->set_length(disposable_stack->length()); new_disposable_stack->set_state(DisposableStackState::kPending); + new_disposable_stack->set_error(*(isolate->factory()->uninitialized_value())); // 7. Set disposableStack.[[DisposeCapability]] to NewDisposeCapability(). disposable_stack->set_stack(ReadOnlyRoots(isolate).empty_fixed_array()); disposable_stack->set_length(0); + disposable_stack->set_error(*(isolate->factory()->uninitialized_value())); // 8. Set disposableStack.[[DisposableState]] to disposed. disposable_stack->set_state(DisposableStackState::kDisposed); diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc index d76152181a3685..7081007ba76f94 100644 --- a/deps/v8/src/builtins/builtins-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-generator-gen.cc @@ -12,6 +12,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class GeneratorBuiltinsAssembler : public CodeStubAssembler { public: explicit GeneratorBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -311,5 +313,7 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) { Return(LoadJSGeneratorObjectInputOrDebugPos(generator)); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc index fc29247df65754..a58f796714dc97 100644 --- a/deps/v8/src/builtins/builtins-global-gen.cc +++ b/deps/v8/src/builtins/builtins-global-gen.cc @@ -9,6 +9,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // ES #sec-isfinite-number TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) { auto context = Parameter(Descriptor::kContext); @@ -106,5 +108,7 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) { Return(FalseConstant()); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index 382844f865c0ee..59f67f21019d14 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -13,6 +13,8 @@ namespace v8 { namespace internal { +#include
"src/codegen/define-code-stub-assembler-macros.inc" + class HandlerBuiltinsAssembler : public CodeStubAssembler { public: explicit HandlerBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -474,5 +476,7 @@ TF_BUILTIN(HasIndexedInterceptorIC, CodeStubAssembler) { vector); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-inl.h b/deps/v8/src/builtins/builtins-inl.h index 8a1447bcaa3f3f..f47050b6808a9b 100644 --- a/deps/v8/src/builtins/builtins-inl.h +++ b/deps/v8/src/builtins/builtins-inl.h @@ -217,6 +217,29 @@ constexpr bool Builtins::IsJSEntryVariant(Builtin builtin) { UNREACHABLE(); } +#ifdef V8_ENABLE_WEBASSEMBLY + +// static +template +constexpr size_t Builtins::WasmBuiltinHandleArrayIndex() { + constexpr size_t index = + std::find(std::begin(Builtins::kWasmIndirectlyCallableBuiltins), + std::end(Builtins::kWasmIndirectlyCallableBuiltins), builtin) - + std::begin(Builtins::kWasmIndirectlyCallableBuiltins); + static_assert(Builtins::kWasmIndirectlyCallableBuiltins[index] == builtin); + return index; +} + +// static +template +wasm::WasmCodePointerTable::Handle Builtins::WasmBuiltinHandleOf( + Isolate* isolate) { + return isolate + ->wasm_builtin_code_handles()[WasmBuiltinHandleArrayIndex()]; +} + +#endif // V8_ENABLE_WEBASSEMBLY + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index de8df3b82c12c2..585195eb16473a 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -25,6 +25,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // ----------------------------------------------------------------------------- // TurboFan support builtins. @@ -327,7 +329,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { shared_barrier_slow(this), generational_barrier_slow(this); // During incremental marking we always reach this slow path, so we need to - // check whether this is a old-to-new or old-to-shared reference. + // check whether this is an old-to-new or old-to-shared reference. 
TNode object = BitcastTaggedToWord( UncheckedParameter(WriteBarrierDescriptor::kObject)); @@ -1686,5 +1688,7 @@ TF_BUILTIN(GetOwnPropertyDescriptor, CodeStubAssembler) { key); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc index 1691b35405f256..ea1e9a3c350d3a 100644 --- a/deps/v8/src/builtins/builtins-intl-gen.cc +++ b/deps/v8/src/builtins/builtins-intl-gen.cc @@ -17,6 +17,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class IntlBuiltinsAssembler : public CodeStubAssembler { public: explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -289,5 +291,7 @@ TF_BUILTIN(ListFormatPrototypeFormatToParts, IntlBuiltinsAssembler) { Runtime::kFormatListToParts, "Intl.ListFormat.prototype.formatToParts"); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index 59988362f9d6c2..f1ae419481a75c 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -18,6 +18,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + using IteratorRecord = TorqueStructIteratorRecord; TNode IteratorBuiltinsAssembler::GetIteratorMethod( @@ -535,5 +537,7 @@ TF_BUILTIN(IterableToFixedArrayWithSymbolLookupSlow, iterator_fn); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc index c439b09d139782..cacf4b93e9be49 100644 --- a/deps/v8/src/builtins/builtins-lazy-gen.cc +++ b/deps/v8/src/builtins/builtins-lazy-gen.cc @@ -8,12 +8,14 @@ #include "src/builtins/builtins.h" #include "src/common/globals.h" #include "src/objects/code-inl.h" -#include "src/objects/feedback-vector.h" +#include "src/objects/feedback-vector-inl.h" #include "src/objects/shared-function-info.h" namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + void LazyBuiltinsAssembler::GenerateTailCallToJSCode( TNode code, TNode function) { auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); @@ -38,11 +40,10 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( LoadObjectField(feedback_vector, FeedbackVector::kFlagsOffset); // Fall through if no optimization trigger or optimized code. - GotoIfNot( - IsSetWord32(flags, FeedbackVector::kFlagsHasAnyOptimizedCode | - FeedbackVector::kFlagsTieringStateIsAnyRequested | - FeedbackVector::kFlagsLogNextExecution), - &fallthrough); + constexpr uint32_t kFlagMask = + FeedbackVector::FlagMaskForNeedsProcessingCheckFrom( + CodeKind::INTERPRETED_FUNCTION); + GotoIfNot(IsSetWord32(flags, kFlagMask), &fallthrough); GotoIfNot( IsSetWord32(flags, FeedbackVector::kFlagsTieringStateIsAnyRequested), @@ -51,12 +52,21 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( BIND(&maybe_needs_logging); { +#ifdef V8_ENABLE_LEAPTIERING + // In the leaptiering case, we don't tier up to optimized code through the + // feedback vector (but instead through the dispatch table), so we can only + // get here if kFlagsLogNextExecution is set. 
+ CSA_DCHECK(this, + IsSetWord32(flags, FeedbackVector::kFlagsLogNextExecution)); +#else GotoIfNot(IsSetWord32(flags, FeedbackVector::kFlagsLogNextExecution), &may_have_optimized_code); +#endif GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution, function); } +#ifndef V8_ENABLE_LEAPTIERING BIND(&may_have_optimized_code); { Label heal_optimized_code_slot(this); @@ -86,6 +96,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( BIND(&heal_optimized_code_slot); GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, function); } +#endif // V8_ENABLE_LEAPTIERING // Fall-through if the optimized code cell is clear and the tiering state is // kNone. @@ -112,7 +123,12 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstantNoHole(BUILTIN_CODE( isolate(), CompileLazy)))); + USE(sfi_code); +#ifndef V8_ENABLE_LEAPTIERING + // In the leaptiering case, the code is installed below, through the + // InstallSFICode runtime function. StoreCodePointerField(function, JSFunction::kCodeOffset, sfi_code); +#endif // V8_ENABLE_LEAPTIERING Label maybe_use_sfi_code(this); // If there is no feedback, don't check for optimized code. @@ -131,6 +147,13 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { // A usual case would be the InterpreterEntryTrampoline to start executing // existing bytecode. BIND(&maybe_use_sfi_code); +#ifdef V8_ENABLE_LEAPTIERING + // In the leaptiering case, we now simply install the code of the SFI on the + // function's dispatch table entry and call it. Installing the code is + // necessary as the dispatch table entry may still contain the CompileLazy + // builtin at this point (we can only update dispatch table code from C++). + GenerateTailCallToReturnedCode(Runtime::kInstallSFICode, function); +#else Label tailcall_code(this), baseline(this); TVARIABLE(Code, code); @@ -153,6 +176,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { BIND(&tailcall_code); GenerateTailCallToJSCode(code.value(), function); +#endif // V8_ENABLE_LEAPTIERING BIND(&compile_function); GenerateTailCallToReturnedCode(Runtime::kCompileLazy, function); @@ -168,10 +192,14 @@ TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) { auto function = Parameter(Descriptor::kTarget); TNode code = HeapConstantNoHole(BUILTIN_CODE(isolate(), CompileLazy)); +#ifndef V8_ENABLE_LEAPTIERING // Set the code slot inside the JSFunction to CompileLazy. 
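An aside on the V8_ENABLE_LEAPTIERING branches above: with leaptiering, a function's executable code is reached through a shared dispatch table whose entries only the runtime may rewrite (hence the tail call to Runtime::kInstallSFICode), rather than through a per-function code slot. A minimal standalone sketch of the two install paths, using hypothetical types rather than V8's API:

#include <cstdio>
#include <vector>

using CodePtr = void (*)();

void CompileLazyStub() { std::puts("stub: compile, then install real code"); }
void CompiledCode() { std::puts("running compiled code"); }

// Without leaptiering: each function object carries its own code pointer,
// which the lazy-compile path overwrites in place.
struct FunctionWithCodeSlot {
  CodePtr code = CompileLazyStub;
};

// With leaptiering: functions hold a handle into a shared dispatch table,
// and only the runtime-side InstallCode may rewrite an entry.
struct DispatchTable {
  std::vector<CodePtr> entries;
  int NewEntry() {
    entries.push_back(CompileLazyStub);
    return static_cast<int>(entries.size()) - 1;
  }
  void InstallCode(int handle, CodePtr code) { entries[handle] = code; }
  void Call(int handle) { entries[handle](); }
};

int main() {
  FunctionWithCodeSlot f;
  f.code = CompiledCode;  // direct install into the function's code slot
  f.code();

  DispatchTable table;
  int handle = table.NewEntry();
  table.InstallCode(handle, CompiledCode);  // install through the table
  table.Call(handle);
}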
StoreCodePointerField(function, JSFunction::kCodeOffset, code); +#endif // V8_ENABLE_LEAPTIERING GenerateTailCallToJSCode(code, function); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index 5fb3d3badeb59e..8d419906cbbc1e 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -14,6 +14,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + using compiler::ScopedExceptionHandler; class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler { @@ -623,5 +625,7 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) { } } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc index 12ea3c2dd63e47..28e29cd61871da 100644 --- a/deps/v8/src/builtins/builtins-number-gen.cc +++ b/deps/v8/src/builtins/builtins-number-gen.cc @@ -11,6 +11,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // ----------------------------------------------------------------------------- // ES6 section 20.1 Number Objects @@ -118,7 +120,9 @@ DEF_BINOP_RHS_SMI(ShiftRightLogicalSmi_Baseline, \ Return(result); \ } +#ifndef V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback) +#endif DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback) DEF_UNOP(Increment_WithFeedback, Generate_IncrementWithFeedback) DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback) @@ -293,5 +297,7 @@ TF_BUILTIN(StrictEqual_Baseline, CodeStubAssembler) { Return(result); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-number-tsa.cc b/deps/v8/src/builtins/builtins-number-tsa.cc new file mode 100644 index 00000000000000..0a390f14374195 --- /dev/null +++ b/deps/v8/src/builtins/builtins-number-tsa.cc @@ -0,0 +1,47 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/builtins-utils-gen.h" +#include "src/builtins/number-builtins-reducer-inl.h" +#include "src/codegen/turboshaft-builtins-assembler-inl.h" + +namespace v8::internal { + +#include "src/compiler/turboshaft/define-assembler-macros.inc" + +using namespace compiler::turboshaft; // NOLINT(build/namespaces) + +class NumberBuiltinsAssemblerTS + : public TurboshaftBuiltinsAssembler { + public: + using Base = TurboshaftBuiltinsAssembler; + + using Base::Asm; + using Base::Base; +}; + +#ifdef V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS + +TS_BUILTIN(BitwiseNot_WithFeedback, NumberBuiltinsAssemblerTS) { + // TODO(nicohartmann): It would be great to deduce the parameter type from the + // Descriptor directly. 
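The new builtins-number-tsa.cc composes its assembler from reducer layers (see number-builtins-reducer-inl.h in the diffstat). As a loose standalone analogy for that composition, not the real Turboshaft types, CRTP-style mixins let one shared feedback-collecting layer sit underneath many builtin-specific layers:

#include <iostream>

// Hypothetical stand-ins for the reducer stack; each layer adds operations
// on top of the layer below it.
struct AssemblerBase {
  void Emit(const char* op) { std::cout << "emit: " << op << "\n"; }
};

template <typename Next>
struct FeedbackCollectorReducer : Next {
  void CombineFeedback(int bits) {
    this->Emit("update feedback");
    feedback_ |= bits;
  }
  int feedback_ = 0;
};

template <typename Next>
struct NumberBuiltinsReducer : Next {
  void BitwiseNot() {
    this->Emit("word32 bitwise-not");
    this->CombineFeedback(/*kSignedSmallBit=*/1);  // provided by layer below
  }
};

using NumberAssembler =
    NumberBuiltinsReducer<FeedbackCollectorReducer<AssemblerBase>>;

int main() {
  NumberAssembler assembler;
  assembler.BitwiseNot();
  std::cout << "feedback bits: " << assembler.feedback_ << "\n";
}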
+ V value = Parameter(Descriptor::kValue); + V context = Parameter(Descriptor::kContext); + V feedback_vector = + Parameter(Descriptor::kFeedbackVector); + V slot = Parameter(Descriptor::kSlot); + + SetFeedbackSlot(slot); + SetFeedbackVector(feedback_vector); + + V result = BitwiseNot(context, value); + Return(result); +} + +#endif // V8_ENABLE_EXPERIMENTAL_TSA_BUILTINS + +#include "src/compiler/turboshaft/undef-assembler-macros.inc" + +} // namespace v8::internal diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index a7f202b0fccf0f..9eab7c3cb267a3 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -24,6 +24,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { public: explicit ObjectEntriesValuesBuiltinsAssembler( @@ -430,8 +432,37 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { TNode from = ToObject_Inline(context, source); TNode from_map = LoadMap(from); + // For the fast case we want the source to be a JSObject. + GotoIfNot(IsJSObjectMap(from_map), &slow_path); + TNode to_map = LoadMap(to); + // Chances that the fast cloning is possible are very low in case source + // and target maps belong to different native contexts (the only case + // it'd work is if the |from| object doesn't have enumerable properties) + // or if one of them is a remote JS object. + // TODO(olivf): Re-Evaluate this once we have a representation for "no + // enumerable properties" state in an Object.assign sidestep transition. + { + TNode to_meta_map = LoadMap(to_map); + GotoIfNot(TaggedEqual(LoadMap(from_map), to_meta_map), &slow_path); + + // For the fast case we want the target to be a fresh empty object + // literal from current context. + // TODO(olivf): consider extending the fast path to a case when source + // and target objects are from the same context but not necessarily from + // current one. + TNode native_context = LoadNativeContext(context); + TNode empty_object_literal_map = + LoadObjectFunctionInitialMap(native_context); + GotoIfNot(TaggedEqual(to_map, empty_object_literal_map), &slow_path); + // Double-check that the meta map is not contextless. + CSA_DCHECK(this, + TaggedEqual(native_context, + LoadMapConstructorOrBackPointerOrNativeContext( + to_meta_map))); + } + // Chances are very slim that cloning is possible if we have different // instance sizes. // TODO(olivf): Re-Evaluate this once we have a faster target map lookup @@ -461,14 +492,6 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { Word32And(target_field3, field3_descriptors_and_extensible_mask)), &slow_path); - // For the fastcase we want the source to be a JSObject and the target a - // fresh empty object literal. - TNode native_context = LoadNativeContext(context); - TNode empty_object_literal_map = - LoadObjectFunctionInitialMap(native_context); - GotoIfNot(TaggedEqual(to_map, empty_object_literal_map), &slow_path); - GotoIfNot(IsJSObjectMap(from_map), &slow_path); - // Check that the source is in fastmode, not a prototype and not deprecated. TNode source_field3 = LoadMapBitField3(from_map); TNode field3_exclusion_mask_const = @@ -488,12 +511,19 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { GotoIfNot(TaggedEqual(LoadElements(CAST(to)), EmptyFixedArrayConstant()), &slow_path); + // Ensure the properties field is not used to store a hash.
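The hash guard added below exists because a receiver's properties slot is overloaded: it may hold a properties backing store, or an identity hash stored as a Smi, and a fast clone must not mistake a hash for properties. A standalone sketch of such an overloaded slot, with hypothetical types rather than V8's real layout:

#include <cassert>
#include <cstdint>
#include <variant>
#include <vector>

struct PropertyArray {
  std::vector<int> fields;
};

// One slot, two meanings: an identity hash, or out-of-line properties.
using PropertiesOrHash = std::variant<uint32_t, PropertyArray*>;

bool FastClonable(const PropertiesOrHash& slot) {
  // Mirror of the new guard: bail to the slow path when the slot holds a
  // hash, since a fast clone would have to re-home it.
  return !std::holds_alternative<uint32_t>(slot);
}

int main() {
  PropertyArray props;
  assert(FastClonable(PropertiesOrHash{&props}));
  assert(!FastClonable(PropertiesOrHash{uint32_t{0x1234}}));
}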
+ TNode properties = LoadJSReceiverPropertiesOrHash(to); + GotoIf(TaggedIsSmi(properties), &slow_path); + CSA_DCHECK(this, + Word32Or(TaggedEqual(properties, EmptyFixedArrayConstant()), + IsPropertyArray(CAST(properties)))); + Label continue_fast_path(this), runtime_map_lookup(this, Label::kDeferred); // Check if our particular source->target combination is fast clonable. // E.g., this ensures that we only have fast properties and in general that // the binary layout is compatible for `FastCloneJSObject`. - // If suche a clone map exists then it can be found in the transition array + // If such a clone map exists then it can be found in the transition array // with object_assign_clone_transition_symbol as a key. If this transition // slot is cleared, then the map is not clonable. If the key is missing // from the transitions we rely on the runtime function @@ -1712,5 +1742,8 @@ TNode ObjectBuiltinsAssembler::GetAccessorOrUndefined( BIND(&return_result); return result.value(); } + +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc index 6852bb95102381..207c2d6521347c 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.cc +++ b/deps/v8/src/builtins/builtins-proxy-gen.cc @@ -17,6 +17,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + TNode ProxiesCodeStubAssembler::AllocateProxy( TNode context, TNode target, TNode handler) { @@ -75,11 +77,10 @@ TNode ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( TNode ProxiesCodeStubAssembler::AllocateProxyRevokeFunction( TNode context, TNode proxy) { const TNode native_context = LoadNativeContext(context); - const TNode proxy_context = CreateProxyRevokeFunctionContext(proxy, native_context); return AllocateRootFunctionWithContext(RootIndex::kProxyRevokeSharedFun, - proxy_context); + proxy_context, native_context); } TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) { @@ -427,5 +428,7 @@ void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode context, BIND(&check_passed); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 706f80861b6312..39fb71ebec0886 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -24,6 +24,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // Tail calls the regular expression interpreter. 
// static void Builtins::Generate_RegExpInterpreterTrampoline(MacroAssembler* masm) { @@ -1702,5 +1704,7 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( return var_result.value(); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-shadow-realm-gen.cc b/deps/v8/src/builtins/builtins-shadow-realm-gen.cc index b634d58b86e839..8e7a29bd0bed3e 100644 --- a/deps/v8/src/builtins/builtins-shadow-realm-gen.cc +++ b/deps/v8/src/builtins/builtins-shadow-realm-gen.cc @@ -12,6 +12,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class ShadowRealmBuiltinsAssembler : public CodeStubAssembler { public: explicit ShadowRealmBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -82,7 +84,8 @@ ShadowRealmBuiltinsAssembler::AllocateImportValueFulfilledFunction( CreateImportValueFulfilledFunctionContext(caller_context, eval_context, specifier, export_name); return AllocateRootFunctionWithContext( - RootIndex::kShadowRealmImportValueFulfilledSharedFun, function_context); + RootIndex::kShadowRealmImportValueFulfilledSharedFun, function_context, + {}); } void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode array, @@ -423,5 +426,7 @@ TF_BUILTIN(ShadowRealmImportValueRejected, ShadowRealmBuiltinsAssembler) { exception); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc index a29b1cebee48bf..466bd7c8ca3cbb 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -10,6 +10,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { public: explicit SharedArrayBufferBuiltinsAssembler( @@ -817,5 +819,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 420e107e9501a7..5906cd3b902a81 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -20,6 +20,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + TNode StringBuiltinsAssembler::DirectStringData( TNode string, TNode string_instance_type) { // Compute the effective offset of the first character. 
@@ -2099,5 +2101,7 @@ TNode StringBuiltinsAssembler::SubString(TNode string, return var_result.value(); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-string-tsa.cc b/deps/v8/src/builtins/builtins-string-tsa.cc index 91ee86b9aa468c..b33658b8acad57 100644 --- a/deps/v8/src/builtins/builtins-string-tsa.cc +++ b/deps/v8/src/builtins/builtins-string-tsa.cc @@ -137,7 +137,8 @@ class StringBuiltinsReducer : public Next { }; class StringBuiltinsAssemblerTS - : public TurboshaftBuiltinsAssembler { + : public TurboshaftBuiltinsAssembler { public: using Base = TurboshaftBuiltinsAssembler; diff --git a/deps/v8/src/builtins/builtins-temporal-gen.cc b/deps/v8/src/builtins/builtins-temporal-gen.cc index 3417d9929b1544..7e873b3757a641 100644 --- a/deps/v8/src/builtins/builtins-temporal-gen.cc +++ b/deps/v8/src/builtins/builtins-temporal-gen.cc @@ -13,6 +13,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + class TemporalBuiltinsAssembler : public IteratorBuiltinsAssembler { public: explicit TemporalBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -218,5 +220,7 @@ TF_BUILTIN(TemporalCalendarPrototypeFields, TemporalBuiltinsAssembler) { Return(CalendarFieldsArrayFromIterable(context, calendar, iterable)); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index d27119a31a231a..2c093accbb1d49 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -17,6 +17,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + // ----------------------------------------------------------------------------- // ES6 section 22.2 TypedArray Objects @@ -655,5 +657,8 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) { BIND(&return_undefined); Return(UndefinedConstant()); } + +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h index 6e46ad149dcc18..260b1afd0ee28d 100644 --- a/deps/v8/src/builtins/builtins-utils-gen.h +++ b/deps/v8/src/builtins/builtins-utils-gen.h @@ -58,26 +58,33 @@ class CodeAssemblerState; } \ void Name##Assembler::Generate##Name##Impl() -#define TS_BUILTIN(Name, BaseAssembler) \ - class Name##Assembler : public BaseAssembler { \ - public: \ - using Descriptor = Builtin_##Name##_InterfaceDescriptor; \ - Name##Assembler(compiler::turboshaft::PipelineData* data, \ - Isolate* isolate, compiler::turboshaft::Graph& graph, \ - Zone* phase_zone) \ - : BaseAssembler(data, graph, phase_zone) {} \ - void Generate##Name##Impl(); \ - using BaseAssembler::Asm; \ - }; \ - void Builtins::Generate_##Name( \ - compiler::turboshaft::PipelineData* data, Isolate* isolate, \ - compiler::turboshaft::Graph& graph, Zone* phase_zone) { \ - Name##Assembler assembler(data, isolate, graph, phase_zone); \ - assembler.EmitBuiltinProlog(Builtin::k##Name); \ - assembler.Generate##Name##Impl(); \ - /* Builtin definition must generate something! 
*/ \ - DCHECK_GT(graph.op_id_count(), 0); \ - } \ +#define TS_BUILTIN(Name, BaseAssembler) \ + class Name##Assembler : public BaseAssembler { \ + public: \ + using Descriptor = Builtin_##Name##_InterfaceDescriptor; \ + Name##Assembler(compiler::turboshaft::PipelineData* data, \ + Isolate* isolate, compiler::turboshaft::Graph& graph, \ + Zone* phase_zone) \ + : BaseAssembler(data, graph, phase_zone) {} \ + void Generate##Name##Impl(); \ + }; \ + void Builtins::Generate_##Name( \ + compiler::turboshaft::PipelineData* data, Isolate* isolate, \ + compiler::turboshaft::Graph& graph, Zone* phase_zone) { \ + Name##Assembler assembler(data, isolate, graph, phase_zone); \ + assembler.EmitBuiltinProlog(Builtin::k##Name); \ + Block* catch_block = nullptr; \ + std::optional catch_scope; \ + /* If this builtin collects feedback, we need to setup a catch block */ \ + if (assembler.HasFeedbackCollector()) { \ + catch_block = assembler.NewBlock(); \ + catch_scope.emplace(assembler, catch_block); \ + } \ + assembler.Generate##Name##Impl(); \ + /* Builtin definition must generate something! */ \ + DCHECK_GT(graph.op_id_count(), 0); \ + assembler.EmitEpilog(catch_block); \ + } \ void Name##Assembler::Generate##Name##Impl() } // namespace internal diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc index b24bc52a4b2294..0064167e04f91c 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.cc +++ b/deps/v8/src/builtins/builtins-wasm-gen.cc @@ -13,9 +13,11 @@ namespace v8::internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + TNode WasmBuiltinsAssembler::LoadInstanceDataFromFrame() { - return CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceOffset)); + return CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceDataOffset)); } TNode @@ -28,32 +30,32 @@ WasmBuiltinsAssembler::LoadTrustedDataFromInstance( TNode WasmBuiltinsAssembler::LoadContextFromWasmOrJsFrame() { static_assert(BuiltinFrameConstants::kFunctionOffset == - WasmFrameConstants::kWasmInstanceOffset); + WasmFrameConstants::kWasmInstanceDataOffset); TVARIABLE(NativeContext, context_result); TNode function_or_instance = - CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceOffset)); - Label js(this); - Label apifunc(this); + CAST(LoadFromParentFrame(WasmFrameConstants::kWasmInstanceDataOffset)); + Label is_js_function(this); + Label is_import_data(this); Label done(this); TNode instance_type = LoadMapInstanceType(LoadMap(function_or_instance)); - GotoIf(IsJSFunctionInstanceType(instance_type), &js); + GotoIf(IsJSFunctionInstanceType(instance_type), &is_js_function); GotoIf(Word32Equal(instance_type, Int32Constant(WASM_IMPORT_DATA_TYPE)), - &apifunc); + &is_import_data); context_result = LoadContextFromInstanceData(CAST(function_or_instance)); Goto(&done); - BIND(&js); + BIND(&is_js_function); TNode function = CAST(function_or_instance); TNode context = LoadObjectField(function, JSFunction::kContextOffset); context_result = LoadNativeContext(context); Goto(&done); - BIND(&apifunc); - TNode apiref = CAST(function_or_instance); + BIND(&is_import_data); + TNode import_data = CAST(function_or_instance); context_result = LoadObjectField( - apiref, WasmImportData::kNativeContextOffset); + import_data, WasmImportData::kNativeContextOffset); Goto(&done); BIND(&done); @@ -167,13 +169,15 @@ TF_BUILTIN(WasmToJsWrapperCSA, WasmBuiltinsAssembler) { } TF_BUILTIN(WasmToJsWrapperInvalidSig, WasmBuiltinsAssembler) { - TNode ref = + TNode data = 
UncheckedParameter(Descriptor::kWasmImportData); TNode context = - LoadObjectField(ref, WasmImportData::kNativeContextOffset); + LoadObjectField(data, WasmImportData::kNativeContextOffset); CallRuntime(Runtime::kWasmThrowJSTypeError, context); Unreachable(); } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace v8::internal diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index 90ede49832c3ce..14eaade68709b9 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -434,6 +434,7 @@ Handle Builtins::CreateInterpreterEntryTrampolineForProfiling( desc.handler_table_offset = instruction_size; desc.constant_pool_offset = instruction_size; desc.code_comments_offset = instruction_size; + desc.builtin_jump_table_info_offset = instruction_size; CodeDesc::Verify(&desc); diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index f3974d2813ad29..5500654255e471 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h @@ -11,6 +11,10 @@ #include "src/objects/type-hints.h" #include "src/sandbox/code-entrypoint-tag.h" +#ifdef V8_ENABLE_WEBASSEMBLY +#include "src/wasm/wasm-code-pointer-table.h" +#endif + namespace v8 { namespace internal { @@ -107,6 +111,21 @@ class Builtins { kLastBytecodeHandlerPlusOne == kBuiltinCount; static_assert(kBytecodeHandlersAreSortedLast); +#ifdef V8_ENABLE_WEBASSEMBLY + // The list of builtins that can be called indirectly from Wasm and need an + // entry in the WasmCodePointerTable. + static constexpr Builtin kWasmIndirectlyCallableBuiltins[] = { + Builtin::kWasmToOnHeapWasmToJsTrampoline, + Builtin::kWasmToJsWrapperInvalidSig, Builtin::kWasmToJsWrapperAsm}; + static constexpr size_t kNumWasmIndirectlyCallableBuiltins = + arraysize(kWasmIndirectlyCallableBuiltins); + using WasmBuiltinHandleArray = + wasm::WasmCodePointerTable::Handle[kNumWasmIndirectlyCallableBuiltins]; + // TODO(sroettger): this can be consteval, but the gcc bot doesn't support it. + template + static constexpr size_t WasmBuiltinHandleArrayIndex(); +#endif + static constexpr bool IsBuiltinId(Builtin builtin) { return builtin != Builtin::kNoBuiltinId; } @@ -201,6 +220,13 @@ class Builtins { // builtin_entry_table, initialized earlier via {InitializeIsolateDataTables}. static inline Address EntryOf(Builtin builtin, Isolate* isolate); +#ifdef V8_ENABLE_WEBASSEMBLY + // Returns a handle to the WasmCodePointerTable entry for a given builtin. + template + static inline wasm::WasmCodePointerTable::Handle WasmBuiltinHandleOf( + Isolate* isolate); +#endif + V8_EXPORT_PRIVATE static Kind KindOf(Builtin builtin); static const char* KindNameOf(Builtin builtin); diff --git a/deps/v8/src/builtins/data-view-ops.h b/deps/v8/src/builtins/data-view-ops.h index 151cf964e37d2a..e72ecf19dcd933 100644 --- a/deps/v8/src/builtins/data-view-ops.h +++ b/deps/v8/src/builtins/data-view-ops.h @@ -7,6 +7,8 @@ #include +#include "src/base/logging.h" + // DataView operations that are handled as well-known imports. 
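DATAVIEW_OP_LIST below is an X-macro: a single operation list is expanded twice, once to declare the DataViewOp enumerators and once to build the ToString table. A self-contained illustration of the pattern, with hypothetical names:

#include <cstdio>

#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

enum class Color {
#define V(Name) k##Name,
  COLOR_LIST(V)
#undef V
};

constexpr const char* ToString(Color c) {
  switch (c) {
#define V(Name)        \
  case Color::k##Name: \
    return #Name;
    COLOR_LIST(V)
#undef V
  }
  return "unknown";  // not reachable while the switch covers the list
}

int main() { std::puts(ToString(Color::kGreen)); }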
#define DATAVIEW_OP_LIST(V) \ V(BigInt64) \ @@ -34,10 +36,12 @@ constexpr const char* ToString(DataViewOp op) { return "DataView.prototype.get" #Name; \ case DataViewOp::kSet##Name: \ return "DataView.prototype.set" #Name; - DATAVIEW_OP_LIST(V) + DATAVIEW_OP_LIST(V) #undef V - case DataViewOp::kByteLength: - return "get DataView.prototype.byteLength"; + case DataViewOp::kByteLength: + return "get DataView.prototype.byteLength"; + default: + UNREACHABLE(); } } diff --git a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc index 5b3c297360ce37..57f100727c06d5 100644 --- a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc +++ b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc @@ -61,13 +61,13 @@ void WriteHeader(const char* header_filename) { WriteBytecode(out, Bytecode::k##Name, operand_scale, &count, offset_table, \ index++); OperandScale operand_scale = OperandScale::kSingle; - BYTECODE_LIST(ADD_BYTECODES) + BYTECODE_LIST(ADD_BYTECODES, ADD_BYTECODES) int single_count = count; operand_scale = OperandScale::kDouble; - BYTECODE_LIST(ADD_BYTECODES) + BYTECODE_LIST(ADD_BYTECODES, ADD_BYTECODES) int wide_count = count - single_count; operand_scale = OperandScale::kQuadruple; - BYTECODE_LIST(ADD_BYTECODES) + BYTECODE_LIST(ADD_BYTECODES, ADD_BYTECODES) #undef ADD_BYTECODES int extra_wide_count = count - wide_count - single_count; CHECK_GT(single_count, wide_count); diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc index f361964b50baa1..67bbbc87b25660 100644 --- a/deps/v8/src/builtins/growable-fixed-array-gen.cc +++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc @@ -11,6 +11,8 @@ namespace v8 { namespace internal { +#include "src/codegen/define-code-stub-assembler-macros.inc" + void GrowableFixedArray::Push(const TNode value) { const TNode length = var_length_.value(); const TNode capacity = var_capacity_.value(); @@ -100,5 +102,7 @@ TNode GrowableFixedArray::ResizeFixedArray( return to_array; } +#include "src/codegen/undef-code-stub-assembler-macros.inc" + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index fedea9612d0a19..3435f122cb18ed 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -2971,8 +2971,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ cmp(maybe_target_code, Immediate(0)); __ j(not_equal, &jump_to_optimized_code, Label::kNear); } @@ -3094,11 +3095,11 @@ void RestoreWasmParams(MacroAssembler* masm, int offset) { // When this builtin is called, the topmost stack entry is the calling pc. // This is replaced with the following: // -// [ calling pc ] <-- esp; popped by {ret}. -// [ feedback vector ] -// [ Wasm instance ] -// [ WASM frame marker ] -// [ saved ebp ] <-- ebp; this is where "calling pc" used to be. +// [ calling pc ] <-- esp; popped by {ret}. 
+// [ feedback vector ] +// [ Wasm instance data ] +// [ WASM frame marker ] +// [ saved ebp ] <-- ebp; this is where "calling pc" used to be. void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { constexpr Register func_index = wasm::kLiftoffFrameSetupFunctionReg; @@ -3116,14 +3117,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Push(tmp); // This is the "instance" slot. // Stack layout is now: - // [calling pc] <-- instance_slot <-- esp + // [calling pc] <-- instance_data_slot <-- esp // [saved tmp] <-- marker_slot // [saved ebp] Operand marker_slot = Operand(ebp, WasmFrameConstants::kFrameTypeOffset); - Operand instance_slot = Operand(ebp, WasmFrameConstants::kWasmInstanceOffset); + Operand instance_data_slot = + Operand(ebp, WasmFrameConstants::kWasmInstanceDataOffset); - // Load the feedback vector. - __ mov(tmp, FieldOperand(kWasmInstanceRegister, + // Load the feedback vector from the trusted instance data. + __ mov(tmp, FieldOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ mov(tmp, FieldOperand(tmp, func_index, times_tagged_size, FixedArray::kHeaderSize)); @@ -3132,9 +3134,9 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { // Vector exists. Finish setting up the stack frame. __ Push(tmp); // Feedback vector. - __ mov(tmp, instance_slot); // Calling PC. + __ mov(tmp, instance_data_slot); // Calling PC. __ Push(tmp); - __ mov(instance_slot, kWasmInstanceRegister); + __ mov(instance_data_slot, kWasmImplicitArgRegister); __ mov(tmp, marker_slot); __ mov(marker_slot, Immediate(StackFrame::TypeToMarker(StackFrame::WASM))); __ ret(0); @@ -3147,7 +3149,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { // // [ reserved slot for NativeModule ] <-- arg[2] // [ ("declared") function index ] <-- arg[1] for runtime func. - // [ Wasm instance ] <-- arg[0] + // [ Wasm instance data ] <-- arg[0] // [ ...spilled Wasm parameters... ] // [ calling pc ] <-- already in place // [ WASM_LIFTOFF_SETUP marker ] @@ -3160,7 +3162,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { int offset = SaveWasmParams(masm); // Arguments to the runtime function: instance, func_index. - __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ SmiTag(func_index); __ Push(func_index); // Allocate a stack slot where the runtime function can spill a pointer @@ -3174,16 +3176,16 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { RestoreWasmParams(masm, offset); // Finish setting up the stack frame: - // [ calling pc ] - // (tmp reg) ---> [ feedback vector ] - // [ calling pc ] => [ Wasm instance ] <-- instance_slot - // [ WASM_LIFTOFF_SETUP marker ] [ WASM marker ] <-- marker_slot - // [ saved ebp ] [ saved ebp ] + // [ calling pc ] + // (tmp reg) ---> [ feedback vector ] + // [ calling pc ] => [ Wasm instance data ] <-- instance_data_slot + // [ WASM_LIFTOFF_SETUP ] [ WASM ] <-- marker_slot + // [ saved ebp ] [ saved ebp ] __ mov(marker_slot, Immediate(StackFrame::TypeToMarker(StackFrame::WASM))); __ Push(tmp); // Feedback vector. - __ mov(tmp, instance_slot); // Calling PC. + __ mov(tmp, instance_data_slot); // Calling PC. __ Push(tmp); - __ mov(instance_slot, kWasmInstanceRegister); + __ mov(instance_data_slot, kWasmImplicitArgRegister); __ ret(0); } @@ -3197,7 +3199,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { int offset = SaveWasmParams(masm); // Push arguments for the runtime function. 
- __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ Push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. @@ -3210,9 +3212,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { RestoreWasmParams(masm, offset); - // After the instance register has been restored, we can add the jump table - // start to the jump table offset already stored in edi. - __ add(edi, MemOperand(kWasmInstanceRegister, + // After the instance data register has been restored, we can add the jump + // table start to the jump table offset already stored in edi. + __ add(edi, MemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset - kHeapObjectTag)); } @@ -3393,18 +3395,19 @@ void ReloadParentContinuation(MacroAssembler* masm, Register promise, } // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { - __ Move(scratch, FieldOperand(ref, HeapObject::kMapOffset)); +// depending on the data's type, and places the result in the input register. +void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { + __ Move(scratch, FieldOperand(data, HeapObject::kMapOffset)); __ CmpInstanceType(scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE); Label instance; Label end; __ j(equal, &instance); - __ Move(ref, FieldOperand(ref, WasmImportData::kNativeContextOffset)); + __ Move(data, FieldOperand(data, WasmImportData::kNativeContextOffset)); __ jmp(&end); __ bind(&instance); - __ Move(ref, - FieldOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + __ Move(data, + FieldOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3436,7 +3439,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, } void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { - __ mov(MemOperand(ebp, StackSwitchFrameConstants::kRefOffset), Immediate(0)); + __ mov(MemOperand(ebp, StackSwitchFrameConstants::kImplicitArgOffset), + Immediate(0)); __ mov(MemOperand(ebp, StackSwitchFrameConstants::kResultArrayOffset), Immediate(0)); } @@ -3526,8 +3530,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, Register tmp, __ LoadRoot(promise, RootIndex::kActiveSuspender); __ Move(promise, FieldOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ mov(kContextRegister, - MemOperand(ebp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister, tmp); + MemOperand(ebp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp, tmp2); @@ -3572,12 +3576,12 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm, __ Move(promise, FieldOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ mov(kContextRegister, - MemOperand(ebp, StackSwitchFrameConstants::kRefOffset)); + MemOperand(ebp, StackSwitchFrameConstants::kImplicitArgOffset)); constexpr Register tmp1 = edi; static_assert(tmp1 != promise && tmp1 != reason && tmp1 != kContextRegister); constexpr Register tmp2 = edx; static_assert(tmp2 != promise && tmp2 != reason && tmp2 != kContextRegister); - GetContextFromRef(masm, kContextRegister, tmp1); + GetContextFromImplicitArg(masm, kContextRegister, tmp1); ReloadParentContinuation(masm, promise, 
reason, kContextRegister, tmp1, tmp2); __ Push(promise); RestoreParentSuspender(masm, promise, edi); @@ -3624,8 +3628,8 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { if (stack_switch) { // Preserve wasm_instance across the switch. __ mov(eax, MemOperand(original_fp, - JSToWasmWrapperFrameConstants::kRefParamOffset)); - __ mov(MemOperand(ebp, StackSwitchFrameConstants::kRefOffset), eax); + JSToWasmWrapperFrameConstants::kImplicitArgOffset)); + __ mov(MemOperand(ebp, StackSwitchFrameConstants::kImplicitArgOffset), eax); Register result_array = eax; __ mov(result_array, @@ -3670,7 +3674,8 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register last_stack_param = ecx; - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -3719,13 +3724,13 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // parameter registers. Make sure it overlaps with the last one we fill. DCHECK_EQ(params_start, wasm::kGpParamRegisters[1]); - // Pick up the instance from frame. + // Load the implicit argument (instance data or import data) from the frame. if (stack_switch) { - __ mov(kWasmInstanceRegister, - MemOperand(ebp, StackSwitchFrameConstants::kRefOffset)); + __ mov(kWasmImplicitArgRegister, + MemOperand(ebp, StackSwitchFrameConstants::kImplicitArgOffset)); } else { - __ mov(kWasmInstanceRegister, - MemOperand(ebp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + __ mov(kWasmImplicitArgRegister, + MemOperand(ebp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); } Register call_target = edi; @@ -3770,17 +3775,17 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // ecx: the result JSArray for multi-return. // edx: pointer to the byte buffer which contains all parameters. if (stack_switch) { - __ mov(eax, MemOperand(ebp, StackSwitchFrameConstants::kRefOffset)); + __ mov(eax, MemOperand(ebp, StackSwitchFrameConstants::kImplicitArgOffset)); __ mov(ecx, MemOperand(ebp, StackSwitchFrameConstants::kResultArrayOffset)); } else { __ mov(eax, - MemOperand(ebp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + MemOperand(ebp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); __ mov(ecx, MemOperand(ebp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); } Register scratch = edx; - GetContextFromRef(masm, eax, scratch); + GetContextFromImplicitArg(masm, eax, scratch); __ mov(edx, wrapper_buffer); __ CallBuiltin(Builtin::kJSToWasmHandleReturns); @@ -3835,12 +3840,7 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { for (size_t i = arraysize(wasm::kGpParamRegisters) - 1; i > 0; --i) { __ push(wasm::kGpParamRegisters[i]); } - // Reserve fixed slots for the CSA wrapper. - // Two slots for stack-switching (central stack pointer and secondary stack - // limit): - __ push(Immediate(0)); - __ push(Immediate(0)); - // One slot for the signature: + // Reserve a slot for the signature. __ push(eax); // Push the return address again. __ push(scratch); @@ -4338,6 +4338,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ jmp(edi); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + __ int3(); // Unused on this architecture. 
+} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label check_negative, process_64_bits, done; diff --git a/deps/v8/src/builtins/js-to-js.tq b/deps/v8/src/builtins/js-to-js.tq index c0afd23ac9260d..8c513bc38d65ee 100644 --- a/deps/v8/src/builtins/js-to-js.tq +++ b/deps/v8/src/builtins/js-to-js.tq @@ -63,8 +63,11 @@ macro ConvertToAndFromWasm(context: Context, wasmType: int32, value: JSAny): extern runtime WasmThrowJSTypeError(Context): never; +// The varargs argument is just there so that the generated Code has a +// parameter_count of 0 (kDontAdaptArgumentsSentinel) and so becomes compatible +// with an existing entry in the JSDispatchTable. transitioning javascript builtin JSToJSWrapperInvalidSig( - js-implicit context: NativeContext)(): JSAny { + js-implicit context: NativeContext)(...arguments): JSAny { runtime::WasmThrowJSTypeError(context); } diff --git a/deps/v8/src/builtins/js-to-wasm.tq b/deps/v8/src/builtins/js-to-wasm.tq index ea17bbb8026246..974647345fe1a1 100644 --- a/deps/v8/src/builtins/js-to-wasm.tq +++ b/deps/v8/src/builtins/js-to-wasm.tq @@ -8,7 +8,8 @@ namespace runtime { extern runtime WasmGenericJSToWasmObject( Context, WasmTrustedInstanceData|Undefined, JSAny, Smi): JSAny; extern runtime WasmGenericWasmToJSObject(Context, Object): JSAny; -extern runtime WasmCompileWrapper(NoContext, WasmExportedFunctionData): JSAny; +extern runtime TierUpJSToWasmWrapper(NoContext, WasmExportedFunctionData): + JSAny; extern runtime WasmAllocateSuspender(Context): JSAny; } // namespace runtime @@ -428,7 +429,7 @@ macro JSToWasmObject( targetType: int32, value: JSAny): Object { const heapType = (targetType >> kValueTypeKindBits) & kValueTypeHeapTypeMask; const kind = targetType & kValueTypeKindBitsMask; - if (heapType == HeapType::kExtern || heapType == HeapType::kNoExtern) { + if (heapType == HeapType::kExtern) { if (kind == ValueKind::kRef && value == Null) { ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError); } @@ -445,6 +446,7 @@ macro JSToWasmObject( if (kind == ValueKind::kRef) { ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError); } else { + dcheck(kind == ValueKind::kRefNull); return kWasmNull; } } @@ -490,7 +492,7 @@ macro JSToWasmWrapperHelper( UnsafeCast(functionData.wrapper_budget.value) - SmiConstant(1); functionData.wrapper_budget.value = budget; if (budget == SmiConstant(0)) { - runtime::WasmCompileWrapper(kNoContext, functionData); + runtime::TierUpJSToWasmWrapper(kNoContext, functionData); } } diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index 037355fdc3d85f..9687905c112d1e 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -1325,11 +1325,18 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing); +#ifndef V8_ENABLE_LEAPTIERING + // TODO(loong64, 42204201): This fast case is difficult to support with the + // sandbox as it requires getting write access to the dispatch table. See + // `JSFunction::UpdateCode`. We might want to remove it for all + // configurations as it does not seem to be performance sensitive. + // Load the baseline code into the closure.
__ Move(a2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(a2, closure); __ JumpCodeObject(a2, kJSEntrypointTag); +#endif // V8_ENABLE_LEAPTIERING __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1923,8 +1930,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code, Operand(Smi::zero())); } @@ -2813,8 +2821,8 @@ constexpr RegList kSavedGpRegs = ([]() constexpr { saved_gp_regs.set(gp_param_reg); } - // The instance has already been stored in the fixed part of the frame. - saved_gp_regs.clear(kWasmInstanceRegister); + // The instance data has already been stored in the fixed part of the frame. + saved_gp_regs.clear(kWasmImplicitArgRegister); // All set registers were unique. CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1); CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs, @@ -2836,16 +2844,16 @@ constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { // When entering this builtin, we have just created a Wasm stack frame: // -// [ Wasm instance ] <-- sp -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ Wasm instance data ] <-- sp +// [ WASM frame marker ] +// [ saved fp ] <-- fp // // Add the feedback vector to the stack. // -// [ feedback vector ] <-- sp -// [ Wasm instance ] -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ feedback vector ] <-- sp +// [ Wasm instance data ] +// [ WASM frame marker ] +// [ saved fp ] <-- fp void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register func_index = wasm::kLiftoffFrameSetupFunctionReg; Register vector = t1; @@ -2853,7 +2861,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ LoadTaggedField( - vector, FieldMemOperand(kWasmInstanceRegister, + vector, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ Alsl_d(vector, func_index, vector, kTaggedSizeLog2); __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); @@ -2874,10 +2882,10 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ MultiPushFPU(kSavedFpRegs); __ Push(ra); - // Arguments to the runtime function: instance, func_index, and an + // Arguments to the runtime function: instance data, func_index, and an // additional stack slot for the NativeModule. 
__ SmiTag(func_index); - __ Push(kWasmInstanceRegister, func_index, zero_reg); + __ Push(kWasmImplicitArgRegister, func_index, zero_reg); __ Move(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3); __ mov(vector, kReturnRegister0); @@ -2886,8 +2894,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Pop(ra); __ MultiPopFPU(kSavedFpRegs); __ MultiPop(kSavedGpRegs); - __ Ld_d(kWasmInstanceRegister, - MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset)); + __ Ld_d(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM)); __ St_d(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); __ Branch(&done); @@ -2903,7 +2911,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::INTERNAL); // Save registers that we need to keep alive across the runtime call. - __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ MultiPush(kSavedGpRegs); __ MultiPushFPU(kSavedFpRegs); @@ -2912,7 +2920,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // as if they were saved. __ Sub_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize); - __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); + __ Push(kWasmImplicitArgRegister, kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. @@ -2927,13 +2935,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Restore registers. __ MultiPopFPU(kSavedFpRegs); __ MultiPop(kSavedGpRegs); - __ Pop(kWasmInstanceRegister); + __ Pop(kWasmImplicitArgRegister); } // The runtime function returned the jump table slot offset as a Smi (now in // t7). Use that to compute the jump target. static_assert(!kSavedGpRegs.has(t8)); - __ Ld_d(t8, FieldMemOperand(kWasmInstanceRegister, + __ Ld_d(t8, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset)); __ Add_d(t7, t8, Operand(t7)); @@ -3147,7 +3155,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { __ St_d(zero_reg, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ St_d(zero_reg, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ St_d(zero_reg, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } // TODO(irezvov): Consolidate with arm64 RegisterAllocator. @@ -3284,21 +3293,23 @@ class RegisterAllocator { #define FREE_REG(Name) regs.Free(&Name); // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { +// depending on the data's type, and places the result in the input register. 
+void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { Label instance; Label end; - __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset)); + __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset)); __ Ld_hu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); __ Branch(&instance, eq, scratch, Operand(WASM_TRUSTED_INSTANCE_DATA_TYPE)); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset)); + data, FieldMemOperand(data, WasmImportData::kNativeContextOffset)); __ jmp(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + data, + FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3321,10 +3332,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { __ St_d(wasm::kGpParamRegisters[i], MemOperand(sp, (i - 1) * kSystemPointerSize)); } - // Reserve fixed slots for the CSA wrapper. - // Two slots for stack-switching (central stack pointer and secondary stack - // limit), one for the signature. - __ Push(zero_reg, zero_reg, zero_reg); + // Reserve a slot for the signature. + __ Push(zero_reg); __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA); } @@ -3698,8 +3707,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, RegisterAllocator& regs, promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ Ld_d(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister, tmp); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp, tmp2); @@ -3744,11 +3753,11 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm, promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ Ld_d(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); DEFINE_SCOPED(tmp); DEFINE_SCOPED(tmp2); - GetContextFromRef(masm, kContextRegister, tmp); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2); RestoreParentSuspender(masm, tmp, tmp2); @@ -3775,8 +3784,10 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ AllocateStackSpace(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize); - DEFINE_PINNED(ref, kWasmInstanceRegister); - __ Ld_d(ref, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + // Load the implicit argument (instance data or import data) from the frame. 
+ DEFINE_PINNED(implicit_arg, kWasmImplicitArgRegister); + __ Ld_d(implicit_arg, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); DEFINE_PINNED(wrapper_buffer, WasmJSToWasmWrapperDescriptor::WrapperBufferRegister()); @@ -3785,21 +3796,23 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register original_fp = no_reg; Register new_wrapper_buffer = no_reg; if (stack_switch) { - SwitchToAllocatedStack(masm, regs, ref, wrapper_buffer, original_fp, - new_wrapper_buffer, &suspend); + SwitchToAllocatedStack(masm, regs, implicit_arg, wrapper_buffer, + original_fp, new_wrapper_buffer, &suspend); } else { original_fp = fp; new_wrapper_buffer = wrapper_buffer; } - regs.ResetExcept(original_fp, wrapper_buffer, ref, new_wrapper_buffer); + regs.ResetExcept(original_fp, wrapper_buffer, implicit_arg, + new_wrapper_buffer); { __ St_d( new_wrapper_buffer, MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset)); if (stack_switch) { - __ St_d(ref, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ St_d(implicit_arg, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); DEFINE_SCOPED(scratch) __ Ld_d( scratch, @@ -3827,12 +3840,13 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { if (stack_switch) { FREE_REG(new_wrapper_buffer) } - FREE_REG(ref) + FREE_REG(implicit_arg) for (auto reg : wasm::kGpParamRegisters) { regs.Reserve(reg); } - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -3952,16 +3966,17 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // a2: pointer to the byte buffer which contains all parameters. 
if (stack_switch) { __ Ld_d(a1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ Ld_d(a0, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ Ld_d(a0, MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } else { __ Ld_d( a1, MemOperand(fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); - __ Ld_d(a0, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + __ Ld_d(a0, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); } Register scratch = a3; - GetContextFromRef(masm, a0, scratch); + GetContextFromImplicitArg(masm, a0, scratch); __ Call(BUILTIN_CODE(masm->isolate(), JSToWasmHandleReturns), RelocInfo::CODE_TARGET); @@ -4252,6 +4267,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Jump(scratch); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + __ Trap(); +} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label done; Register result_reg = t0; diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 77a8aaa2c992cf..0345a67a0655a4 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -1290,11 +1290,18 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing); +#ifndef V8_ENABLE_LEAPTIERING + // TODO(mips64, 42204201): This fastcase is difficult to support with the + // sandbox as it requires getting write access to the dispatch table. See + // `JSFunction::UpdateCode`. We might want to remove it for all + // configurations as it does not seem to be performance sensitive. + // Load the baseline code into the closure. __ Move(a2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(a2, closure, t0, t1); __ JumpCodeObject(a2, kJSEntrypointTag); +#endif // V8_ENABLE_LEAPTIERING __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1882,8 +1889,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ Branch(&jump_to_optimized_code, ne, maybe_target_code, Operand(Smi::zero())); } @@ -2749,8 +2757,8 @@ constexpr RegList kSavedGpRegs = ([]() constexpr { saved_gp_regs.set(gp_param_reg); } - // The instance has already been stored in the fixed part of the frame. - saved_gp_regs.clear(kWasmInstanceRegister); + // The instance data has already been stored in the fixed part of the frame. + saved_gp_regs.clear(kWasmImplicitArgRegister); // All set registers were unique. 
CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1); CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs, @@ -2772,16 +2780,16 @@ constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { // When entering this builtin, we have just created a Wasm stack frame: // -// [ Wasm instance ] <-- sp -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ Wasm instance data ] <-- sp +// [ WASM frame marker ] +// [ saved fp ] <-- fp // // Add the feedback vector to the stack. // -// [ feedback vector ] <-- sp -// [ Wasm instance ] -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ feedback vector ] <-- sp +// [ Wasm instance data ] +// [ WASM frame marker ] +// [ saved fp ] <-- fp void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register func_index = wasm::kLiftoffFrameSetupFunctionReg; Register vector = t1; @@ -2789,7 +2797,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ Ld(vector, - FieldMemOperand(kWasmInstanceRegister, + FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ Dlsa(vector, vector, func_index, kTaggedSizeLog2); __ Ld(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); @@ -2810,10 +2818,10 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ MultiPushFPU(kSavedFpRegs); __ Push(ra); - // Arguments to the runtime function: instance, func_index, and an + // Arguments to the runtime function: instance data, func_index, and an // additional stack slot for the NativeModule. __ SmiTag(func_index); - __ Push(kWasmInstanceRegister, func_index, zero_reg); + __ Push(kWasmImplicitArgRegister, func_index, zero_reg); __ Move(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3); __ mov(vector, kReturnRegister0); @@ -2822,8 +2830,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Pop(ra); __ MultiPopFPU(kSavedFpRegs); __ MultiPop(kSavedGpRegs); - __ Ld(kWasmInstanceRegister, - MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset)); + __ Ld(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM)); __ Sd(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); __ Branch(&done); @@ -2839,7 +2847,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::INTERNAL); // Save registers that we need to keep alive across the runtime call. - __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ MultiPush(kSavedGpRegs); // Check if machine has simd enabled, if so push vector registers. If not // then only push double registers. @@ -2863,7 +2871,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Dsubu(sp, sp, kSavedFpRegs.Count() * kDoubleSize); __ bind(&simd_pushed); - __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); + __ Push(kWasmImplicitArgRegister, kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. @@ -2888,7 +2896,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ MultiPopFPU(kSavedFpRegs); __ bind(&simd_popped); __ MultiPop(kSavedGpRegs); - __ Pop(kWasmInstanceRegister); + __ Pop(kWasmImplicitArgRegister); } // Untag the returned Smi, for later use. 
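All of the ports touched by this patch carry out the same rename: kWasmImplicitArgRegister (formerly kWasmInstanceRegister) holds either a WasmTrustedInstanceData for wasm-to-wasm calls or a WasmImportData for calls through an imported JS callable, and each per-architecture GetContextFromImplicitArg helper dispatches on the object's instance type. As a minimal C++-level sketch of what that hand-written assembly does (the accessor names are assumed from the field offsets used in the diff, not quoted from a V8 header):

    // Sketch only: the dispatch implemented by the GetContextFromImplicitArg
    // assembly helpers elsewhere in this patch. Both possible implicit-arg
    // types carry the native context needed to call back into JavaScript.
    Tagged<NativeContext> GetContextFromImplicitArg(Tagged<HeapObject> data) {
      if (data->map()->instance_type() == WASM_TRUSTED_INSTANCE_DATA_TYPE) {
        // Loads via WasmTrustedInstanceData::kNativeContextOffset.
        return Cast<WasmTrustedInstanceData>(data)->native_context();
      }
      // Loads via WasmImportData::kNativeContextOffset.
      return Cast<WasmImportData>(data)->native_context();
    }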
@@ -2898,7 +2906,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The runtime function returned the jump table slot offset as a Smi (now in
   // t8). Use that to compute the jump target.
   static_assert(!kSavedGpRegs.has(t8));
-  __ Ld(t8, FieldMemOperand(kWasmInstanceRegister,
+  __ Ld(t8, FieldMemOperand(kWasmImplicitArgRegister,
                             WasmTrustedInstanceData::kJumpTableStartOffset));
   __ Daddu(t8, v0, t8);
 
@@ -2949,10 +2957,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) {
     __ Sd(wasm::kGpParamRegisters[i],
           MemOperand(sp, (i - 1) * kSystemPointerSize));
   }
-  // Reserve fixed slots for the CSA wrapper.
-  // Two slots for stack-switching (central stack pointer and secondary stack
-  // limit), one for the signature.
-  __ Push(zero_reg, zero_reg, zero_reg);
+  // Reserve a slot for the signature.
+  __ Push(zero_reg);
   __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA);
 }
 
@@ -3147,6 +3153,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   __ Jump(t9);
 }
 
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
+  __ Trap();
+}
+#endif  // V8_ENABLE_WEBASSEMBLY
+
 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
   Label done;
   Register result_reg = t0;
diff --git a/deps/v8/src/builtins/number-builtins-reducer-inl.h b/deps/v8/src/builtins/number-builtins-reducer-inl.h
new file mode 100644
index 00000000000000..c8b23c8918bc14
--- /dev/null
+++ b/deps/v8/src/builtins/number-builtins-reducer-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2024 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_NUMBER_BUILTINS_REDUCER_INL_H_
+#define V8_BUILTINS_NUMBER_BUILTINS_REDUCER_INL_H_
+
+#include "src/codegen/turboshaft-builtins-assembler-inl.h"
+
+namespace v8::internal {
+
+#include "src/compiler/turboshaft/define-assembler-macros.inc"
+
+using namespace compiler::turboshaft;  // NOLINT(build/namespaces)
+
+template <typename Next>
+class NumberBuiltinsReducer : public Next {
+ public:
+  BUILTIN_REDUCER(NumberBuiltins)
+
+  V<Object> BitwiseNot(V<Context> context, V<Object> input) {
+    Label<Object> done(this);
+    Label<Word32> if_number(this);
+    Label<BigInt> if_bigint(this);
+    __ template TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumeric>(
+        context, input, IsKnownTaggedPointer::kNo, if_number, &if_bigint,
+        nullptr);
+
+    // Number case.
+    {
+      BIND(if_number, w32);
+      V<Number> temp = __ ConvertInt32ToNumber(__ Word32BitwiseNot(w32));
+      IF (__ IsSmi(temp)) {
+        __ CombineFeedback(BinaryOperationFeedback::kSignedSmall);
+      } ELSE {
+        __ CombineFeedback(BinaryOperationFeedback::kNumber);
+      }
+      GOTO(done, temp);
+    }
+
+    // BigInt case.
+    {
+      BIND(if_bigint, bigint_value);
+      if (__ HasFeedbackCollector()) {
+        // Feedback has been set already in `TaggedToWord32OrBigIntImpl`.
+        TSA_DCHECK(this, __ FeedbackIs(BinaryOperationFeedback::kBigInt));
+      }
+      GOTO(done, __ CallRuntime_BigIntUnaryOp(isolate_, context, bigint_value,
+                                              ::Operation::kBitwiseNot));
+    }
+
+    BIND(done, result);
+    return result;
+  }
+
+ private:
+  Isolate* isolate_ = __ data()->isolate();
+};
+
+#include "src/compiler/turboshaft/undef-assembler-macros.inc"
+
+}  // namespace v8::internal
+
+#endif  // V8_BUILTINS_NUMBER_BUILTINS_REDUCER_INL_H_
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index fed1179a1ff650..dda6a4489b4afc 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_PPC64
 
 #include "src/api/api-arguments.h"
 #include "src/builtins/builtins-inl.h"
@@ -431,8 +431,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   Label jump_to_optimized_code;
   {
     // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a InstructionStream object,
-    // it must NOT be marked_for_deoptimization (callers must ensure this).
+    // precondition here is: if maybe_target_code is an InstructionStream
+    // object, it must NOT be marked_for_deoptimization (callers must ensure
+    // this).
     __ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
     __ bne(&jump_to_optimized_code);
   }
@@ -1631,12 +1632,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
       flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
 
+#ifndef V8_ENABLE_LEAPTIERING
+  // TODO(olivf, 42204201): This fastcase is difficult to support with the
+  // sandbox as it requires getting write access to the dispatch table. See
+  // `JSFunction::UpdateCode`. We might want to remove it for all
+  // configurations as it does not seem to be performance sensitive.
+
   // Load the baseline code into the closure.
   __ mr(r5, kInterpreterBytecodeArrayRegister);
   static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
   __ ReplaceClosureCodeWithOptimizedCode(r5, closure, ip, r7);
   __ JumpCodeObject(r5);
+#endif  // V8_ENABLE_LEAPTIERING
+
   __ bind(&install_baseline_code);
   __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
 }
@@ -3039,7 +3048,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
 
   __ LoadTaggedField(
       vector,
-      FieldMemOperand(kWasmInstanceRegister,
+      FieldMemOperand(kWasmImplicitArgRegister,
                       WasmTrustedInstanceData::kFeedbackVectorsOffset),
      scratch);
   __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
@@ -3048,7 +3057,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
       scratch);
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
-  __ push(kWasmInstanceRegister);
+  __ push(kWasmImplicitArgRegister);
   __ push(vector);
   __ Ret();
 
@@ -3066,8 +3075,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ push(scratch);
   {
     SaveWasmParamsScope save_params(masm);  // Will use r0 and ip as scratch.
-    // Arguments to the runtime function: instance, func_index.
-    __ push(kWasmInstanceRegister);
+    // Arguments to the runtime function: instance data, func_index.
+ __ push(kWasmImplicitArgRegister); __ SmiTag(func_index); __ push(func_index); // Allocate a stack slot where the runtime function can spill a pointer @@ -3098,8 +3107,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { { SaveWasmParamsScope save_params(masm); // Will use r0 and ip as scratch. - // Push the Wasm instance as an explicit argument to the runtime function. - __ push(kWasmInstanceRegister); + // Push the instance data as an explicit argument to the runtime function. + __ push(kWasmImplicitArgRegister); // Push the function index as second argument. __ push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to @@ -3114,10 +3123,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Saved parameters are restored at the end of this block. } - // After the instance register has been restored, we can add the jump table - // start to the jump table offset already stored in r11. + // After the instance data register has been restored, we can add the jump + // table start to the jump table offset already stored in r11. __ LoadU64(ip, - FieldMemOperand(kWasmInstanceRegister, + FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset), r0); __ AddS64(r11, r11, ip); @@ -3158,20 +3167,22 @@ void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) { } // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { - __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset), r0); +// depending on the data's type, and places the result in the input register. +void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { + __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset), + r0); __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE); Label instance; Label end; __ beq(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset), r0); + data, FieldMemOperand(data, WasmImportData::kNativeContextOffset), r0); __ jmp(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset), - r0); + data, + FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset), r0); __ bind(&end); } @@ -3191,15 +3202,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { gp_regs.set(wasm::kGpParamRegisters[i]); } __ MultiPush(gp_regs); - // Reserve fixed slots for the CSA wrapper. - // Two slots for stack-switching (central stack pointer and secondary stack - // limit): - Register scratch = r4; - __ mov(scratch, Operand::Zero()); - __ Push(scratch); - __ Push(scratch); - // One slot for the signature: - __ Push(r0); + // Reserve a slot for the signature. 
+ __ Push(r3); __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA); } @@ -3232,7 +3236,8 @@ void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { __ Move(zero, Smi::zero()); __ StoreU64(zero, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ StoreU64(zero, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ StoreU64(zero, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { @@ -3278,7 +3283,8 @@ void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { JSToWasmWrapperFrameConstants::kWrapperBufferParamStart), r0); - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -3322,9 +3328,9 @@ void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { } DCHECK_EQ(next_offset, stack_params_offset); - // Load the instance into r6. - __ LoadU64(kWasmInstanceRegister, - MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset), + // Load the instance data (implicit arg) into r6. + __ LoadU64(kWasmImplicitArgRegister, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset), r0); { @@ -3389,10 +3395,11 @@ void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { r4, MemOperand(fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset), r0); - __ LoadU64(r3, MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset), + __ LoadU64(r3, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset), r0); Register scratch = r6; - GetContextFromRef(masm, r3, scratch); + GetContextFromImplicitArg(masm, r3, scratch); __ CallBuiltin(Builtin::kJSToWasmHandleReturns); @@ -3592,6 +3599,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Jump(scratch); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + __ Trap(); +} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label out_of_range, only_low, negate, done, fastpath_done; Register result_reg = r3; @@ -3614,17 +3627,10 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { // Do fast-path convert from double to int. __ ConvertDoubleToInt64(double_scratch, -#if !V8_TARGET_ARCH_PPC64 - scratch, -#endif result_reg, d0); // Test for overflow -#if V8_TARGET_ARCH_PPC64 __ TestIfInt32(result_reg, r0); -#else - __ TestIfInt32(scratch, result_reg, r0); -#endif __ beq(&fastpath_done); __ Push(scratch_high, scratch_low); @@ -3690,9 +3696,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1. // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result. 
   __ srawi(r0, scratch_high, 31);
-#if V8_TARGET_ARCH_PPC64
   __ srdi(r0, r0, Operand(32));
-#endif
   __ xor_(result_reg, result_reg, r0);
   __ srwi(r0, scratch_high, Operand(31));
   __ add(result_reg, result_reg, r0);
@@ -4265,4 +4269,4 @@ void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) {
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_PPC64
+#endif  // V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 8866a39c8ab47e..f4e17456651516 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -35,7 +35,7 @@ const kResolveString: String = ResolveStringConstant();
 extern macro IsPromiseResolveProtectorCellInvalid(): bool;
 
 extern macro AllocateRootFunctionWithContext(
-    constexpr intptr, FunctionContext): JSFunction;
+    constexpr intptr, FunctionContext, NativeContext): JSFunction;
 
 extern macro PromiseReactionMapConstant(): Map;
 extern macro PromiseFulfillReactionJobTaskMapConstant(): Map;
@@ -329,9 +329,9 @@ macro CreatePromiseResolvingFunctions(
       promise, debugEvent, nativeContext);
   const resolve: JSFunction = AllocateRootFunctionWithContext(
-      kPromiseCapabilityDefaultResolveSharedFun, promiseContext);
+      kPromiseCapabilityDefaultResolveSharedFun, promiseContext, nativeContext);
   const reject: JSFunction = AllocateRootFunctionWithContext(
-      kPromiseCapabilityDefaultRejectSharedFun, promiseContext);
+      kPromiseCapabilityDefaultRejectSharedFun, promiseContext, nativeContext);
 
   return PromiseResolvingFunctions{
     resolve: resolve,
     reject: reject,
@@ -361,7 +361,8 @@ transitioning macro InnerNewPromiseCapability(
   const executorContext =
       CreatePromiseCapabilitiesExecutorContext(nativeContext, capability);
   const executor = AllocateRootFunctionWithContext(
-      kPromiseGetCapabilitiesExecutorSharedFun, executorContext);
+      kPromiseGetCapabilitiesExecutorSharedFun, executorContext,
+      nativeContext);
 
   const promiseConstructor = UnsafeCast<Constructor>(constructor);
   const promise = Construct(promiseConstructor, executor);
@@ -487,6 +488,18 @@ transitioning macro PerformPromiseThenImpl(
   promise.SetHasHandler();
 }
 
+transitioning javascript builtin PerformPromiseThenFunction(
+    js-implicit context: NativeContext, receiver: JSAny)(onFulfilled: JSAny,
+    onRejected: JSAny): JSAny {
+  const jsPromise = Cast<JSPromise>(receiver) otherwise unreachable;
+  const callableOnFulfilled = Cast<Callable>(onFulfilled) otherwise unreachable;
+  const callableOnRejected = Cast<Callable>(onRejected) otherwise unreachable;
+
+  PerformPromiseThenImpl(
+      jsPromise, callableOnFulfilled, callableOnRejected, Undefined);
+  return Undefined;
+}
+
 // https://tc39.es/ecma262/#sec-performpromisethen
 transitioning builtin PerformPromiseThen(
     implicit context: Context)(promise: JSPromise,
diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq
index 6a067c6bebb609..83aeb1bc9cbcf4 100644
--- a/deps/v8/src/builtins/promise-all.tq
+++ b/deps/v8/src/builtins/promise-all.tq
@@ -45,8 +45,9 @@ macro CreatePromiseAllResolveElementFunction(
   dcheck(index > 0);
   dcheck(index < kPropertyArrayHashFieldMax);
 
-  const resolve =
-      AllocateRootFunctionWithContext(resolveFunction, resolveElementContext);
+  const resolve = AllocateRootFunctionWithContext(
+      resolveFunction, resolveElementContext,
+      LoadNativeContext(resolveElementContext));
 
   dcheck(kPropertyArrayNoHashSentinel == 0);
   resolve.properties_or_hash = index;
diff --git a/deps/v8/src/builtins/promise-any.tq
b/deps/v8/src/builtins/promise-any.tq index 5208e718459954..f4df2bbd67f8d2 100644 --- a/deps/v8/src/builtins/promise-any.tq +++ b/deps/v8/src/builtins/promise-any.tq @@ -59,11 +59,12 @@ const kPromiseAnyRejectElementClosureSharedFun: constexpr intptr macro CreatePromiseAnyRejectElementFunction( implicit context: Context)( rejectElementContext: PromiseAnyRejectElementContext, index: Smi, - _nativeContext: NativeContext): JSFunction { + nativeContext: NativeContext): JSFunction { dcheck(index > 0); dcheck(index < kPropertyArrayHashFieldMax); const reject = AllocateRootFunctionWithContext( - kPromiseAnyRejectElementClosureSharedFun, rejectElementContext); + kPromiseAnyRejectElementClosureSharedFun, rejectElementContext, + nativeContext); dcheck(kPropertyArrayNoHashSentinel == 0); reject.properties_or_hash = index; return reject; diff --git a/deps/v8/src/builtins/promise-finally.tq b/deps/v8/src/builtins/promise-finally.tq index 035e3a1de02b05..bdeebb086bbb67 100644 --- a/deps/v8/src/builtins/promise-finally.tq +++ b/deps/v8/src/builtins/promise-finally.tq @@ -51,7 +51,7 @@ macro CreateThrowerFunction( InitContextSlot( throwerContext, PromiseValueThunkOrReasonContextSlot::kValueSlot, reason); return AllocateRootFunctionWithContext( - kPromiseThrowerFinallySharedFun, throwerContext); + kPromiseThrowerFinallySharedFun, throwerContext, nativeContext); } transitioning javascript builtin PromiseCatchFinally( @@ -98,7 +98,7 @@ macro CreateValueThunkFunction( valueThunkContext, PromiseValueThunkOrReasonContextSlot::kValueSlot, value); return AllocateRootFunctionWithContext( - kPromiseValueThunkFinallySharedFun, valueThunkContext); + kPromiseValueThunkFinallySharedFun, valueThunkContext, nativeContext); } transitioning javascript builtin PromiseThenFinally( @@ -152,9 +152,9 @@ macro CreatePromiseFinallyFunctions( InitContextSlot( promiseContext, PromiseFinallyContextSlot::kConstructorSlot, constructor); const thenFinally = AllocateRootFunctionWithContext( - kPromiseThenFinallySharedFun, promiseContext); + kPromiseThenFinallySharedFun, promiseContext, nativeContext); const catchFinally = AllocateRootFunctionWithContext( - kPromiseCatchFinallySharedFun, promiseContext); + kPromiseCatchFinallySharedFun, promiseContext, nativeContext); return PromiseFinallyFunctions{ then_finally: thenFinally, catch_finally: catchFinally diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq index 8accbc33c6872b..1f90230bcafe42 100644 --- a/deps/v8/src/builtins/promise-misc.tq +++ b/deps/v8/src/builtins/promise-misc.tq @@ -36,6 +36,9 @@ extern macro SetContinuationPreservedEmbedderData(Object): void; } namespace promise { +const kInvalidAsyncTaskId: + constexpr uint32 generates 'JSPromise::kInvalidAsyncTaskId'; + extern macro IsFunctionWithPrototypeSlotMap(Map): bool; @export @@ -50,7 +53,7 @@ macro PromiseInit(promise: JSPromise): void { status: PromiseState::kPending, has_handler: false, is_silent: false, - async_task_id: 0 + async_task_id: kInvalidAsyncTaskId }); promise_internal::ZeroOutEmbedderOffsets(promise); } @@ -69,7 +72,7 @@ macro InnerNewJSPromise(implicit context: Context)(): JSPromise { status: PromiseState::kPending, has_handler: false, is_silent: false, - async_task_id: 0 + async_task_id: kInvalidAsyncTaskId }); return promise; } diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc index 6d7bf8eb1d08f4..e55e97ceb1c7a9 100644 --- a/deps/v8/src/builtins/riscv/builtins-riscv.cc +++ 
b/deps/v8/src/builtins/riscv/builtins-riscv.cc @@ -1341,11 +1341,17 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing); +#ifndef V8_ENABLE_LEAPTIERING + // TODO(olivf, 42204201): This fastcase is difficult to support with the + // sandbox as it requires getting write access to the dispatch table. See + // `JSFunction::UpdateCode`. We might want to remove it for all + // configurations as it does not seem to be performance sensitive. // Load the baseline code into the closure. __ Move(a2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(a2, closure); __ JumpCodeObject(a2, kJSEntrypointTag); +#endif // V8_ENABLE_LEAPTIERING __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1940,8 +1946,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code, Operand(Smi::zero())); } @@ -2846,8 +2853,8 @@ constexpr RegList kSavedGpRegs = ([]() constexpr { saved_gp_regs.set(gp_param_reg); } - // The instance has already been stored in the fixed part of the frame. - saved_gp_regs.clear(kWasmInstanceRegister); + // The instance data has already been stored in the fixed part of the frame. + saved_gp_regs.clear(kWasmImplicitArgRegister); // All set registers were unique. CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) - 1); CHECK_EQ(WasmLiftoffSetupFrameConstants::kNumberOfSavedGpParamRegs, @@ -2872,16 +2879,16 @@ constexpr DoubleRegList kSavedFpRegs = ([]() constexpr { // When entering this builtin, we have just created a Wasm stack frame: // -// [ Wasm instance ] <-- sp -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ Wasm instance data ] <-- sp +// [ WASM frame marker ] +// [ saved fp ] <-- fp // // Add the feedback vector to the stack. 
// -// [ feedback vector ] <-- sp -// [ Wasm instance ] -// [ WASM frame marker ] -// [ saved fp ] <-- fp +// [ feedback vector ] <-- sp +// [ Wasm instance data ] +// [ WASM frame marker ] +// [ saved fp ] <-- fp void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register func_index = wasm::kLiftoffFrameSetupFunctionReg; Register vector = t1; @@ -2889,7 +2896,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ LoadTaggedField( - vector, FieldMemOperand(kWasmInstanceRegister, + vector, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ CalcScaledAddress(vector, vector, func_index, kTaggedSizeLog2); __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); @@ -2910,10 +2917,10 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ MultiPushFPU(kSavedFpRegs); __ Push(ra); - // Arguments to the runtime function: instance, func_index, and an + // Arguments to the runtime function: instance data, func_index, and an // additional stack slot for the NativeModule. __ SmiTag(func_index); - __ Push(kWasmInstanceRegister, func_index, zero_reg); + __ Push(kWasmImplicitArgRegister, func_index, zero_reg); __ Move(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmAllocateFeedbackVector, 3); __ mv(vector, kReturnRegister0); @@ -2922,8 +2929,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Pop(ra); __ MultiPopFPU(kSavedFpRegs); __ MultiPop(kSavedGpRegs); - __ LoadWord(kWasmInstanceRegister, - MemOperand(fp, WasmFrameConstants::kWasmInstanceOffset)); + __ LoadWord(kWasmImplicitArgRegister, + MemOperand(fp, WasmFrameConstants::kWasmInstanceDataOffset)); __ li(scratch, StackFrame::TypeToMarker(StackFrame::WASM)); __ StoreWord(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); __ Branch(&done); @@ -2939,11 +2946,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::INTERNAL); // Save registers that we need to keep alive across the runtime call. - __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ MultiPush(kSavedGpRegs); __ MultiPushFPU(kSavedFpRegs); - __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); + __ Push(kWasmImplicitArgRegister, kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Move(kContextRegister, Smi::zero()); @@ -2956,13 +2963,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Restore registers. __ MultiPopFPU(kSavedFpRegs); __ MultiPop(kSavedGpRegs); - __ Pop(kWasmInstanceRegister); + __ Pop(kWasmImplicitArgRegister); } // The runtime function returned the jump table slot offset as a Smi (now in // x17). Use that to compute the jump target. __ LoadWord(kScratchReg, - FieldMemOperand(kWasmInstanceRegister, + FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset)); __ AddWord(s1, s1, Operand(kScratchReg)); // Finally, jump to the entrypoint. 
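The WasmCompileLazy builtins in this patch share one control-flow shape across architectures: spill the parameter registers, call into the runtime with the instance data and function index, then jump through the instance data's jump table. A rough C++ rendering of that flow, under the assumption that a helper like CallWasmCompileLazyRuntime exists (it is invented here purely for illustration; the real entry point is Runtime::kWasmCompileLazy):

    // Illustrative only: control flow of Generate_WasmCompileLazy; the real
    // implementation is the hand-written assembly above.
    Address WasmCompileLazyTarget(Tagged<WasmTrustedInstanceData> instance_data,
                                  int func_index) {
      // The runtime compiles the function and returns the offset of its
      // jump-table slot as a Smi (hypothetical helper, see the CallRuntime
      // sequence in the builtin).
      intptr_t slot_offset =
          CallWasmCompileLazyRuntime(instance_data, func_index);
      // Jumping through the jump table ensures this call and all future
      // calls land on the freshly compiled code.
      return instance_data->jump_table_start() + slot_offset;
    }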
@@ -3225,6 +3232,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Jump(scratch); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + __ Trap(); +} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label done; Register result_reg = t0; @@ -3364,9 +3377,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { __ StoreWord(wasm::kGpParamRegisters[i], MemOperand(sp, (i - 1) * kSystemPointerSize)); } - // Decrement the stack to allocate a stack slot. The signature gets written - // into the slot in Torque. - __ Push(zero_reg, zero_reg, zero_reg); + // Reserve a slot for the signature. + __ Push(zero_reg); __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA); } @@ -3539,7 +3551,8 @@ void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { ASM_CODE_COMMENT(masm); __ StoreWord(zero_reg, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ StoreWord(zero_reg, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ StoreWord(zero_reg, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation, @@ -3913,9 +3926,10 @@ void SwitchToAllocatedStack(MacroAssembler* masm, Register wasm_instance, } // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { - __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset)); +// depending on the data's type, and places the result in the input register. +void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { + __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset)); Label instance; Label end; __ GetInstanceTypeRange(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE, @@ -3923,11 +3937,12 @@ void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { // __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE); __ Branch(&instance, eq, scratch, Operand(zero_reg)); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset)); + data, FieldMemOperand(data, WasmImportData::kNativeContextOffset)); __ Branch(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + data, + FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3947,8 +3962,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, Label* return_promise) { promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ LoadWord(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister, tmp); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp, tmp2); @@ -3990,10 +4005,10 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm, promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ LoadWord(kContextRegister, - MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); Register tmp = kScratchReg; Register tmp2 = kScratchReg2; - GetContextFromRef(masm, 
kContextRegister, tmp); + GetContextFromImplicitArg(masm, kContextRegister, tmp); ReloadParentContinuation(masm, promise, reason, kContextRegister, tmp, tmp2); RestoreParentSuspender(masm, tmp, tmp2); @@ -4021,9 +4036,11 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { sp, sp, Operand(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize)); - Register ref = kWasmInstanceRegister; - __ LoadWord(ref, - MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + // Load the implicit argument (instance data or import data) from the frame. + Register implicit_arg = kWasmImplicitArgRegister; + __ LoadWord( + implicit_arg, + MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); Register wrapper_buffer = WasmJSToWasmWrapperDescriptor::WrapperBufferRegister(); @@ -4031,7 +4048,7 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register original_fp = kScratchReg; Register new_wrapper_buffer = kScratchReg2; if (stack_switch) { - SwitchToAllocatedStack(masm, ref, wrapper_buffer, original_fp, + SwitchToAllocatedStack(masm, implicit_arg, wrapper_buffer, original_fp, new_wrapper_buffer, &suspend); } else { original_fp = fp; @@ -4043,7 +4060,9 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { new_wrapper_buffer, MemOperand(fp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset)); if (stack_switch) { - __ StoreWord(ref, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ StoreWord( + implicit_arg, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ LoadWord( @@ -4083,7 +4102,8 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { original_fp = no_reg; new_wrapper_buffer = no_reg; - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -4204,17 +4224,18 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { if (stack_switch) { __ LoadWord(a1, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ LoadWord(a0, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ LoadWord(a0, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } else { __ LoadWord( a1, MemOperand(fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); - __ LoadWord(a0, - MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + __ LoadWord( + a0, MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); } { UseScratchRegisterScope temps(masm); - GetContextFromRef(masm, a0, temps.Acquire()); + GetContextFromImplicitArg(masm, a0, temps.Acquire()); } __ CallBuiltin(Builtin::kJSToWasmHandleReturns); diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 996f0e4c55d535..80f05bb133b414 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -298,8 +298,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). 
+ // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0); __ bne(&jump_to_optimized_code); } @@ -1664,12 +1665,20 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing); +#ifndef V8_ENABLE_LEAPTIERING + // TODO(olivf, 42204201): This fastcase is difficult to support with the + // sandbox as it requires getting write access to the dispatch table. See + // `JSFunction::UpdateCode`. We might want to remove it for all + // configurations as it does not seem to be performance sensitive. + // Load the baseline code into the closure. __ mov(r4, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(r4, closure, ip, r1); __ JumpCodeObject(r4); +#endif // V8_ENABLE_LEAPTIERING + __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); } @@ -3063,14 +3072,14 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Label allocate_vector, done; __ LoadTaggedField( - vector, FieldMemOperand(kWasmInstanceRegister, + vector, FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2)); __ AddS64(vector, vector, scratch); __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); - __ push(kWasmInstanceRegister); + __ push(kWasmImplicitArgRegister); __ push(vector); __ Ret(); @@ -3087,8 +3096,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ push(r14); { SaveWasmParamsScope save_params(masm); - // Arguments to the runtime function: instance, func_index. - __ push(kWasmInstanceRegister); + // Arguments to the runtime function: instance data, func_index. + __ push(kWasmImplicitArgRegister); __ SmiTag(func_index); __ push(func_index); // Allocate a stack slot where the runtime function can spill a pointer @@ -3118,8 +3127,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { { SaveWasmParamsScope save_params(masm); - // Push the Wasm instance as an explicit argument to the runtime function. - __ push(kWasmInstanceRegister); + // Push the instance data as an explicit argument to the runtime function. + __ push(kWasmImplicitArgRegister); // Push the function index as second argument. __ push(kWasmCompileLazyFuncIndexRegister); // Initialize the JavaScript context with 0. CEntry will use it to @@ -3134,10 +3143,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Saved parameters are restored at the end of this block. } - // After the instance register has been restored, we can add the jump table - // start to the jump table offset already stored in r8. + // After the instance data register has been restored, we can add the jump + // table start to the jump table offset already stored in r8. 
__ LoadU64(r0, - FieldMemOperand(kWasmInstanceRegister, + FieldMemOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kJumpTableStartOffset)); __ AddS64(ip, ip, r0); } @@ -3173,19 +3182,21 @@ void Builtins::Generate_WasmReturnPromiseOnSuspendAsm(MacroAssembler* masm) { } // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref, Register scratch) { - __ LoadTaggedField(scratch, FieldMemOperand(ref, HeapObject::kMapOffset)); +// depending on the data's type, and places the result in the input register. +void GetContextFromImplicitArg(MacroAssembler* masm, Register data, + Register scratch) { + __ LoadTaggedField(scratch, FieldMemOperand(data, HeapObject::kMapOffset)); __ CompareInstanceType(scratch, scratch, WASM_TRUSTED_INSTANCE_DATA_TYPE); Label instance; Label end; __ beq(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmImportData::kNativeContextOffset)); + data, FieldMemOperand(data, WasmImportData::kNativeContextOffset)); __ jmp(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldMemOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + data, + FieldMemOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3205,15 +3216,8 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { gp_regs.set(wasm::kGpParamRegisters[i]); } __ MultiPush(gp_regs); - // Reserve fixed slots for the CSA wrapper. - // Two slots for stack-switching (central stack pointer and secondary stack - // limit): - Register scratch = r3; - __ mov(scratch, Operand::Zero()); - __ Push(scratch); - __ Push(scratch); - // One slot for the signature: - __ Push(r0); + // Reserve a slot for the signature. + __ Push(r2); __ TailCallBuiltin(Builtin::kWasmToJsWrapperCSA); } @@ -3246,7 +3250,8 @@ void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { __ Move(zero, Smi::zero()); __ StoreU64(zero, MemOperand(fp, StackSwitchFrameConstants::kResultArrayOffset)); - __ StoreU64(zero, MemOperand(fp, StackSwitchFrameConstants::kRefOffset)); + __ StoreU64(zero, + MemOperand(fp, StackSwitchFrameConstants::kImplicitArgOffset)); } void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { @@ -3289,7 +3294,8 @@ void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { params_start, MemOperand(wrapper_buffer, JSToWasmWrapperFrameConstants::kWrapperBufferParamStart)); - // The first GP parameter is the instance, which we handle specially. + // The first GP parameter holds the trusted instance data or the import data. + // This is handled specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -3333,9 +3339,9 @@ void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) { } DCHECK_EQ(next_offset, stack_params_offset); - // Load the instance into r5. - __ LoadU64(kWasmInstanceRegister, - MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + // Load the implicit argument into r5. 
+  __ LoadU64(kWasmImplicitArgRegister,
+             MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
 
   {
     Register thread_in_wasm_flag_addr = r3;
@@ -3397,9 +3403,9 @@ void Builtins::Generate_JSToWasmWrapperAsm(MacroAssembler* masm) {
       r3,
       MemOperand(fp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset));
   __ LoadU64(r2,
-             MemOperand(fp, JSToWasmWrapperFrameConstants::kRefParamOffset));
+             MemOperand(fp, JSToWasmWrapperFrameConstants::kImplicitArgOffset));
   Register scratch = r5;
-  GetContextFromRef(masm, r2, scratch);
+  GetContextFromImplicitArg(masm, r2, scratch);
 
   __ CallBuiltin(Builtin::kJSToWasmHandleReturns);
 
@@ -3639,6 +3645,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   __ Jump(scratch);
 }
 
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) {
+  __ Trap();
+}
+#endif  // V8_ENABLE_WEBASSEMBLY
+
 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
   Label out_of_range, only_low, negate, done, fastpath_done;
   Register result_reg = r2;
diff --git a/deps/v8/src/builtins/set-difference.tq b/deps/v8/src/builtins/set-difference.tq
index e0df3b8b69ed15..3363c620170018 100644
--- a/deps/v8/src/builtins/set-difference.tq
+++ b/deps/v8/src/builtins/set-difference.tq
@@ -85,8 +85,7 @@ transitioning javascript builtin SetPrototypeDifference(
       }
     } label SlowPath {
       // 6. If thisSize ≤ otherRec.[[Size]], then
-      if (otherRec.size == V8_INFINITY ||
-          thisSize <= Convert(otherRec.size)) {
+      if (Convert<Number>(thisSize) <= otherRec.size) {
         // a. Let index be 0.
         let thisIter = collections::NewOrderedHashSetIterator(table.GetTable());
diff --git a/deps/v8/src/builtins/set-intersection.tq b/deps/v8/src/builtins/set-intersection.tq
index 017d72d3e6280d..ddf5d8908d459b 100644
--- a/deps/v8/src/builtins/set-intersection.tq
+++ b/deps/v8/src/builtins/set-intersection.tq
@@ -81,8 +81,7 @@ transitioning javascript builtin SetPrototypeIntersection(
       }
     } label SlowPath {
       // 6. If thisSize ≤ otherRec.[[Size]], then
-      if (otherRec.size == V8_INFINITY ||
-          thisSize <= Convert(otherRec.size)) {
+      if (Convert<Number>(thisSize) <= otherRec.size) {
         // a. Let index be 0.
         let thisIter = collections::NewOrderedHashSetIterator(table.GetTable());
diff --git a/deps/v8/src/builtins/set-is-disjoint-from.tq b/deps/v8/src/builtins/set-is-disjoint-from.tq
index 62936ea0b741dd..8b45eb0be27867 100644
--- a/deps/v8/src/builtins/set-is-disjoint-from.tq
+++ b/deps/v8/src/builtins/set-is-disjoint-from.tq
@@ -73,8 +73,7 @@ transitioning javascript builtin SetPrototypeIsDisjointFrom(
       }
     } label SlowPath {
       // 5. If thisSize ≤ otherRec.[[Size]], then
-      if (otherRec.size == V8_INFINITY ||
-          thisSize <= Convert(otherRec.size)) {
+      if (Convert<Number>(thisSize) <= otherRec.size) {
         // a. Let index be 0.
         let thisIter = collections::NewOrderedHashSetIterator(table.GetTable());
diff --git a/deps/v8/src/builtins/set-is-subset-of.tq b/deps/v8/src/builtins/set-is-subset-of.tq
index b7096ca823f679..4135fcff3ea470 100644
--- a/deps/v8/src/builtins/set-is-subset-of.tq
+++ b/deps/v8/src/builtins/set-is-subset-of.tq
@@ -25,8 +25,7 @@ transitioning javascript builtin SetPrototypeIsSubsetOf(
   const thisSize = table.LoadSize();
 
   // 5. If thisSize > otherRec.[[Size]], return false.
-  if (!(otherRec.size == V8_INFINITY) &&
-      thisSize > Convert(otherRec.size)) {
+  if (Convert<Number>(thisSize) > otherRec.size) {
     return False;
   }
 
diff --git a/deps/v8/src/builtins/set-is-superset-of.tq b/deps/v8/src/builtins/set-is-superset-of.tq
index a95aea1496cb02..27d4f6494dc6ba 100644
--- a/deps/v8/src/builtins/set-is-superset-of.tq
+++ b/deps/v8/src/builtins/set-is-superset-of.tq
@@ -26,8 +26,7 @@ transitioning javascript builtin SetPrototypeIsSupersetOf(
   const thisSize = table.LoadSize();
 
   // 5. If thisSize < otherRec.[[Size]], return false.
-  if (otherRec.size == V8_INFINITY ||
-      thisSize < Convert(otherRec.size)) {
+  if (Convert<Number>(thisSize) < otherRec.size) {
     return False;
   }
 
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 7c870a7e4c2768..60dcd4de458509 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -14,6 +14,7 @@
 #include "src/common/globals.h"
 #include "src/compiler/code-assembler.h"
 #include "src/compiler/pipeline.h"
+#include "src/compiler/turboshaft/builtin-compiler.h"
 #include "src/compiler/turboshaft/phase.h"
 #include "src/execution/isolate.h"
 #include "src/handles/handles-inl.h"
@@ -93,9 +94,6 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, Builtin builtin) {
 
 using MacroAssemblerGenerator = void (*)(MacroAssembler*);
 using CodeAssemblerGenerator = void (*)(compiler::CodeAssemblerState*);
-using TurboshaftAssemblerGenerator =
-    void (*)(compiler::turboshaft::PipelineData*, Isolate*,
-             compiler::turboshaft::Graph&, Zone*);
 
 Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
   HandleScope scope(isolate);
@@ -196,50 +194,20 @@ Tagged<Code> BuildAdaptor(Isolate* isolate, Builtin builtin,
   return *code;
 }
 
-inline constexpr char kTempZoneName[] = "temp-zone";
-inline constexpr char kBuiltinCompilationZoneName[] =
-    "builtin-compilation-zone";
-
-Tagged<Code> BuildWithTurboshaftAssemblerImpl(
-    Isolate* isolate, Builtin builtin, TurboshaftAssemblerGenerator generator,
-    std::function<compiler::CallDescriptor*(Zone*)> call_descriptor_builder,
-    const char* name) {
-  HandleScope scope(isolate);
-  using namespace compiler::turboshaft;  // NOLINT(build/namespaces)
-
-  compiler::ZoneStats zone_stats(isolate->allocator());
-  ZoneWithName<kBuiltinCompilationZoneName> zone(&zone_stats,
-                                                 kBuiltinCompilationZoneName);
-  OptimizedCompilationInfo info(base::CStrVector(name), zone, CodeKind::BUILTIN,
-                                builtin);
-  compiler::CallDescriptor* call_descriptor = call_descriptor_builder(zone);
-
-  PipelineData data(&zone_stats, TurboshaftPipelineKind::kTSABuiltin, isolate,
-                    &info, BuiltinAssemblerOptions(isolate, builtin));
-  data.InitializeBuiltinComponent(call_descriptor);
-  data.InitializeGraphComponent(nullptr);
-  ZoneWithName<kTempZoneName> temp_zone(&zone_stats, kTempZoneName);
-  generator(&data, isolate, data.graph(), temp_zone);
-
-  DirectHandle<Code> code =
-      compiler::Pipeline::GenerateCodeForTurboshaftBuiltin(
-          &data, call_descriptor, builtin, name,
-          ProfileDataFromFile::TryRead(name))
-          .ToHandleChecked();
-  return *code;
-}
-
 // Builder for builtins implemented in Turboshaft with JS linkage.
V8_NOINLINE Tagged BuildWithTurboshaftAssemblerJS( - Isolate* isolate, Builtin builtin, TurboshaftAssemblerGenerator generator, - int argc, const char* name) { - return BuildWithTurboshaftAssemblerImpl( + Isolate* isolate, Builtin builtin, + compiler::turboshaft::TurboshaftAssemblerGenerator generator, int argc, + const char* name) { + HandleScope scope(isolate); + Handle code = compiler::turboshaft::BuildWithTurboshaftAssemblerImpl( isolate, builtin, generator, [argc](Zone* zone) { return compiler::Linkage::GetJSCallDescriptor( zone, false, argc, compiler::CallDescriptor::kCanUseRoots); }, - name); + name, BuiltinAssemblerOptions(isolate, builtin)); + return *code; } // Builder for builtins implemented in TurboFan with JS linkage. @@ -263,9 +231,11 @@ V8_NOINLINE Tagged BuildWithCodeStubAssemblerJS( // Builder for builtins implemented in Turboshaft with CallStub linkage. V8_NOINLINE Tagged BuildWithTurboshaftAssemblerCS( - Isolate* isolate, Builtin builtin, TurboshaftAssemblerGenerator generator, + Isolate* isolate, Builtin builtin, + compiler::turboshaft::TurboshaftAssemblerGenerator generator, CallDescriptors::Key interface_descriptor, const char* name) { - return BuildWithTurboshaftAssemblerImpl( + HandleScope scope(isolate); + Handle code = compiler::turboshaft::BuildWithTurboshaftAssemblerImpl( isolate, builtin, generator, [interface_descriptor](Zone* zone) { CallInterfaceDescriptor descriptor(interface_descriptor); @@ -275,7 +245,8 @@ V8_NOINLINE Tagged BuildWithTurboshaftAssemblerCS( compiler::CallDescriptor::kNoFlags, compiler::Operator::kNoProperties); }, - name); + name, BuiltinAssemblerOptions(isolate, builtin)); + return *code; } // Builder for builtins implemented in TurboFan with CallStub linkage. diff --git a/deps/v8/src/builtins/string-trim.tq b/deps/v8/src/builtins/string-trim.tq index 0f528b23a8dc93..8f4b0ddda4d50e 100644 --- a/deps/v8/src/builtins/string-trim.tq +++ b/deps/v8/src/builtins/string-trim.tq @@ -14,7 +14,7 @@ extern enum TrimMode extends uint31 constexpr 'String::TrimMode' { @export macro IsWhiteSpaceOrLineTerminator(charCode: char16|char8): bool { - // 0x0020 - SPACE (Intentionally out of order to fast path a commmon case) + // 0x0020 - SPACE (Intentionally out of order to fast path a common case) if (charCode == 0x0020) { return true; } diff --git a/deps/v8/src/builtins/wasm-to-js.tq b/deps/v8/src/builtins/wasm-to-js.tq index 060ebae501ca38..8ae5ddd617c91f 100644 --- a/deps/v8/src/builtins/wasm-to-js.tq +++ b/deps/v8/src/builtins/wasm-to-js.tq @@ -17,9 +17,6 @@ struct WasmToJSResult { result3: float64; } -extern macro SwitchToTheCentralStackIfNeeded(): RawPtr; -extern macro SwitchFromTheCentralStack(RawPtr): void; - extern builtin IterableToFixedArrayForWasm(Context, JSAny, Smi): FixedArray; extern macro StackAlignmentInBytes(): intptr; @@ -51,28 +48,28 @@ macro HandleF32Returns( } @export -transitioning macro WasmToJSWrapper(ref: WasmImportData): WasmToJSResult { +transitioning macro WasmToJSWrapper(data: WasmImportData): WasmToJSResult { const oldSP = SwitchToTheCentralStackIfNeeded(); - dcheck(Is(ref)); + dcheck(Is(data)); // Spill the signature on the stack so that it can be read by the GC. This is // done in the very beginning before a GC could be triggered. // Caller FP + return address. 
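For the fixed-slot arithmetic just below: slot counts are rounded up to the stack alignment expressed in slots, so a single fixed slot rounds up to exactly `alignment` (the removed code applied the same idiom to its three slots). A hedged sketch of the round-up:

    #include <cstddef>

    // Round a slot count up to the next multiple of `alignment` (in slots).
    constexpr size_t RoundUpSlots(size_t slots, size_t alignment) {
      return ((slots + alignment - 1) / alignment) * alignment;
    }

    // 16-byte stack alignment with 8-byte slots gives alignment == 2.
    static_assert(RoundUpSlots(1, 2) == 2, "one fixed slot");
    static_assert(RoundUpSlots(3, 2) == 4, "the old three-slot case");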
const sigSlot = LoadFramePointer() + kSignatureOffset; - *GetRefAt(sigSlot, 0) = BitcastTaggedToWord(ref.sig); + *GetRefAt(sigSlot, 0) = BitcastTaggedToWord(data.sig); const alignment: intptr = StackAlignmentInBytes() / torque_internal::SizeOf(); - // 3 fixed slots, rounded up to stack alignment. - const numFixedSlots = ((2 + alignment) / alignment) * alignment; + // 1 fixed slot, rounded up to stack alignment. + const numFixedSlots = alignment; ModifyThreadInWasmFlag(0); // Trigger a wrapper tier-up when this function got called often enough. - dcheck(ref.wrapper_budget > 0); - ref.wrapper_budget = ref.wrapper_budget - 1; - if (ref.wrapper_budget == 0) { - runtime::TierUpWasmToJSWrapper(kNoContext, ref); + dcheck(data.wrapper_budget > 0); + data.wrapper_budget = data.wrapper_budget - 1; + if (data.wrapper_budget == 0) { + runtime::TierUpWasmToJSWrapper(kNoContext, data); } - const signaturePod = &ref.sig.bytes; + const signaturePod = &data.sig.bytes; const serializedSig = torque_internal::unsafe::NewConstSlice( signaturePod.object, signaturePod.offset, signaturePod.length / torque_internal::SizeOf()); @@ -175,14 +172,14 @@ transitioning macro WasmToJSWrapper(ref: WasmImportData): WasmToJSResult { const rawRef = *slot; const value = BitcastWordToTagged(rawRef); outParams.objects[nextIndex] = - WasmToJSObject(ref.native_context, value, paramType); + WasmToJSObject(data.native_context, value, paramType); } nextIndex++; } } - const target = ref.callable; + const target = data.callable; - const context = ref.native_context; + const context = data.native_context; // Reset the signature on the stack, so that incoming parameters don't get // scanned anymore. *GetRefAt(sigSlot, 0) = 0; @@ -262,9 +259,9 @@ transitioning macro WasmToJSWrapper(ref: WasmImportData): WasmToJSResult { } else { const retKind = retType & kValueTypeKindBitsMask; dcheck(retKind == ValueKind::kRef || retKind == ValueKind::kRefNull); - const trustedData = TaggedIsSmi(ref.instance_data) ? + const trustedData = TaggedIsSmi(data.instance_data) ? 
Undefined : - UnsafeCast(ref.instance_data); + UnsafeCast(data.instance_data); const converted = JSToWasmObject(context, trustedData, retType, retVal); if (returnCount == 1) { // There are no other values, we can write the object directly into the @@ -313,9 +310,7 @@ transitioning macro WasmToJSWrapper(ref: WasmImportData): WasmToJSResult { result2: *GetRefAt(fpRegSlots, 0), result3: *GetRefAt(fpRegSlots, torque_internal::SizeOf()) }; - if (Convert(oldSP) != 0) { - SwitchFromTheCentralStack(oldSP); - } + SwitchFromTheCentralStack(oldSP); return wasmToJSResult; } } // namespace wasm diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 47a3893a8fdcf1..583e41c658b24d 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -32,7 +32,7 @@ extern runtime WasmThrowDataViewDetachedError(Context, Smi): never; extern runtime WasmThrow(Context, Object, FixedArray): JSAny; extern runtime WasmReThrow(Context, Object): JSAny; extern runtime WasmTriggerTierUp(Context, WasmTrustedInstanceData): JSAny; -extern runtime WasmStackGuard(Context): JSAny; +extern runtime WasmStackGuard(Context, Smi): JSAny; extern runtime ThrowWasmStackOverflow(Context): JSAny; extern runtime WasmTraceMemory(Context, Smi): JSAny; extern runtime WasmTraceEnter(Context): JSAny; @@ -107,6 +107,8 @@ const kAnyType: constexpr int31 generates 'wasm::kWasmAnyRef.raw_bit_field()'; const kMaxPolymorphism: constexpr int31 generates 'wasm::kMaxPolymorphism'; +const kFixedFrameSizeAboveFp: constexpr int32 + generates 'CommonFrameConstants::kFixedFrameSizeAboveFp'; extern macro WasmBuiltinsAssembler::LoadTrustedDataFromInstance( WasmInstanceObject): WasmTrustedInstanceData; @@ -135,6 +137,9 @@ extern macro WasmBuiltinsAssembler::StringToFloat64(String): float64; extern macro WasmBuiltinsAssembler::SignatureCheckFail( WasmInternalFunction, uintptr): Smi; +extern macro SwitchToTheCentralStackIfNeeded(): RawPtr; +extern macro SwitchFromTheCentralStack(RawPtr): void; + macro LoadContextFromFrame(): NativeContext { return LoadContextFromInstanceData(LoadInstanceDataFromFrame()); } @@ -449,8 +454,18 @@ builtin WasmTriggerTierUp(): JSAny { tail runtime::WasmTriggerTierUp(LoadContextFromFrame(), trustedData); } +extern builtin WasmHandleStackOverflow(RawPtr, uint32): JSAny; + +// {paramSlotsSize} is the size of the incoming stack parameters of the +// currently-executing function, which have to be copied along with its +// stack frame if the stack needs to be grown. +builtin WasmGrowableStackGuard(paramSlotsSize: intptr): JSAny { + tail WasmHandleStackOverflow( + LoadParentFramePointer() + paramSlotsSize + kFixedFrameSizeAboveFp, 0); +} + builtin WasmStackGuard(): JSAny { - tail runtime::WasmStackGuard(LoadContextFromFrame()); + tail runtime::WasmStackGuard(LoadContextFromFrame(), SmiConstant(0)); } builtin WasmStackOverflow(): JSAny { diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index b939da172d4467..6e7240d3c1ecdd 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -842,6 +842,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. { __ PushReturnAddressFrom(rax); + // TODO(40931165): use parameter count from JSDispatchTable and validate + // that it matches the number of values in the JSGeneratorObject. 
__ LoadTaggedField( rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movzxwq(rax, FieldOperand( @@ -1253,6 +1255,12 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing( feedback_vector, CodeKind::BASELINE, &flags_need_processing); +#ifndef V8_ENABLE_LEAPTIERING + // TODO(olivf, 42204201): This fastcase is difficult to support with the + // sandbox as it requires getting write access to the dispatch table. See + // `JSFunction::UpdateCode`. We might want to remove it for all + // configurations as it does not seem to be performance sensitive. + // Load the baseline code into the closure. __ Move(rcx, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); @@ -1260,6 +1268,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( rcx, closure, kInterpreterBytecodeArrayRegister, WriteBarrierDescriptor::SlotAddressRegister()); __ JumpCodeObject(rcx, kJSEntrypointTag); +#endif // V8_ENABLE_LEAPTIERING __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -2530,9 +2539,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- rsi : the function context. // ----------------------------------- +#ifdef V8_ENABLE_LEAPTIERING + __ InvokeFunctionCode(rdi, no_reg, rax, InvokeType::kJump); +#else __ movzxwq( rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump); +#endif } namespace { @@ -2839,8 +2852,9 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, Label jump_to_optimized_code; { // If maybe_target_code is not null, no need to call into runtime. A - // precondition here is: if maybe_target_code is a InstructionStream object, - // it must NOT be marked_for_deoptimization (callers must ensure this). + // precondition here is: if maybe_target_code is an InstructionStream + // object, it must NOT be marked_for_deoptimization (callers must ensure + // this). __ testq(maybe_target_code, maybe_target_code); __ j(not_equal, &jump_to_optimized_code, Label::kNear); } @@ -3009,11 +3023,11 @@ void RestoreWasmParams(MacroAssembler* masm, int offset) { // When this builtin is called, the topmost stack entry is the calling pc. // This is replaced with the following: // -// [ calling pc ] <-- rsp; popped by {ret}. -// [ feedback vector ] -// [ Wasm instance ] -// [ WASM frame marker ] -// [ saved rbp ] <-- rbp; this is where "calling pc" used to be. +// [ calling pc ] <-- rsp; popped by {ret}. +// [ feedback vector ] +// [ Wasm instance data ] +// [ WASM frame marker ] +// [ saved rbp ] <-- rbp; this is where "calling pc" used to be. 
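Reading the layout comment above concretely (a hedged sketch, assuming 8-byte slots on x64), the builtin below leaves the slots at these rbp-relative offsets:

    // Hedged sketch of the finished Liftoff setup frame (offsets in bytes):
    constexpr int kSavedRbpOffset       =  0;   // <-- rbp
    constexpr int kFrameMarkerOffset    = -8;   // WASM frame type marker
    constexpr int kInstanceDataOffset   = -16;  // Wasm instance data
    constexpr int kFeedbackVectorOffset = -24;  // feedback vector
    constexpr int kCallingPcOffset      = -32;  // <-- rsp; popped by ret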
void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register func_index = wasm::kLiftoffFrameSetupFunctionReg; Register vector = r15; @@ -3024,14 +3038,14 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Move(rbp, rsp); __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM))); __ LoadTaggedField( - vector, FieldOperand(kWasmInstanceRegister, + vector, FieldOperand(kWasmImplicitArgRegister, WasmTrustedInstanceData::kFeedbackVectorsOffset)); __ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size, FixedArray::kHeaderSize)); Label allocate_vector, done; __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); - __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ Push(vector); __ Push(calling_pc); __ ret(0); @@ -3044,7 +3058,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { // // [ reserved slot for NativeModule ] <-- arg[2] // [ ("declared") function index ] <-- arg[1] for runtime func. - // [ Wasm instance ] <-- arg[0] + // [ Wasm instance data ] <-- arg[0] // [ ...spilled Wasm parameters... ] // [ calling pc ] // [ WASM_LIFTOFF_SETUP marker ] @@ -3055,8 +3069,8 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Push(calling_pc); int offset = SaveWasmParams(masm); - // Arguments to the runtime function: instance, func_index. - __ Push(kWasmInstanceRegister); + // Arguments to the runtime function: instance data, func_index. + __ Push(kWasmImplicitArgRegister); __ SmiTag(func_index); __ Push(func_index); // Allocate a stack slot where the runtime function can spill a pointer @@ -3087,7 +3101,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { int offset = SaveWasmParams(masm); // Push arguments for the runtime function. - __ Push(kWasmInstanceRegister); + __ Push(kWasmImplicitArgRegister); __ Push(r15); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. @@ -3099,10 +3113,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ movq(r15, kReturnRegister0); RestoreWasmParams(masm, offset); - // After the instance register has been restored, we can add the jump table - // start to the jump table offset already stored in r15. + // After the instance data register has been restored, we can add the jump + // table start to the jump table offset already stored in r15. __ addq(r15, - MemOperand(kWasmInstanceRegister, + MemOperand(kWasmImplicitArgRegister, wasm::ObjectAccess::ToTagged( WasmTrustedInstanceData::kJumpTableStartOffset))); } @@ -3282,20 +3296,20 @@ void ReloadParentContinuation(MacroAssembler* masm, Register promise, } // Loads the context field of the WasmTrustedInstanceData or WasmImportData -// depending on the ref's type, and places the result in the input register. -void GetContextFromRef(MacroAssembler* masm, Register ref) { +// depending on the data's type, and places the result in the input register. 
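The renamed helper defined next branches on the implicit argument's instance type; both possible types carry a native-context field. A hedged sketch with toy types standing in for the V8 objects:

    // The implicit argument is either trusted instance data (wasm callee)
    // or import data (JS import); the wrapper needs the native context
    // from whichever it received.
    struct NativeContext {};
    struct ImplicitArg {
      bool is_trusted_instance_data;
      NativeContext* instance_native_context;  // set for instance data
      NativeContext* import_native_context;    // set for import data
    };

    NativeContext* ContextFromImplicitArg(const ImplicitArg& data) {
      return data.is_trusted_instance_data ? data.instance_native_context
                                           : data.import_native_context;
    }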
+void GetContextFromImplicitArg(MacroAssembler* masm, Register data) { __ LoadTaggedField(kScratchRegister, - FieldOperand(ref, HeapObject::kMapOffset)); + FieldOperand(data, HeapObject::kMapOffset)); __ CmpInstanceType(kScratchRegister, WASM_TRUSTED_INSTANCE_DATA_TYPE); Label instance; Label end; __ j(equal, &instance); - __ LoadTaggedField(ref, - FieldOperand(ref, WasmImportData::kNativeContextOffset)); + __ LoadTaggedField(data, + FieldOperand(data, WasmImportData::kNativeContextOffset)); __ jmp(&end); __ bind(&instance); __ LoadTaggedField( - ref, FieldOperand(ref, WasmTrustedInstanceData::kNativeContextOffset)); + data, FieldOperand(data, WasmTrustedInstanceData::kNativeContextOffset)); __ bind(&end); } @@ -3331,7 +3345,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, void ResetStackSwitchFrameStackSlots(MacroAssembler* masm) { __ Move(kScratchRegister, Smi::zero()); - __ movq(MemOperand(rbp, StackSwitchFrameConstants::kRefOffset), + __ movq(MemOperand(rbp, StackSwitchFrameConstants::kImplicitArgOffset), kScratchRegister); __ movq(MemOperand(rbp, StackSwitchFrameConstants::kResultArrayOffset), kScratchRegister); @@ -3348,7 +3362,7 @@ void SwitchToAllocatedStack(MacroAssembler* masm, Register wasm_instance, parent_continuation, FieldOperand(parent_continuation, WasmContinuationObject::kParentOffset)); SaveState(masm, parent_continuation, scratch, suspend); - SwitchStacks(masm, no_reg, kWasmInstanceRegister, wrapper_buffer); + SwitchStacks(masm, no_reg, kWasmImplicitArgRegister, wrapper_buffer); parent_continuation = no_reg; Register target_continuation = scratch; __ LoadRoot(target_continuation, RootIndex::kActiveContinuation); @@ -3403,8 +3417,8 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm, Register tmp1, __ LoadTaggedField( promise, FieldOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ movq(kContextRegister, - MemOperand(rbp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister); + MemOperand(rbp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister); ReloadParentContinuation(masm, promise, return_value, kContextRegister, tmp1, tmp2); @@ -3449,8 +3463,8 @@ void GenerateExceptionHandlingLandingPad(MacroAssembler* masm, __ LoadTaggedField( promise, FieldOperand(promise, WasmSuspenderObject::kPromiseOffset)); __ movq(kContextRegister, - MemOperand(rbp, StackSwitchFrameConstants::kRefOffset)); - GetContextFromRef(masm, kContextRegister); + MemOperand(rbp, StackSwitchFrameConstants::kImplicitArgOffset)); + GetContextFromImplicitArg(masm, kContextRegister); ReloadParentContinuation(masm, promise, reason, kContextRegister, r8, rdi); RestoreParentSuspender(masm, r8, rdi); @@ -3476,24 +3490,25 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ AllocateStackSpace(StackSwitchFrameConstants::kNumSpillSlots * kSystemPointerSize); + // Load the implicit argument (instance data or import data) from the frame. + __ movq(kWasmImplicitArgRegister, + MemOperand(rbp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); + Register wrapper_buffer = WasmJSToWasmWrapperDescriptor::WrapperBufferRegister(); - __ movq(kWasmInstanceRegister, - MemOperand(rbp, JSToWasmWrapperFrameConstants::kRefParamOffset)); - Register original_fp = stack_switch ? r9 : rbp; Register new_wrapper_buffer = stack_switch ? 
rbx : wrapper_buffer; Label suspend; if (stack_switch) { - SwitchToAllocatedStack(masm, kWasmInstanceRegister, wrapper_buffer, + SwitchToAllocatedStack(masm, kWasmImplicitArgRegister, wrapper_buffer, original_fp, new_wrapper_buffer, rax, &suspend); } __ movq(MemOperand(rbp, JSToWasmWrapperFrameConstants::kWrapperBufferOffset), new_wrapper_buffer); if (stack_switch) { - __ movq(MemOperand(rbp, StackSwitchFrameConstants::kRefOffset), - kWasmInstanceRegister); + __ movq(MemOperand(rbp, StackSwitchFrameConstants::kImplicitArgOffset), + kWasmImplicitArgRegister); Register result_array = kScratchRegister; __ movq(result_array, MemOperand(original_fp, @@ -3531,7 +3546,7 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register last_stack_param = rcx; - // The first GP parameter is the ref, which we handle specially. + // The first GP parameter is the data, which we handle specially. int stack_params_offset = (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + arraysize(wasm::kFpParamRegisters) * kDoubleSize; @@ -3618,15 +3633,16 @@ void JSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { if (stack_switch) { __ movq(rbx, MemOperand(rbp, StackSwitchFrameConstants::kResultArrayOffset)); - __ movq(rax, MemOperand(rbp, StackSwitchFrameConstants::kRefOffset)); + __ movq(rax, + MemOperand(rbp, StackSwitchFrameConstants::kImplicitArgOffset)); } else { __ movq(rbx, MemOperand(rbp, JSToWasmWrapperFrameConstants::kResultArrayParamOffset)); __ movq(rax, - MemOperand(rbp, JSToWasmWrapperFrameConstants::kRefParamOffset)); + MemOperand(rbp, JSToWasmWrapperFrameConstants::kImplicitArgOffset)); } - GetContextFromRef(masm, rax); + GetContextFromImplicitArg(masm, rax); __ CallBuiltin(Builtin::kJSToWasmHandleReturns); Label return_promise; @@ -3675,18 +3691,7 @@ void Builtins::Generate_WasmToJsWrapperAsm(MacroAssembler* masm) { for (size_t i = arraysize(wasm::kGpParamRegisters) - 1; i > 0; --i) { __ pushq(wasm::kGpParamRegisters[i]); } - // Reserve fixed slots for the CSA wrapper. - // Two slots for stack-switching (central stack pointer and secondary stack - // limit): - static_assert(WasmImportWrapperFrameConstants::kCentralStackSPOffset == - WasmImportWrapperFrameConstants::kWasmInstanceOffset - - kSystemPointerSize); - __ pushq(Immediate(kNullAddress)); - static_assert(WasmImportWrapperFrameConstants::kSecondaryStackLimitOffset == - WasmImportWrapperFrameConstants::kCentralStackSPOffset - - kSystemPointerSize); - __ pushq(Immediate(kNullAddress)); - // One slot for the signature: + // Signature slot. __ pushq(rax); // Push the return address again. __ pushq(kScratchRegister); @@ -4261,6 +4266,74 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ jmp(rdi); } +#if V8_ENABLE_WEBASSEMBLY +void Builtins::Generate_WasmHandleStackOverflow(MacroAssembler* masm) { + using ER = ExternalReference; + Register frame_base = WasmHandleStackOverflowDescriptor::FrameBaseRegister(); + Register gap = WasmHandleStackOverflowDescriptor::GapRegister(); + { + DCHECK_NE(kCArgRegs[1], frame_base); + DCHECK_NE(kCArgRegs[3], frame_base); + __ movq(kCArgRegs[3], gap); + __ movq(kCArgRegs[1], rsp); + __ movq(kCArgRegs[2], frame_base); + __ subq(kCArgRegs[2], kCArgRegs[1]); +#ifdef V8_TARGET_OS_WIN + Register old_fp = rcx; + // On Windows we need to preserve the rbp value somewhere before entering + // an INTERNAL frame later. It will be placed on the stack as an argument.
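Why the value "will be placed on the stack": the Windows x64 ABI passes only four integer arguments in registers (RCX, RDX, R8, R9); the caller reserves 32 bytes of shadow space, and a fifth argument lives immediately above it. A sketch of the offset that the movq(Operand(rsp, 4 * kSystemPointerSize), ...) just below relies on:

    // Windows x64: args 1-4 in RCX/RDX/R8/R9, 32 bytes of shadow space,
    // fifth argument at [rsp + 32] at the time of the call.
    constexpr int kShadowSpaceSlots = 4;
    constexpr int kSystemPointerSizeBytes = 8;
    constexpr int kFifthArgOffset = kShadowSpaceSlots * kSystemPointerSizeBytes;
    static_assert(kFifthArgOffset == 32, "matches 4 * kSystemPointerSize");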
+ __ movq(old_fp, rbp); +#else + __ movq(kCArgRegs[4], rbp); +#endif + FrameScope scope(masm, StackFrame::INTERNAL); + __ pushq(kCArgRegs[3]); + __ PrepareCallCFunction(5); + // On Windows, put the arguments on the stack (PrepareCallCFunction + // has created space for this). +#ifdef V8_TARGET_OS_WIN + __ movq(Operand(rsp, 4 * kSystemPointerSize), old_fp); +#endif + __ Move(kCArgRegs[0], ER::isolate_address()); + __ CallCFunction(ER::wasm_grow_stack(), 5); + __ popq(gap); + DCHECK_NE(kReturnRegister0, gap); + } + Label call_runtime; + // wasm_grow_stack returns zero if it cannot grow the stack. + __ testq(kReturnRegister0, kReturnRegister0); + __ j(zero, &call_runtime, Label::kNear); + // Calculate the old FP - SP offset to adjust FP accordingly for the new SP. + __ subq(rbp, rsp); + __ addq(rbp, kReturnRegister0); + __ movq(rsp, kReturnRegister0); + __ movq(kScratchRegister, + Immediate(StackFrame::TypeToMarker(StackFrame::WASM_SEGMENT_START))); + __ movq(MemOperand(rbp, TypedFrameConstants::kFrameTypeOffset), + kScratchRegister); + __ ret(0); + + // If wasm_grow_stack returns zero, an interruption or stack overflow + // should be handled by a runtime call. + { + __ bind(&call_runtime); + __ movq(kWasmImplicitArgRegister, + MemOperand(rbp, WasmFrameConstants::kWasmInstanceDataOffset)); + __ LoadTaggedField( + kContextRegister, + FieldOperand(kWasmImplicitArgRegister, + WasmTrustedInstanceData::kNativeContextOffset)); + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ SmiTag(gap); + __ pushq(gap); + __ CallRuntime(Runtime::kWasmStackGuard); + __ LeaveFrame(StackFrame::INTERNAL); + __ ret(0); + } +} +#endif // V8_ENABLE_WEBASSEMBLY + void Builtins::Generate_DoubleToI(MacroAssembler* masm) { Label check_negative, process_64_bits, done; @@ -5006,8 +5079,13 @@ void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) { // The arguments are already in the stack (including any necessary padding), // we should not try to massage the arguments again. +#ifdef V8_ENABLE_LEAPTIERING + __ InvokeFunction(rdi, no_reg, rax, InvokeType::kJump, + ArgumentAdaptionMode::kDontAdapt); +#else __ movq(rbx, Immediate(kDontAdaptArgumentsSentinel)); __ InvokeFunction(rdi, no_reg, rbx, rax, InvokeType::kJump); +#endif } #undef __ diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index 5b17b9e31b4b14..36792db46900fb 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -1,12 +1,13 @@ cbruni@chromium.org clemensb@chromium.org +dmercadier@chromium.org gdeepti@chromium.org ishell@chromium.org jgruber@chromium.org jkummerow@chromium.org leszeks@chromium.org nicohartmann@chromium.org +olivf@chromium.org victorgomes@chromium.org -dmercadier@chromium.org per-file compiler.*=marja@chromium.org diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index 4b0d8ebf704c8a..d897a3c46ed2ff 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -589,8 +589,12 @@ void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc, // this point to make CodeDesc initialization less fiddly.
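One step back in Generate_WasmHandleStackOverflow above: after wasm_grow_stack returns the new stack top, the subq/addq/movq triple rebases the frame pointer by keeping the FP-to-SP delta of the current frame invariant across the switch. A minimal sketch of that arithmetic:

    #include <cstdint>

    // Preserve the frame's FP-to-SP distance across the stack switch.
    uintptr_t RebasedFramePointer(uintptr_t old_fp, uintptr_t old_sp,
                                  uintptr_t new_sp) {
      return new_sp + (old_fp - old_sp);
    }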
static constexpr int kConstantPoolSize = 0; + static constexpr int kBuiltinJumpTableInfoSize = 0; const int instruction_size = pc_offset(); - const int code_comments_offset = instruction_size - code_comments_size; + const int builtin_jump_table_info_offset = + instruction_size - kBuiltinJumpTableInfoSize; + const int code_comments_offset = + builtin_jump_table_info_offset - code_comments_size; const int constant_pool_offset = code_comments_offset - kConstantPoolSize; const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) ? constant_pool_offset @@ -603,7 +607,8 @@ void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc, static_cast(reloc_info_writer.pos() - buffer_->start()); CodeDesc::Initialize(desc, this, safepoint_table_offset, handler_table_offset2, constant_pool_offset, - code_comments_offset, reloc_info_offset); + code_comments_offset, builtin_jump_table_info_offset, + reloc_info_offset); } void Assembler::Align(int m) { diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index c96598939cbb56..6da616dc01da05 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -1415,7 +1415,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type, } PushCommonFrame(scratch); #if V8_ENABLE_WEBASSEMBLY - if (type == StackFrame::WASM) Push(kWasmInstanceRegister); + if (type == StackFrame::WASM) Push(kWasmImplicitArgRegister); #endif // V8_ENABLE_WEBASSEMBLY } diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h index ce0c48e376126d..d9b10e4ada1d7e 100644 --- a/deps/v8/src/codegen/arm/register-arm.h +++ b/deps/v8/src/codegen/arm/register-arm.h @@ -320,7 +320,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = r2; constexpr Register kRuntimeCallFunctionRegister = r1; constexpr Register kRuntimeCallArgCountRegister = r0; constexpr Register kRuntimeCallArgvRegister = r2; -constexpr Register kWasmInstanceRegister = r3; +constexpr Register kWasmImplicitArgRegister = r3; constexpr Register kWasmCompileLazyFuncIndexRegister = r4; // Give alias names to registers diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 63fbed1bf7f307..343b5c75884410 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -455,8 +455,12 @@ void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc, // this point to make CodeDesc initialization less fiddly. static constexpr int kConstantPoolSize = 0; + static constexpr int kBuiltinJumpTableInfoSize = 0; const int instruction_size = pc_offset(); - const int code_comments_offset = instruction_size - code_comments_size; + const int builtin_jump_table_info_offset = + instruction_size - kBuiltinJumpTableInfoSize; + const int code_comments_offset = + builtin_jump_table_info_offset - code_comments_size; const int constant_pool_offset = code_comments_offset - kConstantPoolSize; const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) ? 
constant_pool_offset @@ -469,7 +473,8 @@ void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc, static_cast(reloc_info_writer.pos() - buffer_->start()); CodeDesc::Initialize(desc, this, safepoint_table_offset, handler_table_offset2, constant_pool_offset, - code_comments_offset, reloc_info_offset); + code_comments_offset, builtin_jump_table_info_offset, + reloc_info_offset); } void Assembler::Align(int m) { diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc index 997a8ea0449af7..50715ebcb8b210 100644 --- a/deps/v8/src/codegen/arm64/cpu-arm64.cc +++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc @@ -52,6 +52,10 @@ void CpuFeatures::FlushICache(void* address, size_t length) { ::FlushInstructionCache(GetCurrentProcess(), address, length); #elif defined(V8_OS_DARWIN) sys_icache_invalidate(address, length); +#elif defined(V8_OS_LINUX) + char* begin = reinterpret_cast(address); + + __builtin___clear_cache(begin, begin + length); #else // The code below assumes user space cache operations are allowed. The goal // of this routine is to make sure the code generated is visible to the I diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index d8c2cac8c23da6..c4749cb4da0507 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -1230,6 +1230,11 @@ void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CompareAndBranch(x, y, lt, dest); } +void MacroAssembler::JumpIfUnsignedLessThan(Register x, int32_t y, + Label* dest) { + CompareAndBranch(x, y, lo, dest); +} + void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { JumpIfSmi(value, nullptr, not_smi_label); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index ac907173a5ee13..c38fafca17e666 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1424,6 +1424,8 @@ void MacroAssembler::PopCalleeSavedRegisters() { namespace { +#ifndef V8_ENABLE_LEAPTIERING +// Only used when leaptiering is disabled. void TailCallOptimizedCodeSlot(MacroAssembler* masm, Register optimized_code_entry, Register scratch) { @@ -1435,7 +1437,6 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, ASM_CODE_COMMENT(masm); DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch)); - Register closure = x1; Label heal_optimized_code_slot; // If the optimized code is cleared, go to runtime to update the optimization @@ -1456,7 +1457,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, // Optimized code is good, get it into the closure and link the closure into // the optimized functions list, then tail call the optimized code. 
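The new Linux branch in CpuFeatures::FlushICache above defers to the compiler builtin instead of hand-written cache maintenance. A standalone equivalent (GCC/Clang):

    #include <cstddef>

    // __builtin___clear_cache emits the required instruction-cache
    // maintenance (or a kernel call) for [start, start + length).
    void FlushICacheRange(void* start, size_t length) {
      char* begin = reinterpret_cast<char*>(start);
      __builtin___clear_cache(begin, begin + length);
    }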
- __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure); + __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, x1); static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); __ Move(x2, optimized_code_entry); __ JumpCodeObject(x2, kJSEntrypointTag); @@ -1467,6 +1468,7 @@ __ bind(&heal_optimized_code_slot); __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot); } +#endif // V8_ENABLE_LEAPTIERING } // namespace @@ -1489,13 +1491,18 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode( Register optimized_code, Register closure) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(optimized_code, closure)); + +#ifdef V8_ENABLE_LEAPTIERING + UNREACHABLE(); +#else // Store code entry in the closure. AssertCode(optimized_code); StoreCodePointerField(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code, kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore, SmiCheck::kOmit, - SlotDescriptor::ForCodePointerSlot()); + ReadOnlyCheck::kOmit, SlotDescriptor::ForCodePointerSlot()); +#endif // V8_ENABLE_LEAPTIERING } void MacroAssembler::GenerateTailCallToReturnedCode( @@ -1536,14 +1543,10 @@ Condition MacroAssembler::LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing( ASM_CODE_COMMENT(this); DCHECK(!AreAliased(flags, feedback_vector)); DCHECK(CodeKindCanTierUp(current_code_kind)); + uint32_t flag_mask = + FeedbackVector::FlagMaskForNeedsProcessingCheckFrom(current_code_kind); Ldrh(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset)); - uint32_t kFlagsMask = FeedbackVector::kFlagsTieringStateIsAnyRequested | - FeedbackVector::kFlagsMaybeHasTurbofanCode | - FeedbackVector::kFlagsLogNextExecution; - if (current_code_kind != CodeKind::MAGLEV) { - kFlagsMask |= FeedbackVector::kFlagsMaybeHasMaglevCode; - } - Tst(flags, kFlagsMask); + Tst(flags, flag_mask); return ne; } @@ -1560,6 +1563,18 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( Register flags, Register feedback_vector) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(flags, feedback_vector)); +#ifdef V8_ENABLE_LEAPTIERING + // In the leaptiering case, we don't load optimized code from the feedback + // vector, so we only need to call CompileOptimized or + // FunctionLogNextExecution here. See also + // LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing above. + Label needs_logging; + TestAndBranchIfAllClear( + flags, FeedbackVector::kFlagsTieringStateIsAnyRequested, &needs_logging); + GenerateTailCallToReturnedCode(Runtime::kCompileOptimized); + + bind(&needs_logging); + GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution); +#else Label maybe_has_optimized_code, maybe_needs_logging; // Check if optimized code is available. TestAndBranchIfAllClear(flags, @@ -1573,11 +1588,14 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution); bind(&maybe_has_optimized_code); + // This tiering logic is only needed if leaptiering is disabled. Otherwise + // we'll automatically tier up through the dispatch table.
Register optimized_code_entry = x7; LoadTaggedField(optimized_code_entry, FieldMemOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset)); TailCallOptimizedCodeSlot(this, optimized_code_entry, x4); +#endif // V8_ENABLE_LEAPTIERING } Condition MacroAssembler::CheckSmi(Register object) { @@ -2513,7 +2531,12 @@ void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag, void MacroAssembler::CallJSFunction(Register function_object) { Register code = kJavaScriptCallCodeStartRegister; -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_ENABLE_LEAPTIERING + Ldr(code.W(), + FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset)); + LoadCodeEntrypointFromJSDispatchTable(code, code); + Call(code); +#elif V8_ENABLE_SANDBOX // When the sandbox is enabled, we can directly fetch the entrypoint pointer // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. @@ -2531,7 +2554,18 @@ void MacroAssembler::CallJSFunction(Register function_object) { void MacroAssembler::JumpJSFunction(Register function_object, JumpMode jump_mode) { Register code = kJavaScriptCallCodeStartRegister; -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_ENABLE_LEAPTIERING + Ldr(code.W(), + FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset)); + LoadCodeEntrypointFromJSDispatchTable(code, code); + DCHECK_EQ(jump_mode, JumpMode::kJump); + // We jump through x17 here because for Branch Identification (BTI) we use + // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for tail-called + // code. See TailCallBuiltin for more information. + DCHECK_NE(code, x17); + Mov(x17, code); + Jump(x17); +#elif V8_ENABLE_SANDBOX // When the sandbox is enabled, we can directly fetch the entrypoint pointer // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. @@ -2660,7 +2694,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, } void MacroAssembler::InvokePrologue(Register formal_parameter_count, - Register actual_argument_count, Label* done, + Register actual_argument_count, InvokeType type) { ASM_CODE_COMMENT(this); // x0: actual arguments count. @@ -2759,9 +2793,10 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count, Bind(®ular_invoke); } -void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, - Register expected_parameter_count, - Register actual_parameter_count) { +void MacroAssembler::CallDebugOnFunctionCall( + Register fun, Register new_target, + Register expected_parameter_count_or_dispatch_handle, + Register actual_parameter_count) { ASM_CODE_COMMENT(this); // Load receiver to pass it later to DebugOnFunctionCall hook. Peek(x4, ReceiverOperand()); @@ -2771,18 +2806,118 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, if (!new_target.is_valid()) new_target = padreg; // Save values on stack. - SmiTag(expected_parameter_count); + SmiTag(expected_parameter_count_or_dispatch_handle); SmiTag(actual_parameter_count); - Push(expected_parameter_count, actual_parameter_count, new_target, fun); + Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count, + new_target, fun); Push(fun, x4); CallRuntime(Runtime::kDebugOnFunctionCall); // Restore values from stack. 
- Pop(fun, new_target, actual_parameter_count, expected_parameter_count); + Pop(fun, new_target, actual_parameter_count, + expected_parameter_count_or_dispatch_handle); SmiUntag(actual_parameter_count); - SmiUntag(expected_parameter_count); + SmiUntag(expected_parameter_count_or_dispatch_handle); +} + +#ifdef V8_ENABLE_LEAPTIERING +void MacroAssembler::InvokeFunction( + Register function, Register actual_parameter_count, InvokeType type, + ArgumentAdaptionMode argument_adaption_mode) { + ASM_CODE_COMMENT(this); + // You can't call a function without a valid frame. + DCHECK(type == InvokeType::kJump || has_frame()); + + // Contract with called JS functions requires that function is passed in x1. + // (See FullCodeGenerator::Generate().) + DCHECK_EQ(function, x1); + + // Set up the context. + LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset)); + + InvokeFunctionCode(function, no_reg, actual_parameter_count, type, + argument_adaption_mode); } +void MacroAssembler::InvokeFunctionWithNewTarget( + Register function, Register new_target, Register actual_parameter_count, + InvokeType type) { + ASM_CODE_COMMENT(this); + // You can't call a function without a valid frame. + DCHECK(type == InvokeType::kJump || has_frame()); + + // Contract with called JS functions requires that function is passed in x1. + // (See FullCodeGenerator::Generate().) + DCHECK_EQ(function, x1); + + LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset)); + + InvokeFunctionCode(function, new_target, actual_parameter_count, type); +} + +void MacroAssembler::InvokeFunctionCode( + Register function, Register new_target, Register actual_parameter_count, + InvokeType type, ArgumentAdaptionMode argument_adaption_mode) { + ASM_CODE_COMMENT(this); + // You can't call a function without a valid frame. + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); + DCHECK_EQ(function, x1); + DCHECK_IMPLIES(new_target.is_valid(), new_target == x3); + + Register dispatch_handle = x20; + Ldr(dispatch_handle.W(), + FieldMemOperand(function, JSFunction::kDispatchHandleOffset)); + + // On function call, call into the debugger if necessary. + Label debug_hook, continue_after_hook; + { + Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate())); + Ldrsb(x4, MemOperand(x4)); + Cbnz(x4, &debug_hook); + } + bind(&continue_after_hook); + + // Clear the new.target register if not given. + if (!new_target.is_valid()) { + LoadRoot(x3, RootIndex::kUndefinedValue); + } + + if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) { + Register expected_parameter_count = x2; + LoadParameterCountFromJSDispatchTable(expected_parameter_count, + dispatch_handle); + InvokePrologue(expected_parameter_count, actual_parameter_count, type); + } + + // We call indirectly through the code field in the function to + // allow recompilation to take effect without changing any of the + // call sites. + LoadCodeEntrypointFromJSDispatchTable(kJavaScriptCallCodeStartRegister, + dispatch_handle); + switch (type) { + case InvokeType::kCall: + Call(kJavaScriptCallCodeStartRegister); + break; + case InvokeType::kJump: + // We jump through x17 here because for Branch Identification (BTI) we use + // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for + // tail-called code. See TailCallBuiltin for more information. + Mov(x17, kJavaScriptCallCodeStartRegister); + Jump(x17); + break; + } + Label done; + B(&done); + + // Deferred debug hook. 
+ bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, dispatch_handle, + actual_parameter_count); + B(&continue_after_hook); + + bind(&done); +} +#else void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, @@ -2807,11 +2942,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, LoadRoot(x3, RootIndex::kUndefinedValue); } - Label done; - InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type); + InvokePrologue(expected_parameter_count, actual_parameter_count, type); - // If actual != expected, InvokePrologue will have handled the call through - // the argument adaptor mechanism. // The called function expects the call kind in x5. // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the @@ -2824,6 +2956,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, JumpJSFunction(function); break; } + Label done; B(&done); // Deferred debug hook. @@ -2832,30 +2965,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, actual_parameter_count); B(&continue_after_hook); - // Continue here if InvokePrologue does handle the invocation due to - // mismatched parameter counts. - Bind(&done); -} - -void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization( - Register code, Register scratch, Label* if_marked_for_deoptimization) { - Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset)); - Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit, - if_marked_for_deoptimization); + bind(&done); } -void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch, - Label* if_turbofanned) { - Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset)); - Tbnz(scratch.W(), Code::kIsTurbofannedBit, if_turbofanned); -} - -Operand MacroAssembler::ClearedValue() const { - return Operand(static_cast(i::ClearedValue(isolate()).ptr())); -} - -Operand MacroAssembler::ReceiverOperand() { return Operand(0); } - void MacroAssembler::InvokeFunctionWithNewTarget( Register function, Register new_target, Register actual_parameter_count, InvokeType type) { @@ -2902,6 +3014,26 @@ void MacroAssembler::InvokeFunction(Register function, InvokeFunctionCode(function, no_reg, expected_parameter_count, actual_parameter_count, type); } +#endif // V8_ENABLE_LEAPTIERING + +void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization( + Register code, Register scratch, Label* if_marked_for_deoptimization) { + Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset)); + Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit, + if_marked_for_deoptimization); +} + +void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch, + Label* if_turbofanned) { + Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset)); + Tbnz(scratch.W(), Code::kIsTurbofannedBit, if_turbofanned); +} + +Operand MacroAssembler::ClearedValue() const { + return Operand(static_cast(i::ClearedValue(isolate()).ptr())); +} + +Operand MacroAssembler::ReceiverOperand() { return Operand(0); } void MacroAssembler::TryConvertDoubleToInt64(Register result, DoubleRegister double_input, @@ -3000,18 +3132,16 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) { } else { Register type_reg = temps.AcquireX(); Mov(type_reg, StackFrame::TypeToMarker(type)); - Register fourth_reg = no_reg; + Register fourth_reg = padreg; if (type == StackFrame::CONSTRUCT || type == 
StackFrame::FAST_CONSTRUCT) { fourth_reg = cp; + } #if V8_ENABLE_WEBASSEMBLY - } else if (type == StackFrame::WASM || - type == StackFrame::WASM_LIFTOFF_SETUP || - type == StackFrame::WASM_EXIT) { - fourth_reg = kWasmInstanceRegister; -#endif // V8_ENABLE_WEBASSEMBLY - } else { - fourth_reg = padreg; + if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP || + type == StackFrame::WASM_EXIT) { + fourth_reg = kWasmImplicitArgRegister; } +#endif // V8_ENABLE_WEBASSEMBLY Push(lr, fp, type_reg, fourth_reg); static constexpr int kSPToFPDelta = 2 * kSystemPointerSize; Add(fp, sp, kSPToFPDelta); @@ -3582,17 +3712,24 @@ void MacroAssembler::JumpIfNotMarking(Label* not_marking, Cbz(scratch, not_marking); } -void MacroAssembler::RecordWriteField(Register object, int offset, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - SmiCheck smi_check, SlotDescriptor slot) { +void MacroAssembler::RecordWriteField( + Register object, int offset, Register value, LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, SmiCheck smi_check, ReadOnlyCheck ro_check, + SlotDescriptor slot) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(object, value)); // First, check if a write barrier is even needed. The tests below - // catch stores of Smis. + // catch stores of Smis and read-only objects. Label done; +#if V8_STATIC_ROOTS_BOOL + if (ro_check == ReadOnlyCheck::kInline) { + // Quick check for Read-only and small Smi values. + static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize); + JumpIfUnsignedLessThan(value, kRegularPageSize, &done); + } +#endif // V8_STATIC_ROOTS_BOOL + // Skip the barrier if writing a smi. if (smi_check == SmiCheck::kInline) { JumpIfSmi(value, &done); @@ -3616,7 +3753,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status, - save_fp, SmiCheck::kOmit, slot); + save_fp, SmiCheck::kOmit, ReadOnlyCheck::kOmit, slot); Bind(&done); } @@ -3819,6 +3956,36 @@ void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination, } #endif // V8_ENABLE_SANDBOX +#ifdef V8_ENABLE_LEAPTIERING +void MacroAssembler::LoadCodeEntrypointFromJSDispatchTable( + Register destination, Register dispatch_handle) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + Mov(scratch, ExternalReference::js_dispatch_table_address()); + // TODO(saelo): can the offset computation be done more efficiently? + Mov(destination, Operand(dispatch_handle, LSR, kJSDispatchHandleShift)); + Mov(destination, Operand(destination, LSL, kJSDispatchTableEntrySizeLog2)); + DCHECK_EQ(JSDispatchEntry::kEntrypointOffset, 0); + Ldr(destination, MemOperand(scratch, destination)); +} + +void MacroAssembler::LoadParameterCountFromJSDispatchTable( + Register destination, Register dispatch_handle) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + Mov(scratch, ExternalReference::js_dispatch_table_address()); + // TODO(saelo): can the offset computation be done more efficiently? 
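Both dispatch-table loads here (the entrypoint load above and the parameter-count load completed just below) decode the handle the same way: shift off the low handle bits to get the table index, then scale by the entry size; the entrypoint sits at entry offset 0 and the code-object word at JSDispatchEntry::kCodeObjectOffset, per the assertions in the patch. A hedged sketch of the offset computation:

    #include <cstdint>

    // offset = (handle >> handle_shift) << entry_size_log2,
    // i.e. index * sizeof(JSDispatchEntry).
    uint64_t DispatchEntryOffset(uint32_t handle, int handle_shift,
                                 int entry_size_log2) {
      uint64_t index = handle >> handle_shift;
      return index << entry_size_log2;
    }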
+ Mov(destination, Operand(dispatch_handle, LSR, kJSDispatchHandleShift)); + Mov(destination, Operand(destination, LSL, kJSDispatchTableEntrySizeLog2)); + Add(destination, destination, Immediate(JSDispatchEntry::kCodeObjectOffset)); + Ldr(destination, MemOperand(scratch, destination)); + And(destination, destination, + Immediate(JSDispatchEntry::kParameterCountMask)); +} +#endif + void MacroAssembler::LoadProtectedPointerField(Register destination, MemOperand field_operand) { DCHECK(root_array_available()); @@ -3960,7 +4127,7 @@ void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, void MacroAssembler::RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode, SmiCheck smi_check, - SlotDescriptor slot) { + ReadOnlyCheck ro_check, SlotDescriptor slot) { ASM_CODE_COMMENT(this); ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite"); DCHECK(!AreAliased(object, value)); @@ -3987,9 +4154,18 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, } // First, check if a write barrier is even needed. The tests below - // catch stores of smis and stores into the young generation. + // catch stores of smis and read-only objects, as well as stores into the + // young generation. Label done; +#if V8_STATIC_ROOTS_BOOL + if (ro_check == ReadOnlyCheck::kInline) { + // Quick check for Read-only and small Smi values. + static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize); + JumpIfUnsignedLessThan(value, kRegularPageSize, &done); + } +#endif // V8_STATIC_ROOTS_BOOL + if (smi_check == SmiCheck::kInline) { DCHECK_EQ(0, kSmiTag); JumpIfSmi(value, &done); diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 38bcdf9b138f04..d14916d24b7aa1 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1004,8 +1004,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { inline void JumpIfSmi(Register value, Label* smi_label, Label* not_smi_label = nullptr); + inline void JumpIf(Condition cond, Register x, int32_t y, Label* dest); inline void JumpIfEqual(Register x, int32_t y, Label* dest); inline void JumpIfLessThan(Register x, int32_t y, Label* dest); + inline void JumpIfUnsignedLessThan(Register x, int32_t y, Label* dest); void JumpIfMarking(Label* is_marking, Label::Distance condition_met_distance = Label::kFar); @@ -1652,6 +1654,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { CodeEntrypointTag tag); #endif +#ifdef V8_ENABLE_LEAPTIERING + // Load the entrypoint pointer of a JSDispatchTable entry. + void LoadCodeEntrypointFromJSDispatchTable(Register destination, + Register dispatch_handle); + // Load the parameter count of a JSDispatchTable entry. + void LoadParameterCountFromJSDispatchTable(Register destination, + Register dispatch_handle); +#endif // V8_ENABLE_LEAPTIERING + // Load a protected pointer field. void LoadProtectedPointerField(Register destination, MemOperand field_operand); @@ -2067,13 +2078,37 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // 'expected' must use an immediate or x2. // 'call_kind' must be x5. void InvokePrologue(Register expected_parameter_count, - Register actual_parameter_count, Label* done, - InvokeType type); + Register actual_parameter_count, InvokeType type); // On function call, call into the debugger.
- void CallDebugOnFunctionCall(Register fun, Register new_target, - Register expected_parameter_count, - Register actual_parameter_count); + void CallDebugOnFunctionCall( + Register fun, Register new_target, + Register expected_parameter_count_or_dispatch_handle, + Register actual_parameter_count); + + // The way we invoke JSFunctions differs depending on whether leaptiering is + // enabled. As such, these functions exist in two variants. In the future, + // leaptiering will be used on all platforms. At that point, the + // non-leaptiering variants will disappear. + +#ifdef V8_ENABLE_LEAPTIERING + // Invoke the JavaScript function in the given register. Changes the + // current context to the context in the function before invoking. + void InvokeFunction(Register function, Register actual_parameter_count, + InvokeType type, + ArgumentAdaptionMode argument_adaption_mode = + ArgumentAdaptionMode::kAdapt); + // Invoke the JavaScript function in the given register. + // Changes the current context to the context in the function before invoking. + void InvokeFunctionWithNewTarget(Register function, Register new_target, + Register actual_parameter_count, + InvokeType type); + // Invoke the JavaScript function code by either calling or jumping. + void InvokeFunctionCode(Register function, Register new_target, + Register actual_parameter_count, InvokeType type, + ArgumentAdaptionMode argument_adaption_mode = + ArgumentAdaptionMode::kAdapt); +#else void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type); @@ -2084,6 +2119,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { InvokeType type); void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type); +#endif // ---- InstructionStream generation helpers ---- @@ -2251,6 +2287,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void RecordWriteField( Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline, + ReadOnlyCheck ro_check = ReadOnlyCheck::kInline, SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot()); // For a given |object| notify the garbage collector that the slot at |offset| @@ -2259,6 +2296,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline, + ReadOnlyCheck ro_check = ReadOnlyCheck::kInline, SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot()); // --------------------------------------------------------------------------- diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index 6c4ef52422cff9..0114554df51a0a 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -267,9 +267,10 @@ static_assert(sizeof(Register) <= sizeof(int), "Register can efficiently be passed by value"); // Assign |source| value to |no_reg| and return the |source|'s previous value. 
-inline Register ReassignRegister(Register& source) { - Register result = source; - source = Register::no_reg(); +template +inline RegT ReassignRegister(RegT& source) { + RegT result = source; + source = RegT::no_reg(); return result; } @@ -612,7 +613,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = x2; constexpr Register kRuntimeCallFunctionRegister = x1; constexpr Register kRuntimeCallArgCountRegister = x0; constexpr Register kRuntimeCallArgvRegister = x11; -constexpr Register kWasmInstanceRegister = x7; +constexpr Register kWasmImplicitArgRegister = x7; constexpr Register kWasmCompileLazyFuncIndexRegister = x8; constexpr Register kWasmTrapHandlerFaultAddressRegister = x16; constexpr Register kSimulatorHltArgument = x16; diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h index cb9f7456109ab3..a425d48698d9ad 100644 --- a/deps/v8/src/codegen/assembler-arch.h +++ b/deps/v8/src/codegen/assembler-arch.h @@ -15,7 +15,7 @@ #include "src/codegen/arm64/assembler-arm64.h" #elif V8_TARGET_ARCH_ARM #include "src/codegen/arm/assembler-arm.h" -#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 +#elif V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/assembler-ppc.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/assembler-mips64.h" diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h index 3618e243d5e0bc..f2dc73884be3e2 100644 --- a/deps/v8/src/codegen/assembler-inl.h +++ b/deps/v8/src/codegen/assembler-inl.h @@ -15,7 +15,7 @@ #include "src/codegen/arm64/assembler-arm64-inl.h" #elif V8_TARGET_ARCH_ARM #include "src/codegen/arm/assembler-arm-inl.h" -#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 +#elif V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/assembler-ppc-inl.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/assembler-mips64-inl.h" diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 31389e7522bcc8..50c7d794f9cfff 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -308,7 +308,8 @@ class SlotDescriptor { } private: - SlotDescriptor(IndirectPointerTag tag) : indirect_pointer_tag_(tag) {} + explicit SlotDescriptor(IndirectPointerTag tag) + : indirect_pointer_tag_(tag) {} // If the tag is null, this object describes a direct pointer slot. 
IndirectPointerTag indirect_pointer_tag_; @@ -527,13 +528,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { !v8_flags.debug_code) { return false; } - if (RelocInfo::IsOnlyForDisassembler(rmode)) { -#ifdef ENABLE_DISASSEMBLER - return true; -#else - return false; -#endif // ENABLE_DISASSEMBLER - } return true; } diff --git a/deps/v8/src/codegen/code-desc.cc b/deps/v8/src/codegen/code-desc.cc index ea2e4712b6f6d1..1e36bc84814e7d 100644 --- a/deps/v8/src/codegen/code-desc.cc +++ b/deps/v8/src/codegen/code-desc.cc @@ -13,13 +13,19 @@ namespace internal { void CodeDesc::Initialize(CodeDesc* desc, Assembler* assembler, int safepoint_table_offset, int handler_table_offset, int constant_pool_offset, int code_comments_offset, + int builtin_jump_table_info_offset, int reloc_info_offset) { desc->buffer = assembler->buffer_start(); desc->buffer_size = assembler->buffer_size(); desc->instr_size = assembler->instruction_size(); + desc->builtin_jump_table_info_offset = builtin_jump_table_info_offset; + desc->builtin_jump_table_info_size = + desc->instr_size - builtin_jump_table_info_offset; + desc->code_comments_offset = code_comments_offset; - desc->code_comments_size = desc->instr_size - code_comments_offset; + desc->code_comments_size = + desc->builtin_jump_table_info_offset - code_comments_offset; desc->constant_pool_offset = constant_pool_offset; desc->constant_pool_size = desc->code_comments_offset - constant_pool_offset; @@ -61,7 +67,11 @@ void CodeDesc::Verify(const CodeDesc* desc) { desc->code_comments_offset); DCHECK_GE(desc->code_comments_size, 0); DCHECK_EQ(desc->code_comments_size + desc->code_comments_offset, - desc->instr_size); + desc->builtin_jump_table_info_offset); + DCHECK_GE(desc->builtin_jump_table_info_size, 0); + DCHECK_EQ( + desc->builtin_jump_table_info_size + desc->builtin_jump_table_info_offset, + desc->instr_size); DCHECK_GE(desc->reloc_offset, 0); DCHECK_GE(desc->reloc_size, 0); diff --git a/deps/v8/src/codegen/code-desc.h b/deps/v8/src/codegen/code-desc.h index 2f74cdb9c7cdbe..11db5652a04ef9 100644 --- a/deps/v8/src/codegen/code-desc.h +++ b/deps/v8/src/codegen/code-desc.h @@ -32,6 +32,7 @@ class CodeDesc { static void Initialize(CodeDesc* desc, Assembler* assembler, int safepoint_table_offset, int handler_table_offset, int constant_pool_offset, int code_comments_offset, + int builtin_jump_table_info_offset, int reloc_info_offset); #ifdef DEBUG @@ -62,6 +63,9 @@ class CodeDesc { int code_comments_offset = 0; int code_comments_size = 0; + int builtin_jump_table_info_offset = 0; + int builtin_jump_table_info_size = 0; + // TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have // been made consistent with InstructionStream layout. int body_size() const { return instr_size + unwinding_info_size; } @@ -79,6 +83,9 @@ class CodeDesc { int code_comments_offset_relative() const { return code_comments_offset - instruction_size(); } + int builtin_jump_table_info_offset_relative() const { + return builtin_jump_table_info_offset - instruction_size(); + } // Relocation info is located at the end of the buffer and not part of the // instructions area. @@ -93,7 +100,8 @@ class CodeDesc { int unwinding_info_offset_relative() const { // TODO(jgruber,v8:11036): Remove this function once unwinding_info setup // is more consistent with other metadata tables. 
-    return code_comments_offset_relative() + code_comments_size;
+    return builtin_jump_table_info_offset_relative() +
+           builtin_jump_table_info_size;
   }
 
   Assembler* origin = nullptr;
diff --git a/deps/v8/src/codegen/code-stub-assembler-inl.h b/deps/v8/src/codegen/code-stub-assembler-inl.h
index 4a51a0d00f2798..e50133901daf15 100644
--- a/deps/v8/src/codegen/code-stub-assembler-inl.h
+++ b/deps/v8/src/codegen/code-stub-assembler-inl.h
@@ -15,6 +15,8 @@
 namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 template
 TNode CodeStubAssembler::Call(TNode context,
                               TNode callable,
@@ -241,6 +243,9 @@ TNode CodeStubAssembler::FastCloneJSObject(
   return target;
 }
 
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
+
 #endif  // V8_CODEGEN_CODE_STUB_ASSEMBLER_INL_H_
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 917547e43918c4..bd2c37d3f10b01 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -43,6 +43,15 @@ namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
+#ifdef DEBUG
+#define CSA_DCHECK_BRANCH(csa, gen, ...) \
+  (csa)->Dcheck(gen, #gen, __FILE__, __LINE__, CSA_DCHECK_ARGS(__VA_ARGS__))
+#else
+#define CSA_DCHECK_BRANCH(csa, ...) ((void)0)
+#endif
+
 namespace {
 
 Builtin BigIntComparisonBuiltinOf(Operation const& op) {
@@ -1912,8 +1921,8 @@ TNode CodeStubAssembler::ResolveJSDispatchHandle(
   TNode table =
       ExternalConstant(ExternalReference::js_dispatch_table_address());
   TNode offset = ComputeJSDispatchTableEntryOffset(handle);
-  offset = UintPtrAdd(offset,
-                      UintPtrConstant(JSDispatchTable::kEntryCodeObjectOffset));
+  offset =
+      UintPtrAdd(offset, UintPtrConstant(JSDispatchEntry::kCodeObjectOffset));
   TNode value = Load(table, offset);
   // The LSB is used as marking bit by the js dispatch table, so here we have
   // to set it using a bitwise OR as it may or may not be set.
@@ -3489,7 +3498,13 @@ TNode CodeStubAssembler::LoadJSFunctionPrototype(
 }
 
 TNode CodeStubAssembler::LoadJSFunctionCode(TNode function) {
+#ifdef V8_ENABLE_LEAPTIERING
+  TNode dispatch_handle = LoadObjectField(
+      function, JSFunction::kDispatchHandleOffset);
+  return ResolveJSDispatchHandle(dispatch_handle);
+#else
   return LoadCodePointerFromObject(function, JSFunction::kCodeOffset);
+#endif  // V8_ENABLE_LEAPTIERING
 }
 
 TNode CodeStubAssembler::LoadSharedFunctionInfoTrustedData(
@@ -12255,7 +12270,7 @@ TNode CodeStubAssembler::LoadClosureFeedbackArray(
   TVARIABLE(HeapObject, feedback_cell_array, LoadFeedbackCellValue(closure));
   Label end(this);
 
-  // When feedback vectors are not yet allocated feedback cell contains a
+  // When feedback vectors are not yet allocated feedback cell contains
   // an array of feedback cells used by create closures.
   GotoIf(HasInstanceType(feedback_cell_array.value(),
                          CLOSURE_FEEDBACK_CELL_ARRAY_TYPE),
@@ -13509,14 +13524,12 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object,
 
 TNode CodeStubAssembler::MemoryChunkFromAddress(
     TNode address) {
-  DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
   return WordAnd(address,
                  IntPtrConstant(~MemoryChunk::GetAlignmentMaskForAssembler()));
 }
 
 TNode CodeStubAssembler::PageMetadataFromMemoryChunk(
     TNode address) {
-  DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
 #ifdef V8_ENABLE_SANDBOX
   TNode table = ExternalConstant(
       ExternalReference::memory_chunk_metadata_table_address());
@@ -13541,7 +13554,6 @@ TNode CodeStubAssembler::PageMetadataFromMemoryChunk(
 
 TNode CodeStubAssembler::PageMetadataFromAddress(
     TNode address) {
-  DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
   return PageMetadataFromMemoryChunk(MemoryChunkFromAddress(address));
 }
 
@@ -16942,28 +16954,11 @@ TNode CodeStubAssembler::LoadBuiltin(TNode builtin_id) {
 
 #ifdef V8_ENABLE_LEAPTIERING
 TNode CodeStubAssembler::LoadBuiltinDispatchHandle(
-    Builtin builtin) {
-  return LoadBuiltinDispatchHandle(
-      SmiConstant(JSBuiltinDispatchHandleRoot::to_idx(builtin)));
-}
-
-TNode CodeStubAssembler::LoadBuiltinDispatchHandle(
-    RootIndex root_idx) {
-  return LoadBuiltinDispatchHandle(
-      SmiConstant(JSBuiltinDispatchHandleRoot::to_idx(root_idx)));
-}
-
-TNode CodeStubAssembler::LoadBuiltinDispatchHandle(
-    TNode builtin_id) {
-  CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
-
-  TNode offset =
-      ElementOffsetFromIndex(SmiToBInt(builtin_id), PACKED_SMI_ELEMENTS);
-
-  TNode table =
-      IsolateField(IsolateFieldId::kBuiltinDispatchTable);
-
-  return Load(table, offset);
+    JSBuiltinDispatchHandleRoot::Idx dispatch_root_idx) {
+  static_assert(Isolate::kBuiltinDispatchHandlesAreStatic);
+  DCHECK_LT(dispatch_root_idx, JSBuiltinDispatchHandleRoot::Idx::kCount);
+  return ReinterpretCast(
+      Uint32Constant(isolate()->builtin_dispatch_handle(dispatch_root_idx)));
 }
 #endif  // V8_ENABLE_LEAPTIERING
 
@@ -17144,19 +17139,23 @@ TNode CodeStubAssembler::IsMarkedForDeoptimization(TNode code) {
       LoadObjectField(code, Code::kFlagsOffset));
 }
 
-TNode CodeStubAssembler::AllocateFunctionWithContext(
-    TNode shared_info,
-#ifdef V8_ENABLE_LEAPTIERING
-    TNode dispatch_handle,
-#endif
-    TNode context) {
-  const TNode code = GetSharedFunctionInfoCode(shared_info);
-  const TNode map = CAST(
-      LoadContextElement(LoadNativeContext(context),
-                         Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+TNode CodeStubAssembler::AllocateRootFunctionWithContext(
+    RootIndex function, TNode context,
+    std::optional> maybe_native_context) {
+  DCHECK_GE(function, RootIndex::kFirstBuiltinWithSfiRoot);
+  DCHECK_LE(function, RootIndex::kLastBuiltinWithSfiRoot);
+  DCHECK(v8::internal::IsSharedFunctionInfo(
+      isolate()->root(function).GetHeapObject()));
+  Tagged sfi = v8::internal::Cast(
+      isolate()->root(function).GetHeapObject());
+  const TNode sfi_obj =
+      UncheckedCast(LoadRoot(function));
+  const TNode native_context =
+      maybe_native_context ? *maybe_native_context : LoadNativeContext(context);
+  const TNode map = CAST(LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
   const TNode fun = Allocate(JSFunction::kSizeWithoutPrototype);
-  static_assert(JSFunction::kSizeWithoutPrototype ==
-                (7 + V8_ENABLE_LEAPTIERING_BOOL) * kTaggedSize);
+  static_assert(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
   StoreMapNoWriteBarrier(fun, map);
   StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
                        RootIndex::kEmptyFixedArray);
@@ -17165,13 +17164,23 @@ TNode CodeStubAssembler::AllocateFunctionWithContext(
   StoreObjectFieldRoot(fun, JSFunction::kFeedbackCellOffset,
                        RootIndex::kManyClosuresCell);
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
-                                 shared_info);
+                                 sfi_obj);
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
-  StoreCodePointerFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code);
+  // For the native closures that are initialized here we statically know their
+  // builtin id, so there's no need to use
+  // CodeStubAssembler::GetSharedFunctionInfoCode().
+  DCHECK(sfi->HasBuiltinId());
 #ifdef V8_ENABLE_LEAPTIERING
-  CSA_DCHECK(this, TaggedEqual(code, ResolveJSDispatchHandle(dispatch_handle)));
+  const TNode dispatch_handle =
+      LoadBuiltinDispatchHandle(function);
+  CSA_DCHECK(this, TaggedEqual(LoadBuiltin(SmiConstant(sfi->builtin_id())),
+                               ResolveJSDispatchHandle(dispatch_handle)));
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kDispatchHandleOffset,
                                  dispatch_handle);
+  USE(sfi);
+#else
+  const TNode code = LoadBuiltin(SmiConstant(sfi->builtin_id()));
+  StoreCodePointerFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code);
 #endif  // V8_ENABLE_LEAPTIERING
 
   return CAST(fun);
@@ -18595,64 +18604,6 @@ TNode CodeStubAssembler::ArrayListElements(TNode array) {
   return elements;
 }
 
-#if V8_ENABLE_WEBASSEMBLY
-TNode CodeStubAssembler::SwitchToTheCentralStack() {
-  TNode stack_limit_slot = IntPtrAdd(
-      LoadFramePointer(),
-      IntPtrConstant(WasmToJSWrapperConstants::kSecondaryStackLimitOffset));
-
-  TNode do_switch = ExternalConstant(
-      ExternalReference::wasm_switch_to_the_central_stack_for_js());
-  TNode central_stack_sp = TNode::UncheckedCast(CallCFunction(
-      do_switch, MachineType::Pointer(),
-      std::make_pair(MachineType::Pointer(),
-                     ExternalConstant(ExternalReference::isolate_address())),
-      std::make_pair(MachineType::Pointer(), stack_limit_slot)));
-
-  TNode old_sp = LoadStackPointer();
-  SetStackPointer(central_stack_sp);
-  StoreNoWriteBarrier(
-      MachineType::PointerRepresentation(), LoadFramePointer(),
-      IntPtrConstant(WasmToJSWrapperConstants::kCentralStackSPOffset),
-      central_stack_sp);
-  return old_sp;
-}
-
-void CodeStubAssembler::SwitchFromTheCentralStack(TNode old_sp) {
-  TNode stack_limit = Load(
-      LoadFramePointer(),
-      IntPtrConstant(WasmToJSWrapperConstants::kSecondaryStackLimitOffset));
-
-  TNode do_switch = ExternalConstant(
-      ExternalReference::wasm_switch_from_the_central_stack_for_js());
-  CallCFunction(
-      do_switch, MachineType::Pointer(),
-      std::make_pair(MachineType::Pointer(),
-                     ExternalConstant(ExternalReference::isolate_address())),
-      std::make_pair(MachineType::Pointer(), stack_limit));
-
-  StoreNoWriteBarrier(
-      MachineType::PointerRepresentation(), LoadFramePointer(),
-      IntPtrConstant(WasmToJSWrapperConstants::kCentralStackSPOffset),
-      IntPtrConstant(0));
-  SetStackPointer(old_sp);
-}
-
-TNode CodeStubAssembler::SwitchToTheCentralStackIfNeeded() {
-  TVARIABLE(RawPtrT, old_sp, PointerConstant(nullptr));
-  Label no_switch(this);
-  Label end(this);  // -> return value of the call (kTaggedPointer)
-  TNode is_on_central_stack_flag = LoadUint8FromRootRegister(
-      IntPtrConstant(IsolateData::is_on_central_stack_flag_offset()));
-  GotoIf(is_on_central_stack_flag, &no_switch);
-  old_sp = SwitchToTheCentralStack();
-  Goto(&no_switch);
-  Bind(&no_switch);
-  return old_sp.value();
-}
-
-#endif
-
 TNode CodeStubAssembler::IsMarked(TNode object) {
   TNode cell;
   TNode mask;
@@ -18692,5 +18643,9 @@ void CodeStubAssembler::GetMarkBit(TNode object, TNode* cell,
   }
 }
 
+#undef CSA_DCHECK_BRANCH
+
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 8055f450365703..7902e08b163436 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -48,6 +48,8 @@ namespace v8 {
 namespace internal {
 
+#include "src/codegen/define-code-stub-assembler-macros.inc"
+
 class CallInterfaceDescriptor;
 class CodeStubArguments;
 class CodeStubAssembler;
@@ -56,102 +58,6 @@ class StubCache;
 
 enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 
-#ifdef DEBUG
-#define CSA_CHECK(csa, x) \
-  (csa)->Check([&]() -> TNode { return x; }, #x, __FILE__, __LINE__)
-#else
-#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
-#endif
-
-#define CSA_CHECK_WITH_ABORT(csa, x) \
-  (csa)->Check([&]() -> TNode { return x; }, #x, __FILE__, __LINE__)
-
-// This is a check that always calls into the runtime if it aborts.
-// This also exits silently when --hole-fuzzing is enabled.
-#define CSA_HOLE_SECURITY_CHECK(csa, x) CSA_CHECK_WITH_ABORT(csa, x)
-
-#ifdef DEBUG
-// CSA_DCHECK_ARGS generates an
-// std::initializer_list from __VA_ARGS__. It
-// currently supports between 0 and 2 arguments.
-
-// clang-format off
-#define CSA_DCHECK_0_ARGS(...) {}
-#define CSA_DCHECK_1_ARG(a, ...) {{a, #a}}
-#define CSA_DCHECK_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
-// clang-format on
-#define SWITCH_CSA_DCHECK_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
-#define CSA_DCHECK_ARGS(...) \
-  CALL(SWITCH_CSA_DCHECK_ARGS, (, ##__VA_ARGS__, CSA_DCHECK_2_ARGS, \
-                                CSA_DCHECK_1_ARG, CSA_DCHECK_0_ARGS))
-// Workaround for MSVC to skip comma in empty __VA_ARGS__.
-#define CALL(x, y) x y
-
-// CSA_DCHECK(csa, , )
-
-#define CSA_DCHECK(csa, condition_node, ...) \
-  (csa)->Dcheck(condition_node, #condition_node, __FILE__, __LINE__, \
-                CSA_DCHECK_ARGS(__VA_ARGS__))
-
-// CSA_DCHECK_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
-// )
-
-#define CSA_DCHECK_BRANCH(csa, gen, ...) \
-  (csa)->Dcheck(gen, #gen, __FILE__, __LINE__, CSA_DCHECK_ARGS(__VA_ARGS__))
-
-#define CSA_DCHECK_JS_ARGC_OP(csa, Op, op, expected) \
-  (csa)->Dcheck( \
-      [&]() -> TNode { \
-        const TNode argc = (csa)->UncheckedParameter( \
-            Descriptor::kJSActualArgumentsCount); \
-        return (csa)->Op(argc, \
-                         (csa)->Int32Constant(i::JSParameterCount(expected))); \
-      }, \
-      "argc " #op " " #expected, __FILE__, __LINE__, \
-      {{SmiFromInt32((csa)->UncheckedParameter( \
-            Descriptor::kJSActualArgumentsCount)), \
-        "argc"}})
-
-#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) \
-  CSA_DCHECK_JS_ARGC_OP(csa, Word32Equal, ==, expected)
-
-#define CSA_DEBUG_INFO(name) \
-  { #name, __FILE__, __LINE__ }
-#define BIND(label) Bind(label, CSA_DEBUG_INFO(label))
-#define TYPED_VARIABLE_DEF(type, name, ...) \
-  TVariable name(CSA_DEBUG_INFO(name), __VA_ARGS__)
-#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
-  name(CSA_DEBUG_INFO(name), __VA_ARGS__)
-#else  // DEBUG
-#define CSA_DCHECK(csa, ...) ((void)0)
-#define CSA_DCHECK_BRANCH(csa, ...) ((void)0)
-#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) ((void)0)
-#define BIND(label) Bind(label)
-#define TYPED_VARIABLE_DEF(type, name, ...) TVariable name(__VA_ARGS__)
-#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
-#endif  // DEBUG
-
-#define TVARIABLE(...) EXPAND(TYPED_VARIABLE_DEF(__VA_ARGS__, this))
-#define TVARIABLE_CONSTRUCTOR(...) \
-  EXPAND(TYPED_VARIABLE_CONSTRUCTOR(__VA_ARGS__, this))
-
-#ifdef ENABLE_SLOW_DCHECKS
-#define CSA_SLOW_DCHECK(csa, ...) \
-  if (v8_flags.enable_slow_asserts) { \
-    CSA_DCHECK(csa, __VA_ARGS__); \
-  }
-#else
-#define CSA_SLOW_DCHECK(csa, ...) ((void)0)
-#endif
-
-// Similar to SBXCHECK in C++, these become a CSA_CHECK in sandbox-enabled
-// builds, otherwise a CSA_DCHECK.
-#ifdef V8_ENABLE_SANDBOX
-#define CSA_SBXCHECK(csa, ...) CSA_CHECK(csa, __VA_ARGS__)
-#else
-#define CSA_SBXCHECK(csa, ...) CSA_DCHECK(csa, __VA_ARGS__)
-#endif
-
 // Provides JavaScript-specific "macro-assembler" functionality on top of the
 // CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
 // it's possible to add JavaScript-specific useful CodeAssembler "macros"
@@ -737,8 +643,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void DCheckReceiver(ConvertReceiverMode mode, TNode receiver);
 
   // The following Call wrappers call an object according to the semantics that
-  // one finds in the EcmaScript spec, operating on an Callable (e.g. a
-  // JSFunction or proxy) rather than a InstructionStream object.
+  // one finds in the ECMAScript spec, operating on a Callable (e.g. a
+  // JSFunction or proxy) rather than an InstructionStream object.
   template
   inline TNode Call(TNode context, TNode callable,
                     ConvertReceiverMode mode, TNode receiver,
@@ -1136,9 +1042,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
 #if V8_ENABLE_WEBASSEMBLY
   // Returns WasmTrustedInstanceData|Smi.
-  TNode LoadInstanceDataFromWasmImportData(TNode ref) {
+  TNode LoadInstanceDataFromWasmImportData(
+      TNode import_data) {
     return LoadProtectedPointerField(
-        ref, WasmImportData::kProtectedInstanceDataOffset);
+        import_data, WasmImportData::kProtectedInstanceDataOffset);
   }
 
   // Returns WasmImportData or WasmTrustedInstanceData.
@@ -4171,9 +4078,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
 #ifdef V8_ENABLE_LEAPTIERING
   // Load a builtin's handle into the JSDispatchTable.
-  TNode LoadBuiltinDispatchHandle(Builtin builtin);
-  TNode LoadBuiltinDispatchHandle(RootIndex idx);
-  TNode LoadBuiltinDispatchHandle(TNode builtin_id);
+  TNode LoadBuiltinDispatchHandle(
+      JSBuiltinDispatchHandleRoot::Idx dispatch_root_idx);
+  inline TNode LoadBuiltinDispatchHandle(RootIndex idx) {
+    return LoadBuiltinDispatchHandle(JSBuiltinDispatchHandleRoot::to_idx(idx));
+  }
 
   // Load a Code object from the JSDispatchTable.
   TNode ResolveJSDispatchHandle(TNode dispatch_handle);
@@ -4190,26 +4099,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       TVariable* data_type_out = nullptr,
       Label* if_compile_lazy = nullptr);
 
-  TNode AllocateFunctionWithContext(
-      TNode shared_info,
-#ifdef V8_ENABLE_LEAPTIERING
-      TNode dispatch_handle,
-#endif
-      TNode context);
-  TNode AllocateRootFunctionWithContext(RootIndex function,
-                                        TNode context) {
-    return AllocateFunctionWithContext(
-        UncheckedCast(LoadRoot(function)),
-#ifdef V8_ENABLE_LEAPTIERING
-        LoadBuiltinDispatchHandle(function),
-#endif
-        context);
-  }
+  TNode AllocateRootFunctionWithContext(
+      RootIndex function, TNode context,
+      std::optional> maybe_native_context);
   // Used from Torque because Torque
-  TNode AllocateRootFunctionWithContext(intptr_t function,
-                                        TNode context) {
+  TNode AllocateRootFunctionWithContext(
+      intptr_t function, TNode context,
+      TNode native_context) {
     return AllocateRootFunctionWithContext(static_cast(function),
-                                           context);
+                                           context, native_context);
   }
 
   // Promise helpers
@@ -4601,15 +4499,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       TNode property_details, Label* needs_resize);
 
-  // If the current code is running on a secondary stack, move the stack
-  // pointer to the central stack (but not the frame pointer) and adjust the
-  // stack limit. Returns the old stack pointer, or nullptr if no switch was
-  // performed.
-  TNode SwitchToTheCentralStackIfNeeded();
-  // Switch the SP back to the secondary stack after switching to the central
-  // stack.
-  void SwitchFromTheCentralStack(TNode old_sp);
-
   TNode IsMarked(TNode object);
 
   void GetMarkBit(TNode object, TNode* cell,
@@ -4786,8 +4675,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void EmitElementStoreTypedArrayUpdateValue(
       TNode value, ElementsKind elements_kind,
       TNode converted_value, TVariable* maybe_converted_value);
-
-  TNode SwitchToTheCentralStack();
 };
 
 class V8_EXPORT_PRIVATE CodeStubArguments {
@@ -4964,9 +4851,12 @@ DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
   inline TNode CodeStubAssembler::GetClassMapConstant() { \
     return class_name##MapConstant(); \
   }
-
 UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(CLASS_MAP_CONSTANT_ADAPTER, _)
+#undef CLASS_MAP_CONSTANT_ADAPTER
+
+#include "src/codegen/undef-code-stub-assembler-macros.inc"
 
 }  // namespace internal
 }  // namespace v8
+
 #endif  // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index ce82b744f327d5..4215cee6ae7335 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -1338,14 +1338,7 @@ MaybeHandle GetOrCompileOptimized(
   }
 
   // Do not optimize when debugger needs to hook into every call.
-  if (isolate->debug()->needs_check_on_function_call()) {
-    // Reset the OSR urgency to avoid triggering this compilation request on
-    // every iteration and thereby skipping other interrupts.
-    if (IsOSR(osr_offset)) {
-      function->feedback_vector()->reset_osr_urgency();
-    }
-    return {};
-  }
+  if (isolate->debug()->needs_check_on_function_call()) return {};
 
   // Do not optimize if we need to be able to set break points.
   if (shared->HasBreakInfo(isolate)) return {};
@@ -2339,10 +2332,6 @@ void BackgroundMergeTask::BeginMergeInBackground(
       }
       forwarder.AddBytecodeArray(new_sfi->GetBytecodeArray(isolate));
     }
-    // TODO(355575275): We shouldn't be using the new sfi, so its script
-    // field shouldn't matter -- but there seems to be some cases where we
-    // do, so stay robust and set it. Remove this once this bug is fixed.
-    new_sfi->set_script(*old_script, kReleaseStore);
   } else {
     // The old script didn't have a SharedFunctionInfo for this function
     // literal, so it can use the new SharedFunctionInfo.
@@ -2476,32 +2465,26 @@ Handle BackgroundMergeTask::CompleteMergeInForeground(
     SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, result);
   }
 
-  {
-    // TODO(355575275): Extra validation code to try to find a bug. Remove after
-    // fixing.
-    for (int i = 0; i < old_script->infos()->length(); ++i) {
-      Tagged maybe_sfi = old_script->infos()->get(i);
-      if (maybe_sfi.IsWeak() &&
-          Is(maybe_sfi.GetHeapObjectAssumeWeak())) {
-        Tagged sfi =
-            Cast(maybe_sfi.GetHeapObjectAssumeWeak());
-
-        // Check that the SFI has the right script.
-        if (sfi->script() != *old_script) {
-          isolate->PushStackTraceAndContinue(
-              reinterpret_cast(sfi.ptr()),
-              reinterpret_cast(old_script->ptr()),
-              reinterpret_cast(new_script->ptr()),
-              reinterpret_cast(old_script->infos()->ptr() +
-                               WeakFixedArray::OffsetOfElementAt(i)),
-              reinterpret_cast(new_script->infos()->ptr() +
-                               WeakFixedArray::OffsetOfElementAt(i)));
-        }
+  if (v8_flags.verify_code_merge) {
+    // Check that:
+    // * There aren't any duplicate scope info. Every scope/context should
+    //   correspond to at most one scope info.
+    // * All published SFIs refer to the old script (i.e. we chose new vs old
+    //   correctly, and updated new SFIs where needed).
+    // * All constant pool SFI entries point to an SFI referring to the old
+    //   script (i.e. references were updated correctly).
+    std::unordered_map> scope_infos;
+    for (int i = 0; i < old_script->infos()->length(); i++) {
+      Tagged scope_info;
+      if (!old_script->infos()->get(i).IsWeak()) continue;
+      Tagged info =
+          old_script->infos()->get(i).GetHeapObjectAssumeWeak();
+      if (Is(info)) {
+        Tagged old_sfi = Cast(info);
+        CHECK_EQ(old_sfi->script(), *old_script);
 
-        // Check that all SFIs in the bytecode array's constant pool are from
-        // the same script.
-        if (sfi->HasBytecodeArray()) {
-          Tagged bytecode = sfi->GetBytecodeArray(isolate);
+        if (old_sfi->HasBytecodeArray()) {
+          Tagged bytecode = old_sfi->GetBytecodeArray(isolate);
           Tagged constant_pool = bytecode->constant_pool();
           for (int i = 0; i < constant_pool->length(); ++i) {
             Tagged entry = constant_pool->get(i);
@@ -2509,52 +2492,12 @@ Handle BackgroundMergeTask::CompleteMergeInForeground(
               Tagged inner_sfi = Cast(entry);
               int id = inner_sfi->function_literal_id();
-              if (MakeWeak(inner_sfi) != old_script->infos()->get(id)) {
-                isolate->PushStackTraceAndContinue(
-                    reinterpret_cast(sfi.ptr()),
-                    reinterpret_cast(inner_sfi.ptr()),
-                    reinterpret_cast(old_script->ptr()),
-                    reinterpret_cast(new_script->ptr()),
-                    reinterpret_cast(
-                        old_script->infos()->ptr() +
-                        WeakFixedArray::OffsetOfElementAt(id)),
-                    reinterpret_cast(
-                        new_script->infos()->ptr() +
-                        WeakFixedArray::OffsetOfElementAt(id)));
-              }
-
-              if (inner_sfi->script() != *old_script) {
-                isolate->PushStackTraceAndContinue(
-                    reinterpret_cast(sfi.ptr()),
-                    reinterpret_cast(inner_sfi.ptr()),
-                    reinterpret_cast(old_script->ptr()),
-                    reinterpret_cast(new_script->ptr()),
-                    reinterpret_cast(
-                        old_script->infos()->ptr() +
-                        WeakFixedArray::OffsetOfElementAt(id)),
-                    reinterpret_cast(
-                        new_script->infos()->ptr() +
-                        WeakFixedArray::OffsetOfElementAt(id)));
-              }
+              CHECK_EQ(MakeWeak(inner_sfi), old_script->infos()->get(id));
+              CHECK_EQ(inner_sfi->script(), *old_script);
             }
           }
         }
-      }
-    }
-  }
 
-  if (v8_flags.verify_code_merge) {
-    // Check that there aren't any duplicate scope infos. Every scope/context
-    // should correspond to at most one scope info.
-    std::unordered_map> scope_infos;
-    for (int i = 0; i < old_script->infos()->length(); i++) {
-      Tagged scope_info;
-      if (!old_script->infos()->get(i).IsWeak()) continue;
-      Tagged info =
-          old_script->infos()->get(i).GetHeapObjectAssumeWeak();
-      if (Is(info)) {
-        Tagged old_sfi = Cast(info);
-        CHECK_EQ(old_sfi->script(), *old_script);
         if (!old_sfi->scope_info()->IsEmpty()) {
           scope_info = old_sfi->scope_info();
         } else if (old_sfi->HasOuterScopeInfo()) {
@@ -2617,16 +2560,6 @@ MaybeHandle BackgroundCompileTask::FinalizeScript(
   Handle result = merge.CompleteMergeInForeground(isolate, script);
   maybe_result = result;
-
-  {
-    // TODO(355575275): We shouldn't be using the new script, so its source
-    // and origin options shouldn't matter -- but there seems to be some cases
-    // where we do, so stay robust and set them. Remove this once this bug is
-    // fixed.
-    Script::SetSource(isolate, script, source);
-    script->set_origin_options(origin_options);
-  }
-
   script = handle(Cast