diff --git a/common.gypi b/common.gypi
index b2ea540133f0af..71862791dae3be 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.13',
+    'v8_embedder_string': '-node.20',
 
     ##### V8 defaults for Node.js #####
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 62917ab0a3456a..7acdf635c90ef0 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -766,7 +766,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<uint32_t>::max());
   liftoff::LoadInternal(this, dst, src_addr, offset_reg,
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 39ef8528e5267a..bea5100ef3e9f8 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -126,13 +126,23 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
 template <typename T>
 inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
-                           Register offset, T offset_imm) {
+                           Register offset, T offset_imm,
+                           bool i64_offset = false) {
   if (offset.is_valid()) {
-    if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
-    Register tmp = temps->AcquireX();
+    if (offset_imm == 0) {
+      return i64_offset ? MemOperand(addr.X(), offset.X())
+                        : MemOperand(addr.X(), offset.W(), UXTW);
+    }
     DCHECK_GE(kMaxUInt32, offset_imm);
-    assm->Add(tmp, offset.X(), offset_imm);
-    return MemOperand(addr.X(), tmp);
+    if (i64_offset) {
+      Register tmp = temps->AcquireX();
+      assm->Add(tmp, offset.X(), offset_imm);
+      return MemOperand(addr.X(), tmp);
+    } else {
+      Register tmp = temps->AcquireW();
+      assm->Add(tmp, offset.W(), offset_imm);
+      return MemOperand(addr.X(), tmp, UXTW);
+    }
   }
   return MemOperand(addr.X(), offset_imm);
 }
@@ -490,10 +500,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   UseScratchRegisterScope temps(this);
-  MemOperand src_op =
-      liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+  MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
+                                        offset_imm, i64_offset);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
     case LoadType::kI32Load8U:
@@ -1333,7 +1344,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                             LiftoffRegister src, Label* trap) {
   switch (opcode) {
     case kExprI32ConvertI64:
-      if (src != dst) Mov(dst.gp().W(), src.gp().W());
+      Mov(dst.gp().W(), src.gp().W());
      return true;
    case kExprI32SConvertF32:
      Fcvtzs(dst.gp().W(), src.fp().S());  // f32 -> i32 round to zero.
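
The arm64 change above is the heart of the fix: GetMemOp previously always zero-extended the offset register via UXTW, which is correct for a 32-bit wasm index but wrong for a genuine 64-bit offset; now the caller says which semantics it wants. A minimal standalone C++ sketch of the addressing rule the two paths implement (not code from the patch; names are illustrative):

// Standalone sketch (not from the patch): the effective-address rule behind
// the two GetMemOp paths. With a 32-bit index, the add happens on W registers
// (wrapping at 32 bits) and the result is zero-extended (UXTW), so stale
// upper bits in offset_reg can never reach the address.
#include <cstdint>

uint64_t EffectiveAddress(uint64_t mem_start, uint64_t offset_reg,
                          uint64_t offset_imm, bool i64_offset) {
  if (i64_offset) {
    // Models MemOperand(addr.X(), tmp): all 64 bits of the offset are used.
    return mem_start + offset_reg + offset_imm;
  }
  // Models Add(tmp.W(), offset.W(), imm) followed by UXTW: 32-bit
  // wrap-around add, then zero-extension into the 64-bit address.
  uint32_t low = static_cast<uint32_t>(offset_reg) +
                 static_cast<uint32_t>(offset_imm);
  return mem_start + low;
}
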
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 83b00d4a2ad7db..e597467c7342c7 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -388,7 +388,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<uint32_t>::max());
   DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 3090bc81659779..dbff396f82bc63 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -669,7 +669,7 @@ class LiftoffAssembler : public TurboAssembler {
   inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                    uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
                    uint32_t* protected_load_pc = nullptr,
-                   bool is_load_mem = false);
+                   bool is_load_mem = false, bool i64_offset = false);
   inline void Store(Register dst_addr, Register offset_reg,
                     uintptr_t offset_imm, LiftoffRegister src, StoreType type,
                     LiftoffRegList pinned,
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index a26df17225204f..84d217b2e42163 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -2767,33 +2767,75 @@ class LiftoffCompiler {
     return index;
   }
 
+  bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
+                               int access_size, uintptr_t* offset) {
+    if (!index_slot.is_const()) return false;
+
+    // Potentially zero extend index (which is a 32-bit constant).
+    const uintptr_t index = static_cast<uintptr_t>(index_slot.i32_const());
+    const uintptr_t effective_offset = index + *offset;
+
+    if (effective_offset < index  // overflow
+        || !base::IsInBounds<uintptr_t>(effective_offset, access_size,
+                                        env_->min_memory_size)) {
+      return false;
+    }
+
+    *offset = effective_offset;
+    return true;
+  }
+
   void LoadMem(FullDecoder* decoder, LoadType type,
                const MemoryAccessImmediate<validate>& imm,
                const Value& index_val, Value* result) {
     ValueKind kind = type.value_type().kind();
+    RegClass rc = reg_class_for(kind);
     if (!CheckSupportedType(decoder, kind, "load")) return;
-    LiftoffRegister full_index = __ PopToRegister();
-    Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
-                                    full_index, {}, kDontForceCheck);
-    if (index == no_reg) return;
     uintptr_t offset = imm.offset;
-    LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
-    DEBUG_CODE_COMMENT("load from memory");
-    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
-    RegClass rc = reg_class_for(kind);
-    LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
-    uint32_t protected_load_pc = 0;
-    __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
-    if (env_->use_trap_handler) {
-      AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
-                       protected_load_pc);
+    Register index = no_reg;
+
+    // Only look at the slot, do not pop it yet (will happen in PopToRegister
+    // below, if this is not a statically-in-bounds index).
+    auto& index_slot = __ cache_state()->stack_state.back();
+    bool i64_offset = index_val.type == kWasmI64;
+    if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
+      __ cache_state()->stack_state.pop_back();
+      DEBUG_CODE_COMMENT("load from memory (constant offset)");
+      LiftoffRegList pinned;
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+      __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
+              i64_offset);
+      __ PushRegister(kind, value);
+    } else {
+      LiftoffRegister full_index = __ PopToRegister();
+      index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
+                             kDontForceCheck);
+      if (index == no_reg) return;
+
+      DEBUG_CODE_COMMENT("load from memory");
+      LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+      index = AddMemoryMasking(index, &offset, &pinned);
+
+      // Load the memory start address only now to reduce register pressure
+      // (important on ia32).
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+
+      uint32_t protected_load_pc = 0;
+      __ Load(value, mem, index, offset, type, pinned, &protected_load_pc,
+              true, i64_offset);
+      if (env_->use_trap_handler) {
+        AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+                         protected_load_pc);
+      }
+      __ PushRegister(kind, value);
     }
-    __ PushRegister(kind, value);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(false, type.mem_type().representation(), index,
                            offset, decoder->position());
     }
@@ -2836,7 +2878,7 @@ class LiftoffCompiler {
     }
 
     __ PushRegister(kS128, value);
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       // Again load extend is different.
       MachineRepresentation mem_rep =
           transform == LoadTransformationKind::kExtend
@@ -2878,7 +2920,7 @@ class LiftoffCompiler {
 
     __ PushRegister(kS128, result);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(false, type.mem_type().representation(), index,
                            offset, decoder->position());
     }
@@ -2889,29 +2931,45 @@
                 const Value& index_val, const Value& value_val) {
     ValueKind kind = type.value_type().kind();
     if (!CheckSupportedType(decoder, kind, "store")) return;
+
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister());
-    LiftoffRegister full_index = __ PopToRegister(pinned);
-    Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
-                                    full_index, pinned, kDontForceCheck);
-    if (index == no_reg) return;
     uintptr_t offset = imm.offset;
-    pinned.set(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
-    DEBUG_CODE_COMMENT("store to memory");
-    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
-    uint32_t protected_store_pc = 0;
-    LiftoffRegList outer_pinned;
-    if (FLAG_trace_wasm_memory) outer_pinned.set(index);
-    __ Store(addr, index, offset, value, type, outer_pinned,
-             &protected_store_pc, true);
-    if (env_->use_trap_handler) {
-      AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
-                       protected_store_pc);
+    Register index = no_reg;
+
+    auto& index_slot = __ cache_state()->stack_state.back();
+    if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
+      __ cache_state()->stack_state.pop_back();
+      DEBUG_CODE_COMMENT("store to memory (constant offset)");
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
+    } else {
+      LiftoffRegister full_index = __ PopToRegister(pinned);
+      index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
+                             pinned, kDontForceCheck);
+      if (index == no_reg) return;
+
+      pinned.set(index);
+      index = AddMemoryMasking(index, &offset, &pinned);
+      DEBUG_CODE_COMMENT("store to memory");
+      uint32_t protected_store_pc = 0;
+      // Load the memory start address only now to reduce register pressure
+      // (important on ia32).
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      LiftoffRegList outer_pinned;
+      if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
+      __ Store(mem, index, offset, value, type, outer_pinned,
+               &protected_store_pc, true);
+      if (env_->use_trap_handler) {
+        AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+                         protected_store_pc);
+      }
     }
-    if (FLAG_trace_wasm_memory) {
+
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
     }
@@ -2940,7 +2998,7 @@ class LiftoffCompiler {
       AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
                        protected_store_pc);
     }
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
     }
@@ -4156,9 +4214,9 @@
     Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
     LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
     LiftoffRegList outer_pinned;
-    if (FLAG_trace_wasm_memory) outer_pinned.set(index);
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
     __ AtomicStore(addr, index, offset, value, type, outer_pinned);
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
     }
@@ -4184,7 +4242,7 @@
     __ AtomicLoad(value, addr, index, offset, type, pinned);
     __ PushRegister(kind, value);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(false, type.mem_type().representation(), index,
                            offset, decoder->position());
     }
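
IndexStaticallyInBounds is what the rewritten LoadMem/StoreMem above rely on: when the index is a compile-time constant that provably fits in the smallest memory the module can have, the bounds check, the memory mask, and the index register all disappear, and the access becomes a plain load or store at a constant offset from the memory start. A standalone C++ sketch of that arithmetic (not code from the patch; base::IsInBounds is mirrored here under the assumption it checks length <= max && index <= max - length):

// Standalone sketch (not from the patch): the constant-index fast path.
// The 32-bit constant index is zero-extended, folded into the immediate
// offset, and accepted only if the sum neither wraps nor reaches past the
// smallest memory size the module guarantees.
#include <cstdint>

bool IndexStaticallyInBounds(uint32_t const_index, int access_size,
                             uintptr_t min_memory_size, uintptr_t* offset) {
  const uintptr_t index = const_index;  // zero-extends on 64-bit targets
  const uintptr_t effective_offset = index + *offset;
  if (effective_offset < index) return false;  // addition overflowed

  // Assumed mirror of base::IsInBounds(index, length, max).
  const uintptr_t length = static_cast<uintptr_t>(access_size);
  if (length > min_memory_size ||
      effective_offset > min_memory_size - length) {
    return false;
  }

  *offset = effective_offset;  // the access no longer needs an index register
  return true;
}
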
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index ca715a8a328114..d078fd5e429429 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -491,7 +491,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   Register src = no_reg;
   if (offset_reg != no_reg) {
     src = GetUnusedRegister(kGpReg, pinned).gp();
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index a5a9f8ce231b46..dfbd8d6a752ee1 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -470,7 +470,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 4e99821a27d563..bedee1a939c007 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -137,7 +137,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   bailout(kUnsupportedArchitecture, "Load");
 }
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 47f8ce2125d439..bb6c3bcad886a1 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -446,7 +446,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 8560c91553f8cc..04f30939fdbab6 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -277,11 +277,17 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   UseScratchRegisterScope temps(this);
   if (!is_int20(offset_imm)) {
     mov(ip, Operand(offset_imm));
     if (offset_reg != no_reg) {
+      if (!i64_offset) {
+        // Clear the upper 32 bits of the 64 bit offset register.
+        llgfr(r0, offset_reg);
+        offset_reg = r0;
+      }
       AddS64(ip, offset_reg);
     }
     offset_reg = ip;
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index e8a57bafca1f35..68619a9f1b3e49 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -389,7 +389,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
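
The s390 and x64 hunks above enforce the same invariant in two different ways: when the offset register holds a 32-bit index, its upper 32 bits must not reach the address computation. s390 clears them explicitly with llgfr; x64 merely asserts they are already zero, since 32-bit register writes on x86-64 clear bits 32-63 by construction. A standalone C++ sketch (not code from the patch):

// Standalone sketch (not from the patch): the invariant behind llgfr (s390)
// and AssertZeroExtended (x64). A 32-bit index living in a 64-bit register
// may carry stale upper bits; s390 clears them explicitly, while x64 relies
// on 32-bit writes already zeroing the upper half and only asserts in debug.
#include <cstdint>

uint64_t AddIndexToBase(uint64_t base, uint64_t offset_reg, bool i64_offset) {
  if (!i64_offset) {
    offset_reg = static_cast<uint32_t>(offset_reg);  // models llgfr r0, reg
  }
  return base + offset_reg;  // models the 64-bit address add (AddS64)
}
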
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-11809.js b/deps/v8/test/mjsunit/regress/wasm/regress-11809.js
new file mode 100644
index 00000000000000..eef8c291f6e6db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-11809.js
@@ -0,0 +1,64 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --enable-testing-opcode-in-wasm --nowasm-tier-up
+// Flags: --wasm-tier-mask-for-testing=2
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function InstanceMaker(offset) {
+  var builder = new WasmModuleBuilder();
+  builder.addMemory(1, 1, false /* exported */);
+
+  var sig_index = builder.addType(makeSig(
+      [kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+       kWasmI32],
+      [kWasmI32]));
+  var sig_three = builder.addType(makeSig(
+      [kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64,
+       kWasmI64],
+      []));
+
+  var zero = builder.addFunction("zero", kSig_i_i);
+  var one = builder.addFunction("one", sig_index);
+  var two = builder.addFunction("two", kSig_v_i);
+  var three = builder.addFunction("three", sig_three).addBody([]);
+
+  zero.addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, offset]);
+
+  one.addBody([
+      kExprLocalGet, 7,
+      kExprCallFunction, zero.index]);
+
+  two.addBody([
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprCallFunction, three.index,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprCallFunction, one.index,
+      kExprDrop,
+  ]).exportFunc();
+
+  return builder.instantiate({});
+}
+
+var instance = InstanceMaker(0);
+instance.exports.two();
+
+// Regression test for crbug.com/1224882.
+var instance_with_offset = InstanceMaker(4);
+instance_with_offset.exports.two();
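
The five bytes after each kExprI64Const above are a LEB128-encoded i64 immediate (signed encoding, but the sign bit of the final byte is clear, so an unsigned decode yields the same value). They decode to 0x100000001: the low 32 bits are 1, an in-bounds index, while bit 32 is set, so calling "three" fills argument registers with exactly the kind of stale upper bits that the i64_offset plumbing must keep out of 32-bit address computations. A standalone C++ decode sketch (not part of the test):

// Standalone sketch (not part of the test): decoding the immediate used in
// "two". Bytes 0x81 0x80 0x80 0x80 0x10 decode to 0x100000001 -- the low 32
// bits are 1 (in bounds), bit 32 is set -- so correctness depends on the
// upper half of the register being ignored for 32-bit indexes.
#include <cstdint>
#include <cstdio>

uint64_t DecodeUnsignedLeb128(const uint8_t* bytes) {
  uint64_t result = 0;
  int shift = 0;
  while (true) {
    const uint8_t b = *bytes++;
    result |= uint64_t{b & 0x7F} << shift;
    if ((b & 0x80) == 0) return result;  // continuation bit clear: done
    shift += 7;
  }
}

int main() {
  const uint8_t imm[] = {0x81, 0x80, 0x80, 0x80, 0x10};
  std::printf("%llx\n",
              static_cast<unsigned long long>(DecodeUnsignedLeb128(imm)));
  // Prints 100000001.
}
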