diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 2faa0b1110893d..b28c6bf23ce5b5 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,14 +1,27 @@ +2010-05-05: Version 2.2.8 + + Performance improvements in the x64 and ARM backends. + + +2010-05-03: Version 2.2.7 + + Added support for ES5 date time string format to Date.parse. + + Performance improvements in the x64 backend. + + 2010-04-28: Version 2.2.6 - Add "amd64" as recognized architecture in scons build script + Added "amd64" as recognized architecture in scons build script (by Ryan Dahl ). - Fix bug in String search and replace with very simple RegExps. + Fixed bug in String search and replace with very simple RegExps. - Fix bug in RegExp containing "\b^". + Fixed bug in RegExp containing "\b^". Performance improvements on all platforms. + 2010-04-26: Version 2.2.5 Various performance improvements (especially for ARM and x64) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index e777fa6b8bfbde..c07ba1f0cfbe70 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -3014,7 +3014,7 @@ template <> struct InternalConstants<4> { // Internal constants for 64-bit systems. template <> struct InternalConstants<8> { - static const int kStringResourceOffset = 2 * sizeof(void*); + static const int kStringResourceOffset = 3 * sizeof(void*); }; /** diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index f738a3783e873b..4709a156b4bd9f 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -3646,6 +3646,8 @@ void V8::ResumeProfilerEx(int flags, int tag) { // those modules which haven't been started prior to making a // snapshot. + // Make a GC prior to taking a snapshot. + i::Heap::CollectAllGarbage(false); // Reset snapshot flag and CPU module flags. flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU); const int current_flags = i::Logger::GetActiveProfilerModules(); @@ -4020,6 +4022,7 @@ void Debug::ProcessDebugMessages() { } Local Debug::GetDebugContext() { + EnsureInitialized("v8::Debug::GetDebugContext()"); ENTER_V8; return Utils::ToLocal(i::Debugger::GetDebugContext()); } diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 291a763fb99446..30860a1f997b0a 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -565,7 +565,7 @@ void CodeGenerator::Load(Expression* expr) { } ASSERT(has_valid_frame()); ASSERT(!has_cc()); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -1008,10 +1008,10 @@ static int BitPosition(unsigned x) { } -void CodeGenerator::VirtualFrameSmiOperation(Token::Value op, - Handle value, - bool reversed, - OverwriteMode mode) { +void CodeGenerator::SmiOperation(Token::Value op, + Handle value, + bool reversed, + OverwriteMode mode) { int int_value = Smi::cast(*value)->value(); bool something_to_inline; @@ -1232,189 +1232,6 @@ void CodeGenerator::VirtualFrameSmiOperation(Token::Value op, } -void CodeGenerator::SmiOperation(Token::Value op, - Handle value, - bool reversed, - OverwriteMode mode) { - VirtualFrame::SpilledScope spilled_scope(frame_); - // NOTE: This is an attempt to inline (a bit) more of the code for - // some possible smi operations (like + and -) when (at least) one - // of the operands is a literal smi. With this optimization, the - // performance of the system is increased by ~15%, and the generated - // code size is increased by ~1% (measured on a combination of - // different benchmarks). 
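A recurring theme in this patch is smi (small integer) tagging: string lengths are now stored as smis, and the renamed SmiOperation inlines arithmetic directly on tagged words. As background, here is a minimal standalone sketch of 32-bit smi tagging with kSmiTag == 0 and kSmiTagSize == 1, the values the ARM port asserts below; it is illustrative only, not V8's implementation:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap pointer

    intptr_t SmiTag(int32_t v)    { return static_cast<intptr_t>(v) << kSmiTagSize; }
    int32_t  SmiUntag(intptr_t s) { return static_cast<int32_t>(s >> kSmiTagSize); }
    bool     IsSmi(intptr_t w)    { return (w & kSmiTagMask) == 0; }

    int main() {
      // Because the tag bit is zero, tagged values add without untagging:
      // (a << 1) + (b << 1) == (a + b) << 1, which is why the inlined
      // Token::ADD path in SmiOperation needs only an add that sets the
      // condition codes plus an overflow branch to the deferred code.
      intptr_t sum = SmiTag(3) + SmiTag(4);
      assert(IsSmi(sum) && SmiUntag(sum) == 7);
      return 0;
    }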
- - // sp[0] : operand - - int int_value = Smi::cast(*value)->value(); - - JumpTarget exit; - frame_->EmitPop(r0); - - bool something_to_inline = true; - switch (op) { - case Token::ADD: { - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); - - __ add(r0, r0, Operand(value), SetCC); - deferred->Branch(vs); - __ tst(r0, Operand(kSmiTagMask)); - deferred->Branch(ne); - deferred->BindExit(); - break; - } - - case Token::SUB: { - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); - - if (reversed) { - __ rsb(r0, r0, Operand(value), SetCC); - } else { - __ sub(r0, r0, Operand(value), SetCC); - } - deferred->Branch(vs); - __ tst(r0, Operand(kSmiTagMask)); - deferred->Branch(ne); - deferred->BindExit(); - break; - } - - - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: { - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); - __ tst(r0, Operand(kSmiTagMask)); - deferred->Branch(ne); - switch (op) { - case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break; - case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break; - case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break; - default: UNREACHABLE(); - } - deferred->BindExit(); - break; - } - - case Token::SHL: - case Token::SHR: - case Token::SAR: { - if (reversed) { - something_to_inline = false; - break; - } - int shift_value = int_value & 0x1f; // least significant 5 bits - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, shift_value, false, mode, r0); - __ tst(r0, Operand(kSmiTagMask)); - deferred->Branch(ne); - __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags - switch (op) { - case Token::SHL: { - if (shift_value != 0) { - __ mov(r2, Operand(r2, LSL, shift_value)); - } - // check that the *unsigned* result fits in a smi - __ add(r3, r2, Operand(0x40000000), SetCC); - deferred->Branch(mi); - break; - } - case Token::SHR: { - // LSR by immediate 0 means shifting 32 bits. - if (shift_value != 0) { - __ mov(r2, Operand(r2, LSR, shift_value)); - } - // check that the *unsigned* result fits in a smi - // neither of the two high-order bits can be set: - // - 0x80000000: high bit would be lost when smi tagging - // - 0x40000000: this number would convert to negative when - // smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi - __ and_(r3, r2, Operand(0xc0000000), SetCC); - deferred->Branch(ne); - break; - } - case Token::SAR: { - if (shift_value != 0) { - // ASR by immediate 0 means shifting 32 bits. - __ mov(r2, Operand(r2, ASR, shift_value)); - } - break; - } - default: UNREACHABLE(); - } - __ mov(r0, Operand(r2, LSL, kSmiTagSize)); - deferred->BindExit(); - break; - } - - case Token::MOD: { - if (reversed || int_value < 2 || !IsPowerOf2(int_value)) { - something_to_inline = false; - break; - } - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); - unsigned mask = (0x80000000u | kSmiTagMask); - __ tst(r0, Operand(mask)); - deferred->Branch(ne); // Go to deferred code on non-Smis and negative. 
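// (Why the single AND below computes the modulus: for a non-negative smi the
// tagged word is value << kSmiTagSize, and for a power-of-two modulus m,
// value % m == value & (m - 1). The widened mask (m << kSmiTagSize) - 1
// selects exactly those bits in the tagged word, so the result comes out
// correctly smi-tagged with no untag/retag sequence.)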
- mask = (int_value << kSmiTagSize) - 1; - __ and_(r0, r0, Operand(mask)); - deferred->BindExit(); - break; - } - - case Token::MUL: { - if (!IsEasyToMultiplyBy(int_value)) { - something_to_inline = false; - break; - } - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, int_value, reversed, mode, r0); - unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; - max_smi_that_wont_overflow <<= kSmiTagSize; - unsigned mask = 0x80000000u; - while ((mask & max_smi_that_wont_overflow) == 0) { - mask |= mask >> 1; - } - mask |= kSmiTagMask; - // This does a single mask that checks for a too high value in a - // conservative way and for a non-Smi. It also filters out negative - // numbers, unfortunately, but since this code is inline we prefer - // brevity to comprehensiveness. - __ tst(r0, Operand(mask)); - deferred->Branch(ne); - MultiplyByKnownInt(masm_, r0, r0, int_value); - deferred->BindExit(); - break; - } - - default: - something_to_inline = false; - break; - } - - if (!something_to_inline) { - if (!reversed) { - frame_->EmitPush(r0); - __ mov(r0, Operand(value)); - frame_->EmitPush(r0); - GenericBinaryOperation(op, mode, int_value); - } else { - __ mov(ip, Operand(value)); - frame_->EmitPush(ip); - frame_->EmitPush(r0); - GenericBinaryOperation(op, mode, kUnknownIntValue); - } - } - - exit.Bind(); -} - - void CodeGenerator::Comparison(Condition cc, Expression* left, Expression* right, @@ -1526,9 +1343,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand, // give us a megamorphic load site. Not super, but it works. LoadAndSpill(applicand); Handle name = Factory::LookupAsciiSymbol("apply"); - __ mov(r2, Operand(name)); - __ ldr(r0, MemOperand(sp, 0)); - frame_->CallLoadIC(RelocInfo::CODE_TARGET); + frame_->CallLoadIC(name, RelocInfo::CODE_TARGET); frame_->EmitPush(r0); // Load the receiver and the existing arguments object onto the @@ -1746,12 +1561,11 @@ void CodeGenerator::VisitBlock(Block* node) { void CodeGenerator::DeclareGlobals(Handle pairs) { - VirtualFrame::SpilledScope spilled_scope(frame_); frame_->EmitPush(cp); - __ mov(r0, Operand(pairs)); - frame_->EmitPush(r0); - __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0))); - frame_->EmitPush(r0); + frame_->EmitPush(Operand(pairs)); + frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0))); + + VirtualFrame::SpilledScope spilled_scope(frame_); frame_->CallRuntime(Runtime::kDeclareGlobals, 3); // The result is discarded. } @@ -1761,7 +1575,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Declaration"); Variable* var = node->proxy()->var(); ASSERT(var != NULL); // must have been resolved @@ -1776,28 +1589,27 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { ASSERT(var->is_dynamic()); // For now, just do a runtime call. frame_->EmitPush(cp); - __ mov(r0, Operand(var->name())); - frame_->EmitPush(r0); + frame_->EmitPush(Operand(var->name())); // Declaration nodes are always declared in only two modes. ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST); PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY; - __ mov(r0, Operand(Smi::FromInt(attr))); - frame_->EmitPush(r0); + frame_->EmitPush(Operand(Smi::FromInt(attr))); // Push initial value, if any. 
// Note: For variables we must not push an initial value (such as // 'undefined') because we may have a (legal) redeclaration and we // must not destroy the current value. if (node->mode() == Variable::CONST) { - __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); - frame_->EmitPush(r0); + frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex); } else if (node->fun() != NULL) { - LoadAndSpill(node->fun()); + Load(node->fun()); } else { - __ mov(r0, Operand(0)); // no initial value! - frame_->EmitPush(r0); + frame_->EmitPush(Operand(0)); } + + VirtualFrame::SpilledScope spilled_scope(frame_); frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); // Ignore the return value (declarations are statements). + ASSERT(frame_->height() == original_height); return; } @@ -1813,12 +1625,11 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { } if (val != NULL) { - { - // Set initial value. - Reference target(this, node->proxy()); - LoadAndSpill(val); - target.SetValue(NOT_CONST_INIT); - } + // Set initial value. + Reference target(this, node->proxy()); + Load(val); + target.SetValue(NOT_CONST_INIT); + // Get rid of the assigned value (declarations are statements). frame_->Drop(); } @@ -2904,7 +2715,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { return; } InstantiateFunction(function_info); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -2916,7 +2727,7 @@ void CodeGenerator::VisitSharedFunctionInfoLiteral( VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); InstantiateFunction(node->shared_function_info()); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -2943,7 +2754,7 @@ void CodeGenerator::VisitConditional(Conditional* node) { LoadAndSpill(node->else_expression()); if (exit.is_linked()) exit.Bind(); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3199,11 +3010,10 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot, // Load the global object. LoadGlobal(); // Setup the name register and call load IC. - frame_->SpillAllButCopyTOSToR0(); - __ mov(r2, Operand(slot->var()->name())); - frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF - ? RelocInfo::CODE_TARGET - : RelocInfo::CODE_TARGET_CONTEXT); + frame_->CallLoadIC(slot->var()->name(), + typeof_state == INSIDE_TYPEOF + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT); // Drop the global object. The result is in r0. frame_->Drop(); } @@ -3215,7 +3025,7 @@ void CodeGenerator::VisitSlot(Slot* node) { #endif Comment cmnt(masm_, "[ Slot"); LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3234,7 +3044,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { Reference ref(this, node); ref.GetValue(); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3246,7 +3056,7 @@ void CodeGenerator::VisitLiteral(Literal* node) { Register reg = frame_->GetTOSRegister(); __ mov(reg, Operand(node->handle())); frame_->EmitPush(reg); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3290,7 +3100,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { done.Bind(); // Push the literal. 
frame_->EmitPush(r2); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3371,7 +3181,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { } } } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3430,7 +3240,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { __ mov(r3, Operand(offset)); __ RecordWrite(r1, r3, r2); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3446,70 +3256,318 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { LoadAndSpill(node->value()); frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); frame_->EmitPush(r0); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } -void CodeGenerator::VisitAssignment(Assignment* node) { - VirtualFrame::RegisterAllocationScope scope(this); +void CodeGenerator::EmitSlotAssignment(Assignment* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - Comment cmnt(masm_, "[ Assignment"); + Comment cmnt(masm(), "[ Variable Assignment"); + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + ASSERT(var != NULL); + Slot* slot = var->slot(); + ASSERT(slot != NULL); - { Reference target(this, node->target(), node->is_compound()); - if (target.is_illegal()) { - // Fool the virtual frame into thinking that we left the assignment's - // value on the frame. - Register tos = frame_->GetTOSRegister(); - __ mov(tos, Operand(Smi::FromInt(0))); - frame_->EmitPush(tos); - ASSERT(frame_->height() == original_height + 1); - return; + // Evaluate the right-hand side. + if (node->is_compound()) { + // For a compound assignment the right-hand side is a binary operation + // between the current property value and the actual right-hand side. + LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); + + // Perform the binary operation. + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = + (node->value()->AsBinaryOperation() != NULL && + node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + if (literal != NULL && literal->handle()->IsSmi()) { + SmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } else { + Load(node->value()); + VirtualFrameBinaryOperation( + node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } + } else { + Load(node->value()); + } + + // Perform the assignment. + if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) { + CodeForSourcePosition(node->position()); + StoreToSlot(slot, + node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm(), "[ Named Property Assignment"); + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + Property* prop = node->target()->AsProperty(); + ASSERT(var == NULL || (prop == NULL && var->is_global())); + + // Initialize name and evaluate the receiver sub-expression if necessary. If + // the receiver is trivial it is not placed on the stack at this point, but + // loaded whenever actually needed. 
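// ("Trivial" here presumably means the receiver expression can be
// re-evaluated at will without observable side effects, e.g. a literal or a
// global object reference, so reloading it on demand is cheaper than keeping
// an extra copy alive on the virtual frame.)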
+ Handle name; + bool is_trivial_receiver = false; + if (var != NULL) { + name = var->name(); + } else { + Literal* lit = prop->key()->AsLiteral(); + ASSERT_NOT_NULL(lit); + name = Handle::cast(lit->handle()); + // Do not materialize the receiver on the frame if it is trivial. + is_trivial_receiver = prop->obj()->IsTrivial(); + if (!is_trivial_receiver) Load(prop->obj()); + } + + // Change to slow case in the beginning of an initialization block to + // avoid the quadratic behavior of repeatedly adding fast properties. + if (node->starts_initialization_block()) { + // Initialization block consists of assignments of the form expr.x = ..., so + // this will never be an assignment to a variable, so there must be a + // receiver object. + ASSERT_EQ(NULL, var); + if (is_trivial_receiver) { + Load(prop->obj()); + } else { + frame_->Dup(); + } + frame_->CallRuntime(Runtime::kToSlowProperties, 1); + } + + // Change to fast case at the end of an initialization block. To prepare for + // that add an extra copy of the receiver to the frame, so that it can be + // converted back to fast case after the assignment. + if (node->ends_initialization_block() && !is_trivial_receiver) { + frame_->Dup(); + } + + // Stack layout: + // [tos] : receiver (only materialized if non-trivial) + // [tos+1] : receiver if at the end of an initialization block + + // Evaluate the right-hand side. + if (node->is_compound()) { + // For a compound assignment the right-hand side is a binary operation + // between the current property value and the actual right-hand side. + if (is_trivial_receiver) { + Load(prop->obj()); + } else if (var != NULL) { + LoadGlobal(); + } else { + frame_->Dup(); } + EmitNamedLoad(name, var != NULL); + frame_->Drop(); // Receiver is left on the stack. + frame_->EmitPush(r0); - if (node->op() == Token::ASSIGN || - node->op() == Token::INIT_VAR || - node->op() == Token::INIT_CONST) { + // Perform the binary operation. + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = + (node->value()->AsBinaryOperation() != NULL && + node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + if (literal != NULL && literal->handle()->IsSmi()) { + SmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } else { Load(node->value()); + VirtualFrameBinaryOperation( + node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } + } else { + // For non-compound assignment just load the right-hand side. + Load(node->value()); + } + + // Stack layout: + // [tos] : value + // [tos+1] : receiver (only materialized if non-trivial) + // [tos+2] : receiver if at the end of an initialization block + + // Perform the assignment. It is safe to ignore constants here. + ASSERT(var == NULL || var->mode() != Variable::CONST); + ASSERT_NE(Token::INIT_CONST, node->op()); + if (is_trivial_receiver) { + // Load the receiver and swap with the value. + Load(prop->obj()); + Register t0 = frame_->PopToRegister(); + Register t1 = frame_->PopToRegister(t0); + frame_->EmitPush(t0); + frame_->EmitPush(t1); + } + CodeForSourcePosition(node->position()); + bool is_contextual = (var != NULL); + EmitNamedStore(name, is_contextual); + frame_->EmitPush(r0); - } else { // Assignment is a compound assignment. - // Get the old value of the lhs. 
- target.GetValue(); - Literal* literal = node->value()->AsLiteral(); - bool overwrite = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); - if (literal != NULL && literal->handle()->IsSmi()) { - VirtualFrameSmiOperation(node->binary_op(), - literal->handle(), - false, - overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); - } else { - Load(node->value()); - VirtualFrameBinaryOperation(node->binary_op(), - overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); - } + // Change to fast case at the end of an initialization block. + if (node->ends_initialization_block()) { + ASSERT_EQ(NULL, var); + // The argument to the runtime call is the receiver. + if (is_trivial_receiver) { + Load(prop->obj()); + } else { + // A copy of the receiver is below the value of the assignment. Swap + // the receiver and the value of the assignment expression. + Register t0 = frame_->PopToRegister(); + Register t1 = frame_->PopToRegister(t0); + frame_->EmitPush(t0); + frame_->EmitPush(t1); } - Variable* var = node->target()->AsVariableProxy()->AsVariable(); - if (var != NULL && - (var->mode() == Variable::CONST) && - node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { - // Assignment ignored - leave the value on the stack. - UnloadReference(&target); + frame_->CallRuntime(Runtime::kToFastProperties, 1); + } + + // Stack layout: + // [tos] : result + + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Keyed Property Assignment"); + Property* prop = node->target()->AsProperty(); + ASSERT_NOT_NULL(prop); + + // Evaluate the receiver subexpression. + Load(prop->obj()); + + // Change to slow case in the beginning of an initialization block to + // avoid the quadratic behavior of repeatedly adding fast properties. + if (node->starts_initialization_block()) { + frame_->Dup(); + frame_->CallRuntime(Runtime::kToSlowProperties, 1); + } + + // Change to fast case at the end of an initialization block. To prepare for + // that add an extra copy of the receiver to the frame, so that it can be + // converted back to fast case after the assignment. + if (node->ends_initialization_block()) { + frame_->Dup(); + } + + // Evaluate the key subexpression. + Load(prop->key()); + + // Stack layout: + // [tos] : key + // [tos+1] : receiver + // [tos+2] : receiver if at the end of an initialization block + + // Evaluate the right-hand side. + if (node->is_compound()) { + // For a compound assignment the right-hand side is a binary operation + // between the current property value and the actual right-hand side. + // Load of the current value leaves receiver and key on the stack. + EmitKeyedLoad(); + frame_->EmitPush(r0); + + // Perform the binary operation. + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = + (node->value()->AsBinaryOperation() != NULL && + node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + if (literal != NULL && literal->handle()->IsSmi()) { + SmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { - CodeForSourcePosition(node->position()); - if (node->op() == Token::INIT_CONST) { - // Dynamic constant initializations must use the function context - // and initialize the actual constant declared. Dynamic variable - // initializations are simply assignments and use SetValue. 
- target.SetValue(CONST_INIT); - } else { - target.SetValue(NOT_CONST_INIT); - } + Load(node->value()); + VirtualFrameBinaryOperation( + node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } + } else { + // For non-compound assignment just load the right-hand side. + Load(node->value()); } - ASSERT(frame_->height() == original_height + 1); + + // Stack layout: + // [tos] : value + // [tos+1] : key + // [tos+2] : receiver + // [tos+3] : receiver if at the end of an initialization block + + // Perform the assignment. It is safe to ignore constants here. + ASSERT(node->op() != Token::INIT_CONST); + CodeForSourcePosition(node->position()); + frame_->PopToR0(); + EmitKeyedStore(prop->key()->type()); + frame_->Drop(2); // Key and receiver are left on the stack. + frame_->EmitPush(r0); + + // Stack layout: + // [tos] : result + // [tos+1] : receiver if at the end of an initialization block + + // Change to fast case at the end of an initialization block. + if (node->ends_initialization_block()) { + // The argument to the runtime call is the extra copy of the receiver, + // which is below the value of the assignment. Swap the receiver and + // the value of the assignment expression. + Register t0 = frame_->PopToRegister(); + Register t1 = frame_->PopToRegister(t0); + frame_->EmitPush(t1); + frame_->EmitPush(t0); + frame_->CallRuntime(Runtime::kToFastProperties, 1); + } + + // Stack layout: + // [tos] : result + + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitAssignment(Assignment* node) { + VirtualFrame::RegisterAllocationScope scope(this); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Assignment"); + + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + Property* prop = node->target()->AsProperty(); + + if (var != NULL && !var->is_global()) { + EmitSlotAssignment(node); + + } else if ((prop != NULL && prop->key()->IsPropertyName()) || + (var != NULL && var->is_global())) { + // Properties whose keys are property names and global variables are + // treated as named property references. We do not need to consider + // global 'this' because it is not a valid left-hand side. + EmitNamedPropertyAssignment(node); + + } else if (prop != NULL) { + // Other properties (including rewritten parameters for a function that + // uses arguments) are keyed property assignments. + EmitKeyedPropertyAssignment(node); + + } else { + // Invalid left-hand side. + Load(node->target()); + frame_->CallRuntime(Runtime::kThrowReferenceError, 1); + // The runtime call doesn't actually return but the code generator will + // still generate code and expects a certain frame height. 
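// (Every expression visitor must leave the virtual frame exactly one element
// taller than it found it; even this unreachable throw path pushes a dummy
// result so the ASSERT_EQ height check at the end of VisitAssignment holds.)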
+ frame_->EmitPush(r0); + } + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3524,7 +3582,7 @@ void CodeGenerator::VisitThrow(Throw* node) { CodeForSourcePosition(node->position()); frame_->CallRuntime(Runtime::kThrow, 1); frame_->EmitPush(r0); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3537,7 +3595,7 @@ void CodeGenerator::VisitProperty(Property* node) { { Reference property(this, node); property.GetValue(); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3744,7 +3802,7 @@ void CodeGenerator::VisitCall(Call* node) { CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); frame_->EmitPush(r0); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3787,7 +3845,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) { // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)). __ str(r0, frame_->Top()); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -3950,9 +4008,12 @@ void CodeGenerator::GenerateMathSqrt(ZoneList* args) { } -// This should generate code that performs a charCodeAt() call or returns +// This generates code that performs a charCodeAt() call or returns // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. -// It is not yet implemented on ARM, so it always goes to the slow case. +// It can handle flat, 8 and 16 bit characters and cons strings where the +// answer is found in the left hand branch of the cons. The slow case will +// flatten the string, which will ensure that the answer is in the left hand +// side the next time around. void CodeGenerator::GenerateFastCharCodeAt(ZoneList* args) { VirtualFrame::SpilledScope spilled_scope(frame_); ASSERT(args->length() == 2); @@ -3960,75 +4021,28 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList* args) { LoadAndSpill(args->at(0)); LoadAndSpill(args->at(1)); - frame_->EmitPop(r0); // Index. - frame_->EmitPop(r1); // String. - - Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string; + frame_->EmitPop(r1); // Index. + frame_->EmitPop(r2); // String. - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &slow); // The 'string' was a Smi. - - ASSERT(kSmiTag == 0); - __ tst(r0, Operand(kSmiTagMask | 0x80000000u)); - __ b(ne, &slow); // The index was negative or not a Smi. - - __ bind(&try_again_with_new_string); - __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &slow); - - // Now r2 has the string type. - __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); - // Now r3 has the length of the string. Compare with the index. - __ cmp(r3, Operand(r0, LSR, kSmiTagSize)); - __ b(le, &slow); - - // Here we know the index is in range. Check that string is sequential. - ASSERT_EQ(0, kSeqStringTag); - __ tst(r2, Operand(kStringRepresentationMask)); - __ b(ne, ¬_a_flat_string); - - // Check whether it is an ASCII string. - ASSERT_EQ(0, kTwoByteStringTag); - __ tst(r2, Operand(kStringEncodingMask)); - __ b(ne, &ascii_string); - - // 2-byte string. We can add without shifting since the Smi tag size is the - // log2 of the number of bytes in a two-byte character. 
- ASSERT_EQ(1, kSmiTagSize); - ASSERT_EQ(0, kSmiShiftSize); - __ add(r1, r1, Operand(r0)); - __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize)); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ jmp(&end); - - __ bind(&ascii_string); - __ add(r1, r1, Operand(r0, LSR, kSmiTagSize)); - __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ jmp(&end); - - __ bind(¬_a_flat_string); - __ and_(r2, r2, Operand(kStringRepresentationMask)); - __ cmp(r2, Operand(kConsStringTag)); - __ b(ne, &slow); - - // ConsString. - // Check that the right hand side is the empty string (ie if this is really a - // flat string in a cons string). If that is not the case we would rather go - // to the runtime system now, to flatten the string. - __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset)); - __ LoadRoot(r3, Heap::kEmptyStringRootIndex); - __ cmp(r2, Operand(r3)); - __ b(ne, &slow); - - // Get the first of the two strings. - __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset)); - __ jmp(&try_again_with_new_string); + Label slow_case; + Label exit; + StringHelper::GenerateFastCharCodeAt(masm_, + r2, + r1, + r3, + r0, + &slow_case, + &slow_case, + &slow_case, + &slow_case); + __ jmp(&exit); - __ bind(&slow); + __ bind(&slow_case); + // Move the undefined value into the result register, which will + // trigger the slow case. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - __ bind(&end); + __ bind(&exit); frame_->EmitPush(r0); } @@ -4037,37 +4051,19 @@ void CodeGenerator::GenerateCharFromCode(ZoneList* args) { Comment(masm_, "[ GenerateCharFromCode"); ASSERT(args->length() == 1); - LoadAndSpill(args->at(0)); - frame_->EmitPop(r0); - - JumpTarget slow_case; - JumpTarget exit; - - // Fast case of Heap::LookupSingleCharacterStringFromCode. 
- ASSERT(kSmiTag == 0); - ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); - __ tst(r0, Operand(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); - slow_case.Branch(nz); - - ASSERT(kSmiTag == 0); - __ mov(r1, Operand(Factory::single_character_string_cache())); - __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r1, ip); - slow_case.Branch(eq); - - frame_->EmitPush(r1); - exit.Jump(); + Register code = r1; + Register scratch = ip; + Register result = r0; - slow_case.Bind(); - frame_->EmitPush(r0); - frame_->CallRuntime(Runtime::kCharFromCode, 1); - frame_->EmitPush(r0); + LoadAndSpill(args->at(0)); + frame_->EmitPop(code); - exit.Bind(); + StringHelper::GenerateCharFromCode(masm_, + code, + scratch, + result, + CALL_FUNCTION); + frame_->EmitPush(result); } @@ -4508,6 +4504,110 @@ void CodeGenerator::GenerateNumberToString(ZoneList* args) { } +class DeferredSwapElements: public DeferredCode { + public: + DeferredSwapElements(Register object, Register index1, Register index2) + : object_(object), index1_(index1), index2_(index2) { + set_comment("[ DeferredSwapElements"); + } + + virtual void Generate(); + + private: + Register object_, index1_, index2_; +}; + + +void DeferredSwapElements::Generate() { + __ push(object_); + __ push(index1_); + __ push(index2_); + __ CallRuntime(Runtime::kSwapElements, 3); +} + + +void CodeGenerator::GenerateSwapElements(ZoneList* args) { + Comment cmnt(masm_, "[ GenerateSwapElements"); + + ASSERT_EQ(3, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + Load(args->at(2)); + + Register index2 = r2; + Register index1 = r1; + Register object = r0; + Register tmp1 = r3; + Register tmp2 = r4; + + frame_->EmitPop(index2); + frame_->EmitPop(index1); + frame_->EmitPop(object); + + DeferredSwapElements* deferred = + new DeferredSwapElements(object, index1, index2); + + // Fetch the map and check if array is in fast case. + // Check that object doesn't require security checks and + // has no indexed interceptor. + __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE); + deferred->Branch(lt); + __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset)); + __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); + deferred->Branch(nz); + + // Check the object's elements are in fast case. + __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset)); + __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(tmp2, ip); + deferred->Branch(ne); + + // Smi-tagging is equivalent to multiplying by 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + + // Check that both indices are smis. + __ mov(tmp2, index1); + __ orr(tmp2, tmp2, index2); + __ tst(tmp2, Operand(kSmiTagMask)); + deferred->Branch(nz); + + // Bring the offsets into the fixed array in tmp1 into index1 and + // index2. + __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize)); + + // Swap elements. 
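// (At this point the receiver itself is no longer needed, as both element
// addresses are derivable from tmp1 plus index1/index2, so its register is
// renamed tmp3 and reused for the loads. The RecordWriteStub calls that
// follow apply the write barrier, since either stored value may be a pointer
// into new space.)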
+ Register tmp3 = object; + object = no_reg; + __ ldr(tmp3, MemOperand(tmp1, index1)); + __ ldr(tmp2, MemOperand(tmp1, index2)); + __ str(tmp3, MemOperand(tmp1, index2)); + __ str(tmp2, MemOperand(tmp1, index1)); + + Label done; + __ InNewSpace(tmp1, tmp2, eq, &done); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + __ mov(tmp2, tmp1); + RecordWriteStub recordWrite1(tmp1, index1, tmp3); + __ CallStub(&recordWrite1); + + RecordWriteStub recordWrite2(tmp2, index2, tmp3); + __ CallStub(&recordWrite2); + + __ bind(&done); + + deferred->BindExit(); + __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex); + frame_->EmitPush(tmp1); +} + + void CodeGenerator::GenerateCallFunction(ZoneList* args) { Comment cmnt(masm_, "[ GenerateCallFunction"); @@ -4598,7 +4698,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { frame_->CallRuntime(function, arg_count); frame_->EmitPush(r0); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -4762,7 +4862,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { __ mov(r0, Operand(Smi::FromInt(0))); frame_->EmitPush(r0); } - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); return; } target.GetValue(); @@ -4830,7 +4930,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { // Postfix: Discard the new value and use the old. if (is_postfix) frame_->EmitPop(r0); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -4968,18 +5068,17 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { if (rliteral != NULL && rliteral->handle()->IsSmi()) { VirtualFrame::RegisterAllocationScope scope(this); Load(node->left()); - VirtualFrameSmiOperation( - node->op(), - rliteral->handle(), - false, - overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); + SmiOperation(node->op(), + rliteral->handle(), + false, + overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { VirtualFrame::RegisterAllocationScope scope(this); Load(node->right()); - VirtualFrameSmiOperation(node->op(), - lliteral->handle(), - true, - overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); + SmiOperation(node->op(), + lliteral->handle(), + true, + overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); } else { VirtualFrame::RegisterAllocationScope scope(this); OverwriteMode overwrite_mode = NO_OVERWRITE; @@ -5006,7 +5105,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) { VirtualFrame::SpilledScope spilled_scope(frame_); __ ldr(r0, frame_->Function()); frame_->EmitPush(r0); - ASSERT(frame_->height() == original_height + 1); + ASSERT_EQ(original_height + 1, frame_->height()); } @@ -5289,7 +5388,8 @@ void DeferredReferenceGetKeyedValue::Generate() { // The rest of the instructions in the deferred code must be together. { Assembler::BlockConstPoolScope block_const_pool(masm_); - // Call keyed load IC. It has all arguments on the stack. + // Call keyed load IC. It has all arguments on the stack and the key in r0. 
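// (This reload is the deferred-path half of the calling-convention change in
// this patch: the keyed load IC now expects the key in r0 as well as on the
// stack, so it is copied from the top of stack before the call.)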
+ __ ldr(r0, MemOperand(sp, 0)); Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the @@ -5343,11 +5443,10 @@ void CodeGenerator::EmitNamedLoad(Handle name, bool is_contextual) { if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { Comment cmnt(masm(), "[ Load from named Property"); // Setup the name register and call load IC. - frame_->SpillAllButCopyTOSToR0(); - __ mov(r2, Operand(name)); - frame_->CallLoadIC(is_contextual - ? RelocInfo::CODE_TARGET_CONTEXT - : RelocInfo::CODE_TARGET); + frame_->CallLoadIC(name, + is_contextual + ? RelocInfo::CODE_TARGET_CONTEXT + : RelocInfo::CODE_TARGET); } else { // Inline the in-object property case. Comment cmnt(masm(), "[ Inlined named property load"); @@ -5400,9 +5499,18 @@ void CodeGenerator::EmitNamedLoad(Handle name, bool is_contextual) { } +void CodeGenerator::EmitNamedStore(Handle name, bool is_contextual) { +#ifdef DEBUG + int expected_height = frame_->height() - (is_contextual ? 1 : 2); +#endif + frame_->CallStoreIC(name, is_contextual); + + ASSERT_EQ(expected_height, frame_->height()); +} + + void CodeGenerator::EmitKeyedLoad() { if (loop_nesting() == 0) { - VirtualFrame::SpilledScope spilled(frame_); Comment cmnt(masm_, "[ Load from keyed property"); frame_->CallKeyedLoadIC(); } else { @@ -5414,7 +5522,7 @@ void CodeGenerator::EmitKeyedLoad() { __ IncrementCounter(&Counters::keyed_load_inline, 1, frame_->scratch0(), frame_->scratch1()); - // Load the receiver and key from the stack. + // Load the receiver and key from the stack. frame_->SpillAllButCopyTOSToR1R0(); Register receiver = r0; Register key = r1; @@ -5489,7 +5597,7 @@ void CodeGenerator::EmitKeyedLoad() { void CodeGenerator::EmitKeyedStore(StaticType* key_type) { - frame_->AssertIsSpilled(); + VirtualFrame::SpilledScope scope(frame_); // Generate inlined version of the keyed store if the code is in a loop // and the key is likely to be a smi. if (loop_nesting() > 0 && key_type->IsLikelySmi()) { @@ -5657,21 +5765,13 @@ void Reference::SetValue(InitState init_state) { Comment cmnt(masm, "[ Store to Slot"); Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); cgen_->StoreToSlot(slot, init_state); - cgen_->UnloadReference(this); + set_unloaded(); break; } case NAMED: { - VirtualFrame::SpilledScope scope(frame); Comment cmnt(masm, "[ Store to named Property"); - // Call the appropriate IC code. - Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); - Handle name(GetName()); - - frame->EmitPop(r0); - frame->EmitPop(r1); - __ mov(r2, Operand(name)); - frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); + cgen_->EmitNamedStore(GetName(), false); frame->EmitPush(r0); set_unloaded(); break; @@ -6489,6 +6589,12 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } +void RecordWriteStub::Generate(MacroAssembler* masm) { + __ RecordWriteHelper(object_, offset_, scratch_); + __ Ret(); +} + + // On entry r0 (rhs) and r1 (lhs) are the values to be compared. // On exit r0 is 0, positive or negative to indicate the result of // the comparison. 
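The RecordWriteStub introduced above is a thin wrapper around MacroAssembler::RecordWriteHelper, i.e. the generational write barrier that GenerateSwapElements relies on after storing into the elements array. As a rough conceptual sketch of what such a barrier does (self-contained C++; the types and helpers here are invented for illustration and simplified relative to V8's heap):

    #include <bitset>
    #include <cstdint>

    using Address = uintptr_t;

    struct Heap {
      Address new_space_start = 0, new_space_end = 0;
      std::bitset<1024> dirty;  // toy remembered set: one bit per 4K region
      bool InNewSpace(Address a) const {
        return a >= new_space_start && a < new_space_end;
      }
      void MarkRegionDirty(Address slot) { dirty.set((slot >> 12) % 1024); }
    };

    // After "*slot = value" inside object: record old-to-new pointers so the
    // scavenger can find them without scanning the whole old generation.
    void RecordWrite(Heap& heap, Address object, Address slot, Address value) {
      if (heap.InNewSpace(object)) return;  // stores into new space are free
      if (heap.InNewSpace(value)) heap.MarkRegionDirty(slot);
    }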
@@ -8388,14 +8494,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); // r2: Number of capture registers - // r3: Length of subject string + // r3: Length of subject string as a smi // subject: Subject string // regexp_data: RegExp data (FixedArray) // Check that the third argument is a positive smi less than the subject // string length. A negative value will be greater (unsigned comparison). __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); - __ cmp(r3, Operand(r0, ASR, kSmiTagSize + kSmiShiftSize)); - __ b(ls, &runtime); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); + __ cmp(r3, Operand(r0)); + __ b(le, &runtime); // r2: Number of capture registers // subject: Subject string @@ -8521,6 +8629,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize); __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ eor(r3, r3, Operand(1)); @@ -8750,12 +8859,151 @@ int CompareStub::MinorKey() { } -void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { +void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm, + Register object, + Register index, + Register scratch, + Register result, + Label* receiver_not_string, + Label* index_not_smi, + Label* index_out_of_range, + Label* slow_case) { + Label not_a_flat_string; + Label try_again_with_new_string; + Label ascii_string; + Label got_char_code; + + // If the receiver is a smi trigger the non-string case. + __ BranchOnSmi(object, receiver_not_string); + + // Fetch the instance type of the receiver into result register. + __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); + __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + // If the receiver is not a string trigger the non-string case. + __ tst(result, Operand(kIsNotStringMask)); + __ b(ne, receiver_not_string); + + // If the index is non-smi trigger the non-smi case. + __ BranchOnNotSmi(index, index_not_smi); + + // Check for index out of range. + __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset)); + // Now scratch has the length of the string. Compare with the index. + __ cmp(scratch, Operand(index)); + __ b(ls, index_out_of_range); + + __ bind(&try_again_with_new_string); + // ----------- S t a t e ------------- + // -- object : string to access + // -- result : instance type of the string + // -- scratch : non-negative index < length + // ----------------------------------- + + // We need special handling for non-flat strings. + ASSERT_EQ(0, kSeqStringTag); + __ tst(result, Operand(kStringRepresentationMask)); + __ b(ne, ¬_a_flat_string); + + // Check for 1-byte or 2-byte string. + ASSERT_EQ(0, kTwoByteStringTag); + __ tst(result, Operand(kStringEncodingMask)); + __ b(ne, &ascii_string); + + // 2-byte string. We can add without shifting since the Smi tag size is the + // log2 of the number of bytes in a two-byte character. 
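// (Concretely: the index is still a tagged smi, i.e. value << 1, and each
// two-byte character occupies exactly two bytes, so the tagged word already
// equals the byte offset value * 2 and is added to the string pointer
// without untagging.)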
+ ASSERT_EQ(1, kSmiTagSize); + ASSERT_EQ(0, kSmiShiftSize); + __ add(scratch, object, Operand(index)); + __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize)); + __ jmp(&got_char_code); + + // Handle non-flat strings. + __ bind(¬_a_flat_string); + __ and_(result, result, Operand(kStringRepresentationMask)); + __ cmp(result, Operand(kConsStringTag)); + __ b(ne, slow_case); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset)); + __ LoadRoot(scratch, Heap::kEmptyStringRootIndex); + __ cmp(result, Operand(scratch)); + __ b(ne, slow_case); + + // Get the first of the two strings and load its instance type. + __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset)); + __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); + __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + __ jmp(&try_again_with_new_string); + + // ASCII string. + __ bind(&ascii_string); + __ add(scratch, object, Operand(index, LSR, kSmiTagSize)); + __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize)); + + __ bind(&got_char_code); + __ mov(result, Operand(result, LSL, kSmiTagSize)); +} + + +void StringHelper::GenerateCharFromCode(MacroAssembler* masm, + Register code, + Register scratch, + Register result, + InvokeFlag flag) { + ASSERT(!code.is(result)); + + Label slow_case; + Label exit; + + // Fast case of Heap::LookupSingleCharacterStringFromCode. + ASSERT(kSmiTag == 0); + ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ tst(code, Operand(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + __ b(nz, &slow_case); + + ASSERT(kSmiTag == 0); + __ mov(result, Operand(Factory::single_character_string_cache())); + __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag)); + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ cmp(result, scratch); + __ b(eq, &slow_case); + __ b(&exit); + + __ bind(&slow_case); + if (flag == CALL_FUNCTION) { + __ push(code); + __ CallRuntime(Runtime::kCharFromCode, 1); + if (!result.is(r0)) { + __ mov(result, r0); + } + } else { + ASSERT(flag == JUMP_FUNCTION); + ASSERT(result.is(r0)); + __ push(code); + __ TailCallRuntime(Runtime::kCharFromCode, 1, 1); + } + + __ bind(&exit); + if (flag == JUMP_FUNCTION) { + ASSERT(result.is(r0)); + __ Ret(); + } +} + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii) { Label loop; Label done; // This loop just copies one character at a time, as it is only used for very @@ -8786,16 +9034,16 @@ enum CopyCharactersFlags { }; -void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags) { +void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags) { bool ascii = (flags & COPY_ASCII) != 0; bool dest_always_aligned = (flags & 
DEST_ALWAYS_ALIGNED) != 0; @@ -8929,15 +9177,15 @@ void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm, } -void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label* not_found) { +void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found) { // Register scratch3 is the general scratch register in this function. Register scratch = scratch3; @@ -8959,9 +9207,9 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, __ bind(¬_array_index); // Calculate the two character string hash. Register hash = scratch1; - GenerateHashInit(masm, hash, c1); - GenerateHashAddCharacter(masm, hash, c2); - GenerateHashGetHash(masm, hash); + StringHelper::GenerateHashInit(masm, hash, c1); + StringHelper::GenerateHashAddCharacter(masm, hash, c2); + StringHelper::GenerateHashGetHash(masm, hash); // Collect the two characters in a register. Register chars = c1; @@ -9028,7 +9276,7 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // If length is not 2 the string is not a candidate. __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); - __ cmp(scratch, Operand(2)); + __ cmp(scratch, Operand(Smi::FromInt(2))); __ b(ne, &next_probe[i]); // Check that the candidate is a non-external ascii string. @@ -9055,9 +9303,9 @@ void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, } -void StringStubBase::GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character) { +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character) { // hash = character + (character << 10); __ add(hash, character, Operand(character, LSL, 10)); // hash ^= hash >> 6; @@ -9065,9 +9313,9 @@ void StringStubBase::GenerateHashInit(MacroAssembler* masm, } -void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character) { +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character) { // hash += character; __ add(hash, hash, Operand(character)); // hash += hash << 10; @@ -9077,8 +9325,8 @@ void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm, } -void StringStubBase::GenerateHashGetHash(MacroAssembler* masm, - Register hash) { +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash) { // hash += hash << 3; __ add(hash, hash, Operand(hash, LSL, 3)); // hash ^= hash >> 11; @@ -9179,7 +9427,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r6: from (smi) // r7: to (smi) __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset)); - __ cmp(r4, Operand(r7, ASR, 1)); + __ cmp(r4, Operand(r7)); __ b(lt, &runtime); // Fail if to > length. // r1: instance type. @@ -9205,8 +9453,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Try to lookup two character string in symbol table. 
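// (The probe hashes the two characters with the incremental string hash,
// StringHelper::GenerateHashInit/AddCharacter/GetHash above, i.e. add/shift/
// xor mixing of the form h += h << 10; h ^= h >> 6, then checks a small
// fixed number of symbol-table buckets inline before falling back to
// allocating a fresh two-character string.)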
Label make_two_character_string; - GenerateTwoCharacterSymbolTableProbe(masm, r3, r4, r1, r5, r6, r7, r9, - &make_two_character_string); + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -9240,8 +9488,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: result string length. // r5: first character of sub string to copy. ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask); - GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - COPY_ASCII | DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + COPY_ASCII | DEST_ALWAYS_ALIGNED); __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -9271,8 +9519,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: result length. // r5: first character of string to copy. ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask); - GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + DEST_ALWAYS_ALIGNED); __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -9298,9 +9546,13 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, Register length_delta = scratch3; __ mov(scratch1, scratch2, LeaveCC, gt); Register min_length = scratch1; + ASSERT(kSmiTag == 0); __ tst(min_length, Operand(min_length)); __ b(eq, &compare_lengths); + // Untag smi. + __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); + // Setup registers so that we only need to increment one register // in the loop. __ add(scratch2, min_length, @@ -9410,9 +9662,12 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Check if either of the strings are empty. In that case return the other. __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); - __ cmp(r2, Operand(0)); // Test if first string is empty. + ASSERT(kSmiTag == 0); + __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. - __ cmp(r3, Operand(0), ne); // Else test if second string is empty. + ASSERT(kSmiTag == 0); + // Else test if second string is empty. + __ cmp(r3, Operand(Smi::FromInt(0)), ne); __ b(ne, &strings_not_empty); // If either string was empty, return r0. __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); @@ -9422,6 +9677,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&strings_not_empty); } + __ mov(r2, Operand(r2, ASR, kSmiTagSize)); + __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // Both strings are non-empty. // r0: first string // r1: second string @@ -9456,8 +9713,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Try to lookup two character string in symbol table. If it is not found // just allocate a new one. 
Label make_two_character_string; - GenerateTwoCharacterSymbolTableProbe(masm, r2, r3, r6, r7, r4, r5, r9, - &make_two_character_string); + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -9566,7 +9823,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r3: length of second string. // r6: first character of result. // r7: result string. - GenerateCopyCharacters(masm, r6, r0, r2, r4, true); + StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); // Load second argument and locate first character. __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); @@ -9574,7 +9831,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r3: length of second string. // r6: next character of result. // r7: result string. - GenerateCopyCharacters(masm, r6, r1, r3, r4, true); + StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); __ mov(r0, Operand(r7)); __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); @@ -9605,7 +9862,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r3: length of second string. // r6: first character of result. // r7: result string. - GenerateCopyCharacters(masm, r6, r0, r2, r4, false); + StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); // Locate first character of second argument. __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); @@ -9614,7 +9871,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r3: length of second string. // r6: next character of result (after copy of first string). // r7: result string. - GenerateCopyCharacters(masm, r6, r1, r3, r4, false); + StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); __ mov(r0, Operand(r7)); __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 80df65448994c5..bb76b633bd845e 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -312,10 +312,20 @@ class CodeGenerator: public AstVisitor { // Store the value on top of the stack to a slot. void StoreToSlot(Slot* slot, InitState init_state); - // Load a named property, leaving it in r0. The receiver is passed on the + // Support for compiling assignment expressions. + void EmitSlotAssignment(Assignment* node); + void EmitNamedPropertyAssignment(Assignment* node); + void EmitKeyedPropertyAssignment(Assignment* node); + + // Load a named property, returning it in r0. The receiver is passed on the // stack, and remains there. void EmitNamedLoad(Handle name, bool is_contextual); + // Store to a named property. If the store is contextual, value is passed on + // the frame and consumed. Otherwise, receiver and value are passed on the + // frame and consumed. The result is returned in r0. + void EmitNamedStore(Handle name, bool is_contextual); + // Load a keyed property, leaving it in r0. The receiver and key are // passed on the stack, and remain there. 
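// (Note the asymmetry with EmitNamedStore above: the load helpers leave
// their operands on the frame so the inlined fast paths can reuse them,
// while the store helpers consume theirs and leave only the result in r0.)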
void EmitKeyedLoad(); @@ -357,11 +367,6 @@ class CodeGenerator: public AstVisitor { bool reversed, OverwriteMode mode); - void VirtualFrameSmiOperation(Token::Value op, - Handle value, - bool reversed, - OverwriteMode mode); - void CallWithArguments(ZoneList* arguments, CallFunctionFlags flags, int position); @@ -457,6 +462,9 @@ class CodeGenerator: public AstVisitor { // Fast support for number to string. void GenerateNumberToString(ZoneList* args); + // Fast swapping of elements. + void GenerateSwapElements(ZoneList* args); + // Fast call for custom callbacks. void GenerateCallFunction(ZoneList* args); @@ -667,34 +675,66 @@ class GenericBinaryOpStub : public CodeStub { }; -class StringStubBase: public CodeStub { +class StringHelper : public AllStatic { public: + // Generates fast code for getting a char code out of a string + // object at the given index. May bail out for four reasons (in the + // listed order): + // * Receiver is not a string (receiver_not_string label). + // * Index is not a smi (index_not_smi label). + // * Index is out of range (index_out_of_range). + // * Some other reason (slow_case label). In this case it's + // guaranteed that the above conditions are not violated, + // e.g. it's safe to assume the receiver is a string and the + // index is a non-negative smi < length. + // When successful, object, index, and scratch are clobbered. + // Otherwise, scratch and result are clobbered. + static void GenerateFastCharCodeAt(MacroAssembler* masm, + Register object, + Register index, + Register scratch, + Register result, + Label* receiver_not_string, + Label* index_not_smi, + Label* index_out_of_range, + Label* slow_case); + + // Generates code for creating a one-char string from the given char + // code. May do a runtime call, so any register can be clobbered + // and, if the given invoke flag specifies a call, an internal frame + // is required. In tail call mode the result must be r0 register. + static void GenerateCharFromCode(MacroAssembler* masm, + Register code, + Register scratch, + Register result, + InvokeFlag flag); + // Generate code for copying characters using a simple loop. This should only // be used in places where the number of characters is small and the // additional setup and checking in GenerateCopyCharactersLong adds too much // overhead. Copying of overlapping regions is not supported. // Dest register ends at the position after the last character written. - void GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii); + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii); // Generate code for copying a large number of characters. This function // is allowed to spend extra time setting up conditions to make copying // faster. Copying of overlapping regions is not supported. // Dest register ends at the position after the last character written. - void GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags); + static void GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags); // Probe the symbol table for a two character string. 
If the string is @@ -704,27 +744,30 @@ class StringStubBase: public CodeStub { // Contents of both c1 and c2 registers are modified. At the exit c1 is // guaranteed to contain halfword with low and high bytes equal to // initial contents of c1 and c2 respectively. - void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label* not_found); + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found); // Generate string hash. - void GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character); + static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character); - void GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character); + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash); - void GenerateHashGetHash(MacroAssembler* masm, - Register hash); + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); }; @@ -735,7 +778,7 @@ enum StringAddFlags { }; -class StringAddStub: public StringStubBase { +class StringAddStub: public CodeStub { public: explicit StringAddStub(StringAddFlags flags) { string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); @@ -752,7 +795,7 @@ class StringAddStub: public StringStubBase { }; -class SubStringStub: public StringStubBase { +class SubStringStub: public CodeStub { public: SubStringStub() {} @@ -861,6 +904,43 @@ class NumberToStringStub: public CodeStub { }; +class RecordWriteStub : public CodeStub { + public: + RecordWriteStub(Register object, Register offset, Register scratch) + : object_(object), offset_(offset), scratch_(scratch) { } + + void Generate(MacroAssembler* masm); + + private: + Register object_; + Register offset_; + Register scratch_; + +#ifdef DEBUG + void Print() { + PrintF("RecordWriteStub (object reg %d), (offset reg %d)," + " (scratch reg %d)\n", + object_.code(), offset_.code(), scratch_.code()); + } +#endif + + // Minor key encoding in 12 bits. 4 bits for each of the three + // registers (object, offset and scratch) OOOOAAAASSSS. + class ScratchBits: public BitField {}; + class OffsetBits: public BitField {}; + class ObjectBits: public BitField {}; + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + // Encode the registers. 
+ return ObjectBits::encode(object_.code()) | + OffsetBits::encode(offset_.code()) | + ScratchBits::encode(scratch_.code()); + } +}; + + } } // namespace v8::internal #endif // V8_ARM_CODEGEN_ARM_H_ diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index d8149f0928e7e4..d02ba764f84b0b 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -161,9 +161,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver - Generate_DebugBreakCallHelper(masm, 0); + Generate_DebugBreakCallHelper(masm, r0.bit()); } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index bed93640ff49ab..e9bdfe55f77d07 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -728,7 +728,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, ASSERT_NOT_NULL(object_slot); // Load the object. - Move(r2, object_slot); + Move(r1, object_slot); // Assert that the key is a smi. Literal* key_literal = property->key()->AsLiteral(); @@ -736,12 +736,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, ASSERT(key_literal->handle()->IsSmi()); // Load the key. - __ mov(r1, Operand(key_literal->handle())); + __ mov(r0, Operand(key_literal->handle())); // Push both as arguments to ic. - __ Push(r2, r1); + __ Push(r1, r0); - // Do a keyed property load. + // Call keyed load IC. It has all arguments on the stack and the key in r0. Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); @@ -1005,6 +1005,8 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); + // Call keyed load IC. It has all arguments on the stack and the key in r0. + __ ldr(r0, MemOperand(sp, 0)); Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); } @@ -1247,6 +1249,9 @@ void FullCodeGenerator::VisitCall(Call* expr) { VisitForValue(prop->key(), kStack); // Record source code position for IC call. SetSourcePosition(prop->position()); + // Call keyed load IC. It has all arguments on the stack and the key in + // r0. + __ ldr(r0, MemOperand(sp, 0)); Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // Load receiver object into r1. 
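// [Editor's note] RecordWriteStub::MinorKey above packs three 4-bit ARM
// register codes into a 12-bit stub key. A stand-alone sketch of that
// encoding; this BitField is a simplified stand-in for V8's template, with
// the field widths and the OOOOAAAASSSS layout taken from the comment in
// the hunk:

#include <cassert>
#include <cstdint>

template <int shift, int size>
struct BitField {
  static uint32_t encode(uint32_t value) {
    assert(value < (1u << size));  // must fit in the field
    return value << shift;
  }
  static uint32_t decode(uint32_t key) {
    return (key >> shift) & ((1u << size) - 1);
  }
};

typedef BitField<0, 4> ScratchBits;  // SSSS
typedef BitField<4, 4> OffsetBits;   // AAAA
typedef BitField<8, 4> ObjectBits;   // OOOO

int main() {
  uint32_t key = ObjectBits::encode(1) | OffsetBits::encode(2) |
                 ScratchBits::encode(3);
  assert(ObjectBits::decode(key) == 1);
  assert(OffsetBits::decode(key) == 2);
  assert(ScratchBits::decode(key) == 3);
  return 0;
}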
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index eec1f213839f98..5b1915f6392d11 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -682,12 +682,13 @@ Object* KeyedLoadIC_Miss(Arguments args); void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- - __ ldm(ia, sp, r2.bit() | r3.bit()); - __ Push(r3, r2); + __ ldr(r1, MemOperand(sp, kPointerSize)); + __ Push(r1, r0); ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); __ TailCallExternalReference(ref, 2, 1); @@ -697,12 +698,13 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- - __ ldm(ia, sp, r2.bit() | r3.bit()); - __ Push(r3, r2); + __ ldr(r1, MemOperand(sp, kPointerSize)); + __ Push(r1, r0); __ TailCallRuntime(Runtime::kGetProperty, 2, 1); } @@ -711,13 +713,14 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label slow, fast, check_pixel_array, check_number_dictionary; - // Get the key and receiver object from the stack. - __ ldm(ia, sp, r0.bit() | r1.bit()); + // Get the object from the stack. + __ ldr(r1, MemOperand(sp, kPointerSize)); // Check that the object isn't a smi. __ BranchOnSmi(r1, &slow); @@ -790,6 +793,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Slow case: Push extra copies of the arguments (2). __ bind(&slow); __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); + __ ldr(r0, MemOperand(sp, 0)); GenerateRuntimeGetProperty(masm); } @@ -797,31 +801,71 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { void KeyedLoadIC::GenerateString(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- + Label miss; + Label index_not_smi; + Label index_out_of_range; + Label slow_char_code; + Label got_char_code; - Label miss, index_ok; - - // Get the key and receiver object from the stack. - __ ldm(ia, sp, r0.bit() | r1.bit()); - - // Check that the receiver isn't a smi. - __ BranchOnSmi(r1, &miss); + // Get the object from the stack. + __ ldr(r1, MemOperand(sp, kPointerSize)); - // Check that the receiver is a string. - Condition is_string = masm->IsObjectStringType(r1, r2); - __ b(NegateCondition(is_string), &miss); + Register object = r1; + Register index = r0; + Register code = r2; + Register scratch = r3; - // Check if key is a smi or a heap number. - __ BranchOnSmi(r0, &index_ok); - __ CheckMap(r0, r2, Factory::heap_number_map(), &miss, false); + StringHelper::GenerateFastCharCodeAt(masm, + object, + index, + scratch, + code, + &miss, // When not a string. + &index_not_smi, + &index_out_of_range, + &slow_char_code); + + // If we didn't bail out, code register contains smi tagged char + // code. 
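// [Editor's note] StringHelper::GenerateFastCharCodeAt, used by
// KeyedLoadIC::GenerateString above, is a fast path with four distinct
// bailout labels, taken in the order documented in codegen-arm.h. A
// host-side sketch of the same control flow, with an enum standing in for
// the labels (all names and the simplified TaggedValue type are
// illustrative):

#include <cstdio>
#include <string>

enum FastPathResult {
  CHAR_CODE_OK,         // fast path succeeded
  RECEIVER_NOT_STRING,  // -> miss
  INDEX_NOT_SMI,        // -> check for a heap-number index
  INDEX_OUT_OF_RANGE,   // -> return undefined
  SLOW_CASE             // -> runtime StringCharCodeAt
};

struct TaggedValue {
  bool is_string;
  bool is_smi;
  std::string string_value;  // only meaningful when is_string
  int smi_value;             // only meaningful when is_smi
};

static FastPathResult FastCharCodeAt(const TaggedValue& object,
                                     const TaggedValue& index,
                                     int* char_code) {
  if (!object.is_string) return RECEIVER_NOT_STRING;
  if (!index.is_smi) return INDEX_NOT_SMI;
  if (index.smi_value < 0 ||
      index.smi_value >= static_cast<int>(object.string_value.size())) {
    return INDEX_OUT_OF_RANGE;
  }
  // The real stub also bails to the slow case for non-flat string
  // representations; a flat string is the fast case sketched here.
  *char_code = object.string_value[index.smi_value];
  return CHAR_CODE_OK;
}

int main() {
  TaggedValue str = { true, false, "abc", 0 };
  TaggedValue idx = { false, true, "", 1 };
  int code = 0;
  if (FastCharCodeAt(str, idx, &code) == CHAR_CODE_OK) {
    printf("char code %d\n", code);  // 98 ('b')
  }
  return 0;
}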
+ __ bind(&got_char_code); + StringHelper::GenerateCharFromCode(masm, code, scratch, r0, JUMP_FUNCTION); +#ifdef DEBUG + __ Abort("Unexpected fall-through from char from code tail call"); +#endif + + // Check if key is a heap number. + __ bind(&index_not_smi); + __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true); + + // Push receiver and key on the stack (now that we know they are a + // string and a number), and call runtime. + __ bind(&slow_char_code); + __ EnterInternalFrame(); + __ Push(object, index); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + ASSERT(!code.is(r0)); + __ mov(code, r0); + __ LeaveInternalFrame(); - __ bind(&index_ok); - // Duplicate receiver and key since they are expected on the stack after - // the KeyedLoadIC call. - __ Push(r1, r0); - __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_JS); + // Check if the runtime call returned NaN char code. If yes, return + // undefined. Otherwise, we can continue. + if (FLAG_debug_code) { + __ BranchOnSmi(code, &got_char_code); + __ ldr(scratch, FieldMemOperand(code, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, ip); + __ Assert(eq, "StringCharCodeAt must return smi or heap number"); + } + __ LoadRoot(scratch, Heap::kNanValueRootIndex); + __ cmp(code, scratch); + __ b(ne, &got_char_code); + __ bind(&index_out_of_range); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ Ret(); __ bind(&miss); GenerateGeneric(masm); @@ -868,13 +912,14 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, ExternalArrayType array_type) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label slow, failed_allocation; - // Get the key and receiver object from the stack. - __ ldm(ia, sp, r0.bit() | r1.bit()); + // Get the object from the stack. + __ ldr(r1, MemOperand(sp, kPointerSize)); // r0: key // r1: receiver object @@ -1104,6 +1149,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, // Slow case: Load name and receiver from stack and jump to runtime. __ bind(&slow); __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1); + __ ldr(r0, MemOperand(sp, 0)); GenerateRuntimeGetProperty(masm); } @@ -1111,13 +1157,14 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label slow; - // Get the key and receiver object from the stack. - __ ldm(ia, sp, r0.bit() | r1.bit()); + // Get the object from the stack. + __ ldr(r1, MemOperand(sp, kPointerSize)); // Check that the receiver isn't a smi. __ BranchOnSmi(r1, &slow); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index ccabe2c67e20af..d97f04b71cc00e 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -232,30 +232,23 @@ void MacroAssembler::LoadRoot(Register destination, } -// Will clobber 4 registers: object, offset, scratch, ip. The -// register 'object' contains a heap object pointer. The heap object -// tag is shifted away. 
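// [Editor's note] The RecordWriteHelper refactor below maps a store offset
// to a bit in the page's remembered set: pointer index = byte offset / 4,
// bitmap word = index / 32, and the byte offset of that word is word * 4 -
// hence the net kRSetWordShift of 3 (shift right 5, then left 2) described
// in the comment. A host-side sketch of the arithmetic, constants assumed
// for 32-bit ARM:

#include <cstdint>
#include <cstdio>

static const int kPointerSizeLog2 = 2;  // 4-byte pointers
static const int kBitsPerIntLog2 = 5;   // 32-bit bitmap words
static const int kRSetWordShift = kBitsPerIntLog2 - kPointerSizeLog2;  // 3

static void RSetPosition(uint32_t byte_offset,
                         uint32_t* word_byte_offset,
                         uint32_t* bit_in_word) {
  uint32_t bit_index = byte_offset >> kPointerSizeLog2;
  *bit_in_word = bit_index & ((1u << kBitsPerIntLog2) - 1);
  // Divide by 32 to pick the word, multiply by 4 for its byte offset.
  *word_byte_offset = (bit_index >> kBitsPerIntLog2) << kPointerSizeLog2;
}

int main() {
  uint32_t word, bit;
  RSetPosition(132, &word, &bit);  // pointer slot 33
  printf("word byte offset %u, bit %u\n", word, bit);  // prints 4 and 1
  return 0;
}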
-void MacroAssembler::RecordWrite(Register object, Register offset, - Register scratch) { - // The compiled code assumes that record write doesn't change the - // context register, so we check that none of the clobbered - // registers are cp. - ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp)); +void MacroAssembler::RecordWriteHelper(Register object, + Register offset, + Register scratch) { + if (FLAG_debug_code) { + // Check that the object is not in new space. + Label not_in_new_space; + InNewSpace(object, scratch, ne, ¬_in_new_space); + Abort("new-space object passed to RecordWriteHelper"); + bind(¬_in_new_space); + } // This is how much we shift the remembered set bit offset to get the // offset of the word in the remembered set. We divide by kBitsPerInt (32, // shift right 5) and then multiply by kIntSize (4, shift left 2). const int kRSetWordShift = 3; - Label fast, done; - - // First, test that the object is not in the new space. We cannot set - // remembered set bits in the new space. - // object: heap object pointer (with tag) - // offset: offset to store location from the object - and_(scratch, object, Operand(ExternalReference::new_space_mask())); - cmp(scratch, Operand(ExternalReference::new_space_start())); - b(eq, &done); + Label fast; // Compute the bit offset in the remembered set. // object: heap object pointer (with tag) @@ -307,6 +300,38 @@ void MacroAssembler::RecordWrite(Register object, Register offset, mov(ip, Operand(1)); orr(scratch, scratch, Operand(ip, LSL, offset)); str(scratch, MemOperand(object)); +} + + +void MacroAssembler::InNewSpace(Register object, + Register scratch, + Condition cc, + Label* branch) { + ASSERT(cc == eq || cc == ne); + and_(scratch, object, Operand(ExternalReference::new_space_mask())); + cmp(scratch, Operand(ExternalReference::new_space_start())); + b(cc, branch); +} + + +// Will clobber 4 registers: object, offset, scratch, ip. The +// register 'object' contains a heap object pointer. The heap object +// tag is shifted away. +void MacroAssembler::RecordWrite(Register object, Register offset, + Register scratch) { + // The compiled code assumes that record write doesn't change the + // context register, so we check that none of the clobbered + // registers are cp. + ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp)); + + Label done; + + // First, test that the object is not in the new space. We cannot set + // remembered set bits in the new space. + InNewSpace(object, scratch, eq, &done); + + // Record the actual write. + RecordWriteHelper(object, offset, scratch); bind(&done); @@ -399,6 +424,20 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { } +void MacroAssembler::InitializeNewString(Register string, + Register length, + Heap::RootListIndex map_index, + Register scratch1, + Register scratch2) { + mov(scratch1, Operand(length, LSL, kSmiTagSize)); + LoadRoot(scratch2, map_index); + str(scratch1, FieldMemOperand(string, String::kLengthOffset)); + mov(scratch1, Operand(String::kEmptyHashField)); + str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); + str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); +} + + int MacroAssembler::ActivationFrameAlignment() { #if defined(V8_HOST_ARCH_ARM) // Running on the real platform. 
Use the alignment as mandated by the local @@ -722,6 +761,7 @@ void MacroAssembler::PopTryHandler() { Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, Register scratch, + int save_at_depth, Label* miss) { // Make sure there's no overlap between scratch and the other // registers. @@ -729,7 +769,11 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, // Keep track of the current object in register reg. Register reg = object_reg; - int depth = 1; + int depth = 0; + + if (save_at_depth == depth) { + str(reg, MemOperand(sp)); + } // Check the maps in the prototype chain. // Traverse the prototype chain from the object and do map checks. @@ -769,6 +813,10 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, mov(reg, Operand(Handle(prototype))); } + if (save_at_depth == depth) { + str(reg, MemOperand(sp)); + } + // Go to the next object in the prototype chain. object = prototype; } @@ -779,7 +827,7 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, b(ne, miss); // Log the check depth. - LOG(IntEvent("check-maps-depth", depth)); + LOG(IntEvent("check-maps-depth", depth + 1)); // Perform security check for access to the global object and return // the holder register. @@ -1020,11 +1068,11 @@ void MacroAssembler::AllocateTwoByteString(Register result, TAG_OBJECT); // Set the map, length and hash field. - LoadRoot(scratch1, Heap::kStringMapRootIndex); - str(length, FieldMemOperand(result, String::kLengthOffset)); - str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); - mov(scratch2, Operand(String::kEmptyHashField)); - str(scratch2, FieldMemOperand(result, String::kHashFieldOffset)); + InitializeNewString(result, + length, + Heap::kStringMapRootIndex, + scratch1, + scratch2); } @@ -1054,12 +1102,11 @@ void MacroAssembler::AllocateAsciiString(Register result, TAG_OBJECT); // Set the map, length and hash field. 
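// [Editor's note] CheckMaps above now takes a save_at_depth parameter and
// stores the current object to [sp] when the prototype walk reaches that
// depth (depth 0 is the receiver itself, which is why the depth counter
// changed from 1 to 0). The control flow, sketched as a host-side loop over
// a toy prototype chain - all types here are illustrative:

#include <cstdio>

struct Obj {
  Obj* prototype;
};

// Walks from object to holder; records the object seen at save_at_depth.
static Obj* WalkPrototypeChain(Obj* object, Obj* holder,
                               int save_at_depth, Obj** saved) {
  int depth = 0;
  Obj* current = object;
  if (save_at_depth == depth) *saved = current;  // str(reg, MemOperand(sp))
  while (current != holder) {
    current = current->prototype;
    ++depth;
    if (save_at_depth == depth) *saved = current;
  }
  return current;  // the holder register
}

int main() {
  Obj holder = { nullptr };
  Obj middle = { &holder };
  Obj receiver = { &middle };
  Obj* saved = nullptr;
  WalkPrototypeChain(&receiver, &holder, 1, &saved);
  printf("saved middle: %d\n", saved == &middle);  // prints 1
  return 0;
}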
- LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex); - mov(scratch1, Operand(Factory::ascii_string_map())); - str(length, FieldMemOperand(result, String::kLengthOffset)); - str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); - mov(scratch2, Operand(String::kEmptyHashField)); - str(scratch2, FieldMemOperand(result, String::kHashFieldOffset)); + InitializeNewString(result, + length, + Heap::kAsciiStringMapRootIndex, + scratch1, + scratch2); } @@ -1074,11 +1121,12 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, scratch2, gc_required, TAG_OBJECT); - LoadRoot(scratch1, Heap::kConsStringMapRootIndex); - mov(scratch2, Operand(String::kEmptyHashField)); - str(length, FieldMemOperand(result, String::kLengthOffset)); - str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); - str(scratch2, FieldMemOperand(result, String::kHashFieldOffset)); + + InitializeNewString(result, + length, + Heap::kConsStringMapRootIndex, + scratch1, + scratch2); } @@ -1093,11 +1141,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result, scratch2, gc_required, TAG_OBJECT); - LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex); - mov(scratch2, Operand(String::kEmptyHashField)); - str(length, FieldMemOperand(result, String::kLengthOffset)); - str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); - str(scratch2, FieldMemOperand(result, String::kHashFieldOffset)); + + InitializeNewString(result, + length, + Heap::kConsAsciiStringMapRootIndex, + scratch1, + scratch2); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 062c5c6782c427..2ec7a39eab9d24 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -86,6 +86,20 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond = al); + + // Check if object is in new space. + // scratch can be object itself, but it will be clobbered. + void InNewSpace(Register object, + Register scratch, + Condition cc, // eq for new space, ne otherwise + Label* branch); + + + // Set the remembered set bit for an offset into an + // object. RecordWriteHelper only works if the object is not in new + // space. + void RecordWriteHelper(Register object, Register offset, Register scratch); + // Sets the remembered set bit for [address+offset], where address is the // address of the heap object 'object'. The address must be in the first 8K // of an allocated page. The 'scratch' register is used in the @@ -243,9 +257,14 @@ class MacroAssembler: public Assembler { // clobbered if it is the same as the holder register. The function // returns a register containing the holder - either object_reg or // holder_reg. + // The function can optionally (when save_at_depth != + // kInvalidProtoDepth) save the object at the given depth by moving + // it to [sp]. Register CheckMaps(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, Label* miss); + Register scratch, + int save_at_depth, + Label* miss); // Generate code for checking access rights - used for security checks // on access to global objects across environments.
The holder register @@ -553,6 +572,12 @@ class MacroAssembler: public Assembler { void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); + void InitializeNewString(Register string, + Register length, + Heap::RootListIndex map_index, + Register scratch1, + Register scratch2); + bool generating_stub_; bool allow_stub_calls_; // This handle will be patched with the code object on installation. diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index a770d160f028bc..095631d6428cea 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -229,7 +229,6 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, // Load length directly from the string. __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); __ Ret(); // Check if the object is a JSValue wrapper. @@ -241,7 +240,6 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); __ Ret(); } @@ -597,6 +595,258 @@ static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler, } +// Reserves space for the extra arguments to FastHandleApiCall in the +// caller's frame. +// +// These arguments are set by CheckPrototypes and GenerateFastApiCall. +static void ReserveSpaceForFastApiCall(MacroAssembler* masm, + Register scratch) { + __ mov(scratch, Operand(Smi::FromInt(0))); + __ push(scratch); + __ push(scratch); + __ push(scratch); + __ push(scratch); +} + + +// Undoes the effects of ReserveSpaceForFastApiCall. +static void FreeSpaceForFastApiCall(MacroAssembler* masm) { + __ Drop(4); +} + + +// Generates call to FastHandleApiCall builtin. +static void GenerateFastApiCall(MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { + // Get the function and setup the context. + JSFunction* function = optimization.constant_function(); + __ mov(r7, Operand(Handle(function))); + __ ldr(cp, FieldMemOperand(r7, JSFunction::kContextOffset)); + + // Pass the additional arguments FastHandleApiCall expects. + bool info_loaded = false; + Object* callback = optimization.api_call_info()->callback(); + if (Heap::InNewSpace(callback)) { + info_loaded = true; + __ Move(r0, Handle(optimization.api_call_info())); + __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset)); + } else { + __ Move(r6, Handle(callback)); + } + Object* call_data = optimization.api_call_info()->data(); + if (Heap::InNewSpace(call_data)) { + if (!info_loaded) { + __ Move(r0, Handle(optimization.api_call_info())); + } + __ ldr(r5, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); + } else { + __ Move(r5, Handle(call_data)); + } + + __ add(sp, sp, Operand(1 * kPointerSize)); + __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit()); + __ sub(sp, sp, Operand(1 * kPointerSize)); + + // Set the number of arguments. + __ mov(r0, Operand(argc + 4)); + + // Jump to the fast api call builtin (tail call). 
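// [Editor's note] ReserveSpaceForFastApiCall above pushes four placeholder
// smis; GenerateFastApiCall then rewrites three of them (call data,
// callback, function) with one ascending stm while leaving the slot at [sp]
// untouched - the add/stm/sub trio stores starting one word above sp. What
// the remaining slot is later used for is not shown in this hunk. A sketch
// of that slot protocol with a plain array standing in for the stack
// (layout assumed):

#include <cassert>

int main() {
  // Index 0 plays the role of sp; the stack grows toward lower indices.
  long stack[5] = {0, 0, 0, 0, 0};  // four reserved slots, one spare
  long* sp = &stack[0];

  long call_data = 5, callback = 6, function = 7;  // stand-ins for r5/r6/r7
  // __ add(sp, sp, #4); __ stm(ia, sp, r5|r6|r7); __ sub(sp, sp, #4);
  sp += 1;            // step over the slot that stays reserved
  sp[0] = call_data;  // stm ia stores in ascending register order
  sp[1] = callback;
  sp[2] = function;
  sp -= 1;

  assert(sp[0] == 0);  // the untouched reserved slot
  assert(sp[1] == 5 && sp[2] == 6 && sp[3] == 7);
  return 0;
}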
+ Handle code = Handle( + Builtins::builtin(Builtins::FastHandleApiCall)); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, JUMP_FUNCTION); +} + + +class CallInterceptorCompiler BASE_EMBEDDED { + public: + CallInterceptorCompiler(StubCompiler* stub_compiler, + const ParameterCount& arguments, + Register name) + : stub_compiler_(stub_compiler), + arguments_(arguments), + name_(name) {} + + void Compile(MacroAssembler* masm, + JSObject* object, + JSObject* holder, + String* name, + LookupResult* lookup, + Register receiver, + Register scratch1, + Register scratch2, + Label* miss) { + ASSERT(holder->HasNamedInterceptor()); + ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); + + // Check that the receiver isn't a smi. + __ BranchOnSmi(receiver, miss); + + CallOptimization optimization(lookup); + + if (optimization.is_constant_call()) { + CompileCacheable(masm, + object, + receiver, + scratch1, + scratch2, + holder, + lookup, + name, + optimization, + miss); + } else { + CompileRegular(masm, + object, + receiver, + scratch1, + scratch2, + name, + holder, + miss); + } + } + + private: + void CompileCacheable(MacroAssembler* masm, + JSObject* object, + Register receiver, + Register scratch1, + Register scratch2, + JSObject* holder_obj, + LookupResult* lookup, + String* name, + const CallOptimization& optimization, + Label* miss_label) { + ASSERT(optimization.is_constant_call()); + ASSERT(!lookup->holder()->IsGlobalObject()); + + int depth1 = kInvalidProtoDepth; + int depth2 = kInvalidProtoDepth; + bool can_do_fast_api_call = false; + if (optimization.is_simple_api_call() && + !lookup->holder()->IsGlobalObject()) { + depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj); + if (depth1 == kInvalidProtoDepth) { + depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj, + lookup->holder()); + } + can_do_fast_api_call = (depth1 != kInvalidProtoDepth) || + (depth2 != kInvalidProtoDepth); + } + + __ IncrementCounter(&Counters::call_const_interceptor, 1, + scratch1, scratch2); + + if (can_do_fast_api_call) { + __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1, + scratch1, scratch2); + ReserveSpaceForFastApiCall(masm, scratch1); + } + + Label miss_cleanup; + Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label; + Register holder = + stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1, + scratch2, name, depth1, miss); + + Label regular_invoke; + LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2, + ®ular_invoke); + + // Generate code for the failed interceptor case. + + // Check the lookup is still valid. 
+ stub_compiler_->CheckPrototypes(holder_obj, receiver, + lookup->holder(), scratch1, + scratch2, name, depth2, miss); + + if (can_do_fast_api_call) { + GenerateFastApiCall(masm, optimization, arguments_.immediate()); + } else { + __ InvokeFunction(optimization.constant_function(), arguments_, + JUMP_FUNCTION); + } + + if (can_do_fast_api_call) { + __ bind(&miss_cleanup); + FreeSpaceForFastApiCall(masm); + __ b(miss_label); + } + + __ bind(®ular_invoke); + if (can_do_fast_api_call) { + FreeSpaceForFastApiCall(masm); + } + } + + void CompileRegular(MacroAssembler* masm, + JSObject* object, + Register receiver, + Register scratch1, + Register scratch2, + String* name, + JSObject* holder_obj, + Label* miss_label) { + Register holder = + stub_compiler_->CheckPrototypes(object, receiver, holder_obj, + scratch1, scratch2, name, + miss_label); + + // Call a runtime function to load the interceptor property. + __ EnterInternalFrame(); + // Save the name_ register across the call. + __ push(name_); + + PushInterceptorArguments(masm, + receiver, + holder, + name_, + holder_obj); + + __ CallExternalReference( + ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptorForCall)), + 5); + + // Restore the name_ register. + __ pop(name_); + __ LeaveInternalFrame(); + } + + void LoadWithInterceptor(MacroAssembler* masm, + Register receiver, + Register holder, + JSObject* holder_obj, + Register scratch, + Label* interceptor_succeeded) { + __ EnterInternalFrame(); + __ Push(holder, name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + __ LeaveInternalFrame(); + + // If interceptor returns no-result sentinel, call the constant function. + __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); + __ cmp(r0, scratch); + __ b(ne, interceptor_succeeded); + } + + StubCompiler* stub_compiler_; + const ParameterCount& arguments_; + Register name_; +}; + + // Generate code to check that a global property cell is empty. Create // the property cell at compilation time if no cell exists for the // property. @@ -631,12 +881,10 @@ Register StubCompiler::CheckPrototypes(JSObject* object, String* name, int save_at_depth, Label* miss) { - // TODO(602): support object saving. - ASSERT(save_at_depth == kInvalidProtoDepth); - // Check that the maps haven't changed. Register result = - masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss); + masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, + save_at_depth, miss); // If we've skipped any global objects, it's not enough to verify // that their maps haven't changed. We also need to check that the @@ -837,6 +1085,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, // -- lr : return address // ----------------------------------- + // If object is not an array, bail out to regular call. + if (!object->IsJSArray()) { + return Heap::undefined_value(); + } + // TODO(639): faster implementation. ASSERT(check == RECEIVER_MAP_CHECK); @@ -886,6 +1139,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, // -- lr : return address // ----------------------------------- + // If object is not an array, bail out to regular call. + if (!object->IsJSArray()) { + return Heap::undefined_value(); + } + // TODO(642): faster implementation. 
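// [Editor's note] CompileArrayPushCall and CompileArrayPopCall above decline
// non-array receivers by returning Heap::undefined_value(); the
// CompileCallConstant dispatch just below keeps a generator's result only
// when it is not undefined and otherwise falls through to the regular stub.
// A sketch of that bail-out protocol, with simplified stand-in types:

#include <cstdio>

struct Code { const char* kind; };
static Code kUndefined = { "undefined" };
static Code kSpecialized = { "array-push-stub" };
static Code kGeneric = { "generic-call-stub" };

// A custom generator may decline by returning the undefined sentinel.
static Code* ArrayPushGenerator(bool receiver_is_array) {
  if (!receiver_is_array) return &kUndefined;  // bail out to regular call
  return &kSpecialized;
}

static Code* CompileCallConstant(bool receiver_is_array) {
  Code* result = ArrayPushGenerator(receiver_is_array);
  if (result != &kUndefined) return result;  // the generator handled it
  return &kGeneric;                          // fall through to regular path
}

int main() {
  printf("%s\n", CompileCallConstant(true)->kind);   // array-push-stub
  printf("%s\n", CompileCallConstant(false)->kind);  // generic-call-stub
  return 0;
}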
ASSERT(check == RECEIVER_MAP_CHECK); @@ -938,10 +1196,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, if (function_info->HasCustomCallGenerator()) { CustomCallGenerator generator = ToCData(function_info->function_data()); - return generator(this, object, holder, function, name, check); + Object* result = generator(this, object, holder, function, name, check); + // undefined means bail out to regular compiler. + if (!result->IsUndefined()) { + return result; + } } - Label miss; + Label miss_in_smi_check; // Get the receiver from the stack const int argc = arguments().immediate(); @@ -950,21 +1212,39 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, // Check that the receiver isn't a smi. if (check != NUMBER_CHECK) { __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &miss); + __ b(eq, &miss_in_smi_check); } // Make sure that it's okay not to patch the on stack receiver // unless we're doing a receiver map check. ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); + CallOptimization optimization(function); + int depth = kInvalidProtoDepth; + Label miss; + switch (check) { case RECEIVER_MAP_CHECK: + __ IncrementCounter(&Counters::call_const, 1, r0, r3); + + if (optimization.is_simple_api_call() && !object->IsGlobalObject()) { + depth = optimization.GetPrototypeDepthOfExpectedType( + JSObject::cast(object), holder); + } + + if (depth != kInvalidProtoDepth) { + __ IncrementCounter(&Counters::call_const_fast_api, 1, r0, r3); + ReserveSpaceForFastApiCall(masm(), r0); + } + // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name, + depth, &miss); // Patch the receiver on the stack with the global proxy if // necessary. if (object->IsGlobalObject()) { + ASSERT(depth == kInvalidProtoDepth); __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ str(r3, MemOperand(sp, argc * kPointerSize)); } @@ -1037,10 +1317,19 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, UNREACHABLE(); } - __ InvokeFunction(function, arguments(), JUMP_FUNCTION); + if (depth != kInvalidProtoDepth) { + GenerateFastApiCall(masm(), optimization, argc); + } else { + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); + } // Handle call cache miss. __ bind(&miss); + if (depth != kInvalidProtoDepth) { + FreeSpaceForFastApiCall(masm()); + } + + __ bind(&miss_in_smi_check); Handle ic = ComputeCallMiss(arguments().immediate()); __ Jump(ic, RelocInfo::CODE_TARGET); @@ -1060,14 +1349,8 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object, // -- r2 : name // -- lr : return address // ----------------------------------- - ASSERT(holder->HasNamedInterceptor()); - ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); - Label miss; - const Register receiver = r0; - const Register holder_reg = r1; - const Register name_reg = r2; - const Register scratch = r3; + Label miss; // Get the number of arguments. const int argc = arguments().immediate(); @@ -1075,80 +1358,24 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object, LookupResult lookup; LookupPostInterceptor(holder, name, &lookup); - // Get the receiver from the stack into r0. - __ ldr(r0, MemOperand(sp, argc * kPointerSize)); - - // Check that the receiver isn't a smi. - __ BranchOnSmi(receiver, &miss); - - // Check that the maps haven't changed. 
- Register reg = CheckPrototypes(object, receiver, holder, holder_reg, - scratch, name, &miss); - if (!reg.is(holder_reg)) { - __ mov(holder_reg, reg); - } - - // If we call a constant function when the interceptor returns - // the no-result sentinel, generate code that optimizes this case. - if (lookup.IsProperty() && - lookup.IsCacheable() && - lookup.type() == CONSTANT_FUNCTION && - lookup.GetConstantFunction()->is_compiled() && - !holder->IsJSArray()) { - // Constant functions cannot sit on global object. - ASSERT(!lookup.holder()->IsGlobalObject()); - - // Call the interceptor. - __ EnterInternalFrame(); - __ Push(holder_reg, name_reg); - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - holder); - __ pop(name_reg); - __ pop(holder_reg); - __ LeaveInternalFrame(); - // r0 no longer contains the receiver. - - // If interceptor returns no-result sentinal, call the constant function. - __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); - __ cmp(r0, scratch); - Label invoke; - __ b(ne, &invoke); - // Check the prototypes between the interceptor's holder and the - // constant function's holder. - CheckPrototypes(holder, holder_reg, - lookup.holder(), r0, - scratch, - name, - &miss); - - __ InvokeFunction(lookup.GetConstantFunction(), - arguments(), - JUMP_FUNCTION); - - __ bind(&invoke); - - } else { - // Call a runtime function to load the interceptor property. - __ EnterInternalFrame(); - __ push(name_reg); - - PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, holder); - - __ CallExternalReference( - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)), - 5); + // Get the receiver from the stack. + __ ldr(r1, MemOperand(sp, argc * kPointerSize)); - __ pop(name_reg); - __ LeaveInternalFrame(); - } + CallInterceptorCompiler compiler(this, arguments(), r2); + compiler.Compile(masm(), + object, + holder, + name, + &lookup, + r1, + r3, + r4, + &miss); // Move returned value, the function to call, to r1. __ mov(r1, r0); // Restore receiver. - __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); + __ ldr(r0, MemOperand(sp, argc * kPointerSize)); GenerateCallFunction(masm(), object, arguments(), &miss); @@ -1597,18 +1824,18 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, int index) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label miss; - __ ldr(r2, MemOperand(sp, 0)); - __ ldr(r0, MemOperand(sp, kPointerSize)); - - __ cmp(r2, Operand(Handle(name))); + // Check the key is the cached one. + __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss); + __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. + GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1622,19 +1849,19 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, AccessorInfo* callback) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label miss; - __ ldr(r2, MemOperand(sp, 0)); - __ ldr(r0, MemOperand(sp, kPointerSize)); - - __ cmp(r2, Operand(Handle(name))); + // Check the key is the cached one. 
+ __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, + __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. + bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, callback, name, &miss, &failure); if (!success) return failure; @@ -1651,19 +1878,18 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, Object* value) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label miss; - // Check the key is the cached one - __ ldr(r2, MemOperand(sp, 0)); - __ ldr(r0, MemOperand(sp, kPointerSize)); - - __ cmp(r2, Operand(Handle(name))); + // Check the key is the cached one. + __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss); + __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. + GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1677,27 +1903,26 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, String* name) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label miss; - // Check the key is the cached one - __ ldr(r2, MemOperand(sp, 0)); - __ ldr(r0, MemOperand(sp, kPointerSize)); - - __ cmp(r2, Operand(Handle(name))); + // Check the key is the cached one. + __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); LookupResult lookup; LookupPostInterceptor(holder, name, &lookup); + __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. GenerateLoadInterceptor(receiver, holder, &lookup, + r1, r0, r2, r3, - r1, name, &miss); __ bind(&miss); @@ -1710,19 +1935,18 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label miss; - // Check the key is the cached one - __ ldr(r2, MemOperand(sp, 0)); - __ ldr(r0, MemOperand(sp, kPointerSize)); - - __ cmp(r2, Operand(Handle(name))); + // Check the key is the cached one. + __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - GenerateLoadArrayLength(masm(), r0, r3, &miss); + __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. + GenerateLoadArrayLength(masm(), r1, r2, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1733,19 +1957,19 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- Label miss; __ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3); - __ ldr(r2, MemOperand(sp)); - __ ldr(r0, MemOperand(sp, kPointerSize)); // receiver - - __ cmp(r2, Operand(Handle(name))); + // Check the key is the cached one. + __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - GenerateLoadStringLength(masm(), r0, r1, r3, &miss); + __ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver. 
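// [Editor's note] After this change every monomorphic keyed-load stub opens
// with the same prologue: compare the key (now arriving in r0) against the
// single cached name, and only on a match load the receiver from
// sp[kPointerSize]. A sketch of that dispatch with simplified types; a miss
// falls back to the generic path, mirroring GenerateLoadMiss:

#include <cstdio>
#include <cstring>

struct KeyedLoadStub {
  const char* cached_name;  // the one property this stub was compiled for
  int cached_result;
};

static bool TryKeyedLoad(const KeyedLoadStub& stub, const char* key,
                         int* result) {
  if (std::strcmp(stub.cached_name, key) != 0) return false;  // -> miss
  *result = stub.cached_result;  // fast field/callback/constant load
  return true;
}

int main() {
  KeyedLoadStub stub = { "length", 3 };
  int value = 0;
  printf("%d\n", TryKeyedLoad(stub, "length", &value));  // 1 (hit)
  printf("%d\n", TryKeyedLoad(stub, "width", &value));   // 0 (miss)
  return 0;
}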
+ GenerateLoadStringLength(masm(), r1, r2, r3, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3); @@ -1759,6 +1983,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { // ----------- S t a t e ------------- // -- lr : return address + // -- r0 : key // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index ed26c41d1707b2..bf5cff2998c23e 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -298,19 +298,38 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id, } -void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) { +void VirtualFrame::CallLoadIC(Handle name, RelocInfo::Mode mode) { Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + SpillAllButCopyTOSToR0(); + __ mov(r2, Operand(name)); CallCodeObject(ic, mode, 0); } +void VirtualFrame::CallStoreIC(Handle name, bool is_contextual) { + Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + PopToR0(); + if (is_contextual) { + SpillAll(); + __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + } else { + EmitPop(r1); + SpillAll(); + } + __ mov(r2, Operand(name)); + CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); +} + + void VirtualFrame::CallKeyedLoadIC() { Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + SpillAllButCopyTOSToR0(); CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); } void VirtualFrame::CallKeyedStoreIC() { + ASSERT(SpilledScope::is_spilled()); Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); } @@ -477,6 +496,38 @@ Register VirtualFrame::Peek() { } +void VirtualFrame::Dup() { + AssertIsNotSpilled(); + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + top_of_stack_state_ = R0_TOS; + break; + case R0_TOS: + __ mov(r1, r0); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_TOS: + __ mov(r0, r1); + top_of_stack_state_ = R0_R1_TOS; + break; + case R0_R1_TOS: + __ push(r1); + __ mov(r1, r0); + // No need to change state as r0 and r1 now contain the same value. + break; + case R1_R0_TOS: + __ push(r0); + __ mov(r0, r1); + // No need to change state as r0 and r1 now contain the same value.
+ break; + default: + UNREACHABLE(); + } + element_count_++; +} + + Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { ASSERT(but_not_to_this_one.is(r0) || but_not_to_this_one.is(r1) || @@ -541,6 +592,19 @@ Register VirtualFrame::GetTOSRegister() { } +void VirtualFrame::EmitPush(Operand operand) { + element_count_++; + if (SpilledScope::is_spilled()) { + __ mov(r0, operand); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ mov(kTopRegister[top_of_stack_state_], operand); +} + + void VirtualFrame::EmitPush(MemOperand operand) { element_count_++; if (SpilledScope::is_spilled()) { @@ -554,6 +618,19 @@ void VirtualFrame::EmitPush(MemOperand operand) { } +void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) { + element_count_++; + if (SpilledScope::is_spilled()) { + __ LoadRoot(r0, index); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ LoadRoot(kTopRegister[top_of_stack_state_], index); +} + + void VirtualFrame::EmitPushMultiple(int count, int src_regs) { ASSERT(SpilledScope::is_spilled()); Adjust(count); diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index 7b56bc244c2690..77bc70ec3303e8 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -308,9 +308,13 @@ class VirtualFrame : public ZoneObject { InvokeJSFlags flag, int arg_count); - // Call load IC. Receiver is on the stack and the property name is in r2. + // Call load IC. Receiver is on the stack. Result is returned in r0. + void CallLoadIC(Handle name, RelocInfo::Mode mode); + + // Call store IC. If the load is contextual, value is found on top of the + // frame. If not, value and receiver are on the frame. Both are consumed. // Result is returned in r0. - void CallLoadIC(RelocInfo::Mode mode); + void CallStoreIC(Handle name, bool is_contextual); // Call keyed load IC. Key and receiver are on the stack. Result is returned // in r0. @@ -348,6 +352,9 @@ class VirtualFrame : public ZoneObject { // must be copied to a scratch register before modification. Register Peek(); + // Duplicate the top of stack. + void Dup(); + // Flushes all registers, but it puts a copy of the top-of-stack in r0. void SpillAllButCopyTOSToR0(); @@ -372,7 +379,9 @@ class VirtualFrame : public ZoneObject { // Push an element on top of the expression stack and emit a // corresponding push instruction. void EmitPush(Register reg); + void EmitPush(Operand operand); void EmitPush(MemOperand operand); + void EmitPushRoot(Heap::RootListIndex index); // Get a register which is free and which must be immediately used to // push on the top of the stack. diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 00010de91ac0ec..216c03b6332c7a 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -684,8 +684,7 @@ function ArraySort(comparefn) { var pivot = a[pivot_index]; // Issue 95: Keep the pivot element out of the comparisons to avoid // infinite recursion if comparefn(pivot, pivot) != 0. - a[pivot_index] = a[from]; - a[from] = pivot; + %_SwapElements(a, from, pivot_index); var low_end = from; // Upper bound of the elements lower than pivot. var high_start = to; // Lower bound of the elements greater than pivot. // From low_end to i are elements equal to pivot. 
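// [Editor's note] The ArraySort hunks above and below replace hand-rolled
// swaps with the %_SwapElements intrinsic; the loop invariant ("from
// low_end to i are elements equal to pivot") is the classic three-way
// (Dutch national flag) partition. The same loop in C++, with std::swap
// standing in for %_SwapElements:

#include <algorithm>
#include <cstdio>
#include <vector>

// Partitions [from, to) into regions <, ==, > pivot using swaps only.
static void ThreeWayPartition(std::vector<int>& a, int from, int to,
                              int pivot) {
  int low_end = from;   // upper bound of the elements lower than pivot
  int high_start = to;  // lower bound of the elements greater than pivot
  int i = from;
  while (i < high_start) {
    if (a[i] < pivot) {
      std::swap(a[i], a[low_end]);  // the %_SwapElements analogue
      ++i;
      ++low_end;
    } else if (a[i] > pivot) {
      --high_start;
      std::swap(a[i], a[high_start]);
    } else {
      ++i;  // equal to pivot: leave it in the middle region
    }
  }
}

int main() {
  std::vector<int> a = {3, 1, 4, 1, 5, 9, 2, 6};
  ThreeWayPartition(a, 0, static_cast<int>(a.size()), 4);
  for (int x : a) printf("%d ", x);  // <4 block, then 4, then >4 block
  printf("\n");
  return 0;
}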
@@ -694,14 +693,12 @@ function ArraySort(comparefn) { var element = a[i]; var order = %_CallFunction(global_receiver, element, pivot, comparefn); if (order < 0) { - a[i] = a[low_end]; - a[low_end] = element; + %_SwapElements(a, i, low_end); i++; low_end++; } else if (order > 0) { high_start--; - a[i] = a[high_start]; - a[high_start] = element; + %_SwapElements(a, i, high_start); } else { // order == 0 i++; } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index ac9663d2131ff4..657d0dc3dac7ac 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -248,9 +248,13 @@ class Genesis BASE_EMBEDDED { void TransferNamedProperties(Handle from, Handle to); void TransferIndexedProperties(Handle from, Handle to); + enum PrototypePropertyMode { + DONT_ADD_PROTOTYPE, + ADD_READONLY_PROTOTYPE, + ADD_WRITEABLE_PROTOTYPE + }; Handle ComputeFunctionInstanceDescriptor( - bool make_prototype_read_only, - bool make_prototype_enumerable = false); + PrototypePropertyMode prototypeMode); void MakeFunctionInstancePrototypeWritable(); static bool CompileBuiltin(int index); @@ -330,7 +334,8 @@ static Handle InstallFunction(Handle target, bool is_ecma_native) { Handle symbol = Factory::LookupAsciiSymbol(name); Handle call_code = Handle(Builtins::builtin(call)); - Handle function = + Handle function = prototype.is_null() ? + Factory::NewFunctionWithoutPrototype(symbol, call_code) : Factory::NewFunctionWithPrototype(symbol, type, instance_size, @@ -346,23 +351,23 @@ static Handle InstallFunction(Handle target, Handle Genesis::ComputeFunctionInstanceDescriptor( - bool make_prototype_read_only, - bool make_prototype_enumerable) { + PrototypePropertyMode prototypeMode) { Handle result = Factory::empty_descriptor_array(); - // Add prototype. - PropertyAttributes attributes = static_cast( - (make_prototype_enumerable ? 0 : DONT_ENUM) - | DONT_DELETE - | (make_prototype_read_only ? READ_ONLY : 0)); - result = - Factory::CopyAppendProxyDescriptor( - result, - Factory::prototype_symbol(), - Factory::NewProxy(&Accessors::FunctionPrototype), - attributes); + if (prototypeMode != DONT_ADD_PROTOTYPE) { + PropertyAttributes attributes = static_cast( + DONT_ENUM | + DONT_DELETE | + (prototypeMode == ADD_READONLY_PROTOTYPE ? READ_ONLY : 0)); + result = + Factory::CopyAppendProxyDescriptor( + result, + Factory::prototype_symbol(), + Factory::NewProxy(&Accessors::FunctionPrototype), + attributes); + } - attributes = + PropertyAttributes attributes = static_cast(DONT_ENUM | DONT_DELETE | READ_ONLY); // Add length. result = @@ -407,14 +412,29 @@ Handle Genesis::CreateEmptyFunction() { // Please note that the prototype property for function instances must be // writable. Handle function_map_descriptors = - ComputeFunctionInstanceDescriptor(false, false); + ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE); fm->set_instance_descriptors(*function_map_descriptors); + fm->set_function_with_prototype(true); + + // Functions with this map will not have a 'prototype' property, and + // can not be used as constructors. 
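// [Editor's note] The bootstrapper hunks above and below split function maps
// in two: instances of the ordinary map carry a 'prototype' accessor, while
// builtins such as Function.prototype.call/apply get a map without it that
// is marked as unusable for construction. A toy model of that split (field
// names assumed from the set_function_with_prototype calls):

#include <cstdio>

struct Map {
  bool has_prototype_property;   // descriptor array includes 'prototype'
  bool function_with_prototype;  // may instances be used as constructors?
};

static const Map kFunctionMap = { true, true };
static const Map kFunctionWithoutPrototypeMap = { false, false };

static bool CanBeUsedAsConstructor(const Map& map) {
  return map.function_with_prototype;
}

int main() {
  printf("%d\n", CanBeUsedAsConstructor(kFunctionMap));                  // 1
  printf("%d\n", CanBeUsedAsConstructor(kFunctionWithoutPrototypeMap));  // 0
  return 0;
}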
+ Handle function_without_prototype_map = + Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); + global_context()->set_function_without_prototype_map( + *function_without_prototype_map); + Handle function_without_prototype_map_descriptors = + ComputeFunctionInstanceDescriptor(DONT_ADD_PROTOTYPE); + function_without_prototype_map->set_instance_descriptors( + *function_without_prototype_map_descriptors); + function_without_prototype_map->set_function_with_prototype(false); // Allocate the function map first and then patch the prototype later fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); global_context()->set_function_map(*fm); - function_map_descriptors = ComputeFunctionInstanceDescriptor(true); + function_map_descriptors = + ComputeFunctionInstanceDescriptor(ADD_READONLY_PROTOTYPE); fm->set_instance_descriptors(*function_map_descriptors); + fm->set_function_with_prototype(true); Handle object_name = Handle(Heap::Object_symbol()); @@ -442,7 +462,7 @@ Handle Genesis::CreateEmptyFunction() { // 262 15.3.4. Handle symbol = Factory::LookupAsciiSymbol("Empty"); Handle empty_function = - Factory::NewFunction(symbol, Factory::null_value()); + Factory::NewFunctionWithoutPrototype(symbol); // --- E m p t y --- Handle code = @@ -457,10 +477,14 @@ Handle Genesis::CreateEmptyFunction() { empty_function->shared()->DontAdaptArguments(); global_context()->function_map()->set_prototype(*empty_function); global_context()->function_instance_map()->set_prototype(*empty_function); + global_context()->function_without_prototype_map()-> + set_prototype(*empty_function); // Allocate the function map first and then patch the prototype later - Handle empty_fm = Factory::CopyMapDropDescriptors(fm); - empty_fm->set_instance_descriptors(*function_map_descriptors); + Handle empty_fm = Factory::CopyMapDropDescriptors( + function_without_prototype_map); + empty_fm->set_instance_descriptors( + *function_without_prototype_map_descriptors); empty_fm->set_prototype(global_context()->object_function()->prototype()); empty_function->set_map(*empty_fm); return empty_function; @@ -1215,12 +1239,12 @@ bool Genesis::InstallNatives() { // Install the call and the apply functions. Handle call = InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize, - Factory::NewJSObject(Top::object_function(), TENURED), + Handle::null(), Builtins::FunctionCall, false); Handle apply = InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize, - Factory::NewJSObject(Top::object_function(), TENURED), + Handle::null(), Builtins::FunctionApply, false); @@ -1311,14 +1335,12 @@ bool Genesis::InstallNatives() { static FixedArray* CreateCache(int size, JSFunction* factory) { // Caches are supposed to live for a long time, allocate in old space. int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size; - Handle cache = - Factory::NewFixedArrayWithHoles(array_size, TENURED); + // Cannot use cast as object is not fully initialized yet. 
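// [Editor's note] In the CreateCache hunk continuing below, the removed
// lines seeded both the finger (clock hand) and cache-size fields with
// Smi::FromInt(kEntriesIndex); the new MakeZeroSize() call presumably
// performs the same reset. A toy model with plain ints standing in for
// smis; the header indices are assumptions, not taken from this diff:

#include <cassert>

static const int kFactoryIndex = 0;    // the cache-filling function
static const int kFingerIndex = 1;     // clock hand for eviction
static const int kCacheSizeIndex = 2;  // first unused entry slot
static const int kEntriesIndex = 3;    // where key/value pairs begin

struct FunctionResultCache {
  int fields[3 + 2 * 4];  // header plus room for 4 key/value pairs

  void MakeZeroSize() {
    // An empty cache: finger and size both point at the first entry slot.
    fields[kFingerIndex] = kEntriesIndex;
    fields[kCacheSizeIndex] = kEntriesIndex;
  }
};

int main() {
  FunctionResultCache cache = {};
  cache.fields[kFactoryIndex] = 0;  // stand-in for the factory function
  cache.MakeZeroSize();
  assert(cache.fields[kFingerIndex] == kEntriesIndex);
  assert(cache.fields[kCacheSizeIndex] == kEntriesIndex);
  return 0;
}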
+ JSFunctionResultCache* cache = reinterpret_cast( + *Factory::NewFixedArrayWithHoles(array_size, TENURED)); cache->set(JSFunctionResultCache::kFactoryIndex, factory); - cache->set(JSFunctionResultCache::kFingerIndex, - Smi::FromInt(JSFunctionResultCache::kEntriesIndex)); - cache->set(JSFunctionResultCache::kCacheSizeIndex, - Smi::FromInt(JSFunctionResultCache::kEntriesIndex)); - return *cache; + cache->MakeZeroSize(); + return cache; } @@ -1655,9 +1677,10 @@ void Genesis::MakeFunctionInstancePrototypeWritable() { HandleScope scope; Handle function_map_descriptors = - ComputeFunctionInstanceDescriptor(false); + ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE); Handle fm = Factory::CopyMapDropDescriptors(Top::function_map()); fm->set_instance_descriptors(*function_map_descriptors); + fm->set_function_with_prototype(true); Top::context()->global_context()->set_function_map(*fm); } diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index ccb6c0c5338a73..dd2e3cbfc14495 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -164,8 +164,7 @@ enum BuiltinExtraArguments { V(STRING_ADD_LEFT, 1) \ V(STRING_ADD_RIGHT, 1) \ V(APPLY_PREPARE, 1) \ - V(APPLY_OVERFLOW, 1) \ - V(STRING_CHAR_AT, 1) + V(APPLY_OVERFLOW, 1) class ObjectVisitor; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 5bbf050ccabcd2..f89399a974fc79 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -31,7 +31,6 @@ #include "codegen-inl.h" #include "compiler.h" #include "debug.h" -#include "liveedit.h" #include "oprofile-agent.h" #include "prettyprinter.h" #include "register-allocator-inl.h" @@ -204,7 +203,6 @@ Handle CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, // all the pieces into a Code object. This function is only to be called by // the compiler.cc code. Handle CodeGenerator::MakeCode(CompilationInfo* info) { - LiveEditFunctionTracker live_edit_tracker(info->function()); Handle