vendor/v8/src/arm/macro-assembler-arm.cc in mustang-0.0.1 vs vendor/v8/src/arm/macro-assembler-arm.cc in mustang-0.1.0
- old
+ new
@@ -37,15 +37,18 @@
#include "runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
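
This release vendors an isolate-aware V8: every assembler now carries an explicit Isolate*, and code_object_ is created from that isolate's heap, with a NULL guard for assemblers constructed before an isolate exists. A minimal construction sketch under the new signature (sketch only; mirrors the CodePatcher change at the end of this diff, and a NULL buffer follows the base Assembler's convention of allocating its own):

    MacroAssembler masm(Isolate::Current(), NULL, 256);
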
// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
@@ -101,66 +104,132 @@
// 'code' is always generated ARM code, never THUMB code
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
+int MacroAssembler::CallSize(Register target, Condition cond) {
+#if USE_BLX
+ return kInstrSize;
+#else
+ return 2 * kInstrSize;
+#endif
+}
+
+
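
Each Call overload now gains a CallSize twin, and the DEBUG-only pre_position/post_position bookkeeping below checks that every call sequence is exactly that many bytes; blocking the constant pool keeps the assembler from interleaving literals and breaking the count. The invariant, restated as a hypothetical helper (not part of V8) using only entry points visible in this diff:

    void CheckCallSizeInvariant(MacroAssembler* masm, Register target) {
      int before = masm->pc_offset();
      masm->Call(target, al);
      // Exactly CallSize bytes were emitted, whichever way USE_BLX went.
      CHECK_EQ(before + masm->CallSize(target, al), masm->pc_offset());
    }
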
void MacroAssembler::Call(Register target, Condition cond) {
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, cond), post_position);
+#endif
}
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
+
+
+void MacroAssembler::Call(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
// blx ip
- // The two instructions (ldr and blx) could be separated by a constant
- // pool and the code would still work. The issue comes from the
- // patching code which expect the ldr to be just above the blx.
- { BlockConstPoolScope block_const_pool(this);
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target, since this is not the case here
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
+ // Statement positions are expected to be recorded when the target
+ // address is loaded. The mov method will automatically record
+ // positions when pc is the target, since this is not the case here
+ // we have to do it explicitly.
+ positions_recorder()->WriteRecordedPositions();
- mov(ip, Operand(target, rmode), LeaveCC, cond);
- blx(ip, cond);
- }
+ mov(ip, Operand(target, rmode), LeaveCC, cond);
+ blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
-
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Call(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
}
void MacroAssembler::Ret(Condition cond) {
#if USE_BX
@@ -269,10 +338,33 @@
sbfx(dst, src1, lsb, width, cond);
}
}
+void MacroAssembler::Bfi(Register dst,
+ Register src,
+ Register scratch,
+ int lsb,
+ int width,
+ Condition cond) {
+ ASSERT(0 <= lsb && lsb < 32);
+ ASSERT(0 <= width && width < 32);
+ ASSERT(lsb + width < 32);
+ ASSERT(!scratch.is(dst));
+ if (width == 0) return;
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ and_(scratch, src, Operand((1 << width) - 1));
+ mov(scratch, Operand(scratch, LSL, lsb));
+ orr(dst, dst, scratch);
+ } else {
+ bfi(dst, src, lsb, width, cond);
+ }
+}
+
+
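
Bfi is new in this release: on ARMv7 it emits the native bitfield insert, and on older cores it falls back to a bic/and/lsl/orr sequence whose mask is width ones starting at bit lsb. A host-side sketch of that fallback with a worked example (the helper name and values are purely illustrative):

    #include <cstdint>
    #include <cstdio>

    uint32_t BfiFallback(uint32_t dst, uint32_t src, int lsb, int width) {
      // (1 << (width + lsb)) - 1 - ((1 << lsb) - 1): 'width' ones at 'lsb'.
      uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
      dst &= ~mask;                                // bic dst, dst, #mask
      uint32_t field = src & ((1u << width) - 1);  // and scratch, src, ...
      return dst | (field << lsb);                 // orr dst, dst, scratch, lsl lsb
    }

    int main() {
      // lsb=4, width=8: mask is 0xff0, and inserting 0xab into 0xffffffff
      // prints fffffabf.
      std::printf("%08x\n", BfiFallback(0xffffffffu, 0xabu, 4, 8));
      return 0;
    }
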
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
@@ -342,11 +434,11 @@
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
@@ -370,12 +462,12 @@
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
Label* branch) {
ASSERT(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask()));
- cmp(scratch, Operand(ExternalReference::new_space_start()));
+ and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
b(cond, branch);
}
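
Both external references now come from the isolate, but the test itself is unchanged: new space occupies a single power-of-two-aligned region, so masking an address and comparing it with the space's start decides membership. In plain C++ (illustrative function; mask and start are whatever the heap hands back at runtime):

    #include <cstdint>

    // and_ scratch, object, mask ; cmp scratch, start
    bool ObjectInNewSpace(uintptr_t object, uintptr_t mask, uintptr_t start) {
      return (object & mask) == start;
    }
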
// Will clobber 4 registers: object, offset, scratch, ip. The
@@ -404,11 +496,11 @@
bind(&done);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
@@ -436,11 +528,11 @@
bind(&done);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
}
}
@@ -640,21 +732,21 @@
ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
mov(fp, Operand(sp)); // Setup new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(ip, Operand(0));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
str(cp, MemOperand(ip));
// Optionally save all double registers.
if (save_doubles) {
sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
@@ -726,15 +818,15 @@
}
}
// Clear top frame.
mov(r3, Operand(0, RelocInfo::NONE));
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
ldr(cp, MemOperand(ip));
#ifdef DEBUG
str(r3, MemOperand(ip));
#endif
@@ -759,11 +851,11 @@
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
bool definitely_matches = false;
Label regular_invoke;
// Check whether the expected and actual arguments count match. If not,
// setup registers according to contract with ArgumentsAdaptorTrampoline:
@@ -812,14 +904,17 @@
mov(r3, Operand(code_constant));
add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
}
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) {
+ call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ }
Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
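
From here on, PostCallGenerator gives way to CallWrapper throughout the invoke paths. Its shape, as declared in V8's shared macro-assembler header, is approximately the sketch below; BeforeCall receives the byte size of the upcoming call, which is exactly why the CallSize overloads were added in the same release:

    // Approximate shape only; see the real declaration in the V8 headers.
    class CallWrapper {
     public:
      virtual ~CallWrapper() {}
      // Called just before emitting a call, with the size in bytes of the
      // call sequence about to be generated.
      virtual void BeforeCall(int call_size) = 0;
      // Called just after emitting a call, i.e. at the call's return site.
      virtual void AfterCall() = 0;
    };
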
@@ -829,18 +924,19 @@
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- post_call_generator);
+ call_wrapper);
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
Call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
}
@@ -871,11 +967,11 @@
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
Register expected_reg = r2;
Register code_reg = r3;
@@ -888,11 +984,11 @@
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
@@ -952,11 +1048,11 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
+ mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
@@ -975,11 +1071,11 @@
ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(r1, MemOperand(r3));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r1);
// Link this handler as the new current one.
str(sp, MemOperand(r3));
@@ -994,11 +1090,11 @@
ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(r6, MemOperand(r7));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r6);
// Link this handler as the new current one.
str(sp, MemOperand(r7));
@@ -1007,11 +1103,11 @@
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
pop(r1);
- mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(ip));
}
@@ -1023,11 +1119,11 @@
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r2);
@@ -1042,11 +1138,11 @@
// Set cp to NULL if fp is NULL.
mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
pop(pc);
@@ -1062,11 +1158,11 @@
if (!value.is(r0)) {
mov(r0, value);
}
// Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
bind(&loop);
@@ -1086,19 +1182,21 @@
pop(r2);
str(r2, MemOperand(r3));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate())));
str(r0, MemOperand(r2));
}
// Stack layout at this point. See also StackHandlerConstants.
// sp -> state (ENTRY)
@@ -1115,11 +1213,11 @@
// Set cp to NULL if fp is NULL.
mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
pop(pc);
@@ -1147,11 +1245,11 @@
int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the global_context_map.
@@ -1166,11 +1264,11 @@
ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
cmp(scratch, Operand(ip));
b(eq, &same_contexts);
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, ip); // Move ip to its holding place.
@@ -1208,11 +1306,11 @@
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
mov(scratch2, Operand(0x7291));
}
@@ -1221,10 +1319,12 @@
}
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(ip));
+ ASSERT(!scratch2.is(ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
@@ -1233,13 +1333,13 @@
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
intptr_t top =
reinterpret_cast<intptr_t>(new_space_allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
@@ -1255,11 +1355,11 @@
// does not need ip for implicit literal generation.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
@@ -1289,11 +1389,11 @@
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
mov(scratch2, Operand(0x7291));
}
@@ -1313,13 +1413,13 @@
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
intptr_t top =
reinterpret_cast<intptr_t>(new_space_allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
@@ -1333,11 +1433,11 @@
// does not need ip for implicit literal generation.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
@@ -1358,11 +1458,11 @@
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
Check(eq, "Unaligned allocation in new space");
}
str(scratch2, MemOperand(topaddr));
@@ -1374,11 +1474,11 @@
void MacroAssembler::UndoAllocationInNewSpace(Register object,
Register scratch) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
// Check that the object un-allocated is below the current top.
@@ -1510,10 +1610,18 @@
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
+void MacroAssembler::CompareRoot(Register obj,
+ Heap::RootListIndex index) {
+ ASSERT(!obj.is(ip));
+ LoadRoot(ip, index);
+ cmp(obj, ip);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
bool is_heap_object) {
@@ -1616,11 +1724,11 @@
return ref0.address() - ref1.address();
}
MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
- ApiFunction* function, int stack_space) {
+ ExternalReference function, int stack_space) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
ExternalReference::handle_scope_limit_address(),
@@ -1654,11 +1762,11 @@
ldr(r0, MemOperand(r0), ne);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
str(r4, MemOperand(r7, kNextOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ldr(r1, MemOperand(r7, kLevelOffset));
cmp(r1, r6);
Check(eq, "Unexpected level after return from api call");
}
sub(r6, r6, Operand(1));
@@ -1668,33 +1776,38 @@
b(ne, &delete_allocated_handles);
// Check if the function scheduled an exception.
bind(&leave_exit_frame);
LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
LeaveExitFrame(false, r4);
mov(pc, lr);
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+ MaybeObject* result
+ = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0,
+ 1);
if (result->IsFailure()) {
return result;
}
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
str(r5, MemOperand(r7, kLimitOffset));
mov(r4, r0);
- PrepareCallCFunction(0, r5);
- CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+ PrepareCallCFunction(1, r5);
+ mov(r0, Operand(ExternalReference::isolate_address()));
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
mov(r0, r4);
jmp(&leave_exit_frame);
return result;
}
@@ -1816,13 +1929,13 @@
Label right_exponent, done;
// Get exponent word.
ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
mov(dest, Operand(0, RelocInfo::NONE));
// Check whether the exponent matches a 32 bit signed int that is not a Smi.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
@@ -1881,10 +1994,171 @@
bind(&done);
}
}
+void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
+ SwVfpRegister result,
+ DwVfpRegister double_input,
+ Register scratch1,
+ Register scratch2,
+ CheckForInexactConversion check_inexact) {
+ ASSERT(CpuFeatures::IsSupported(VFP3));
+ CpuFeatures::Scope scope(VFP3);
+ Register prev_fpscr = scratch1;
+ Register scratch = scratch2;
+
+ int32_t check_inexact_conversion =
+ (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
+
+ // Set custom FPCSR:
+ // - Set rounding mode.
+ // - Clear vfp cumulative exception flags.
+ // - Make sure Flush-to-zero mode control bit is unset.
+ vmrs(prev_fpscr);
+ bic(scratch,
+ prev_fpscr,
+ Operand(kVFPExceptionMask |
+ check_inexact_conversion |
+ kVFPRoundingModeMask |
+ kVFPFlushToZeroMask));
+ // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
+ if (rounding_mode != kRoundToNearest) {
+ orr(scratch, scratch, Operand(rounding_mode));
+ }
+ vmsr(scratch);
+
+ // Convert the argument to an integer.
+ vcvt_s32_f64(result,
+ double_input,
+ (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
+ : kFPSCRRounding);
+
+ // Retrieve FPSCR.
+ vmrs(scratch);
+ // Restore FPSCR.
+ vmsr(prev_fpscr);
+ // Check for vfp exceptions.
+ tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+}
+
+
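
The FPSCR handling above follows a save/modify/convert/inspect/restore pattern: the old FPSCR is parked in prev_fpscr, the cumulative exception flags (plus inexact, when the caller asked to detect it) and the flush-to-zero bit are cleared, the rounding mode is installed, and after the vcvt the freshly read FPSCR is tested for exceptions. The register arithmetic restated on the host (a sketch; the parameters stand in for the real kVFP* constants):

    #include <cstdint>

    uint32_t MakeFpscrForTruncate(uint32_t prev_fpscr,
                                  uint32_t rounding_mode_bits,
                                  uint32_t exception_mask,
                                  uint32_t rounding_mask,
                                  uint32_t flush_to_zero_mask) {
      // bic: drop exception flags, old rounding mode, flush-to-zero control.
      uint32_t fpscr =
          prev_fpscr & ~(exception_mask | rounding_mask | flush_to_zero_mask);
      // orr: round-to-nearest is encoded as 0b00, so only other modes add bits.
      return fpscr | rounding_mode_bits;
    }
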
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch) {
+ Label done, normal_exponent, restore_sign;
+
+ // Extract the biased exponent in result.
+ Ubfx(result,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ cmp(result, Operand(HeapNumber::kExponentMask));
+ mov(result, Operand(0), LeaveCC, eq);
+ b(eq, &done);
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ sub(result,
+ result,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
+ SetCC);
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ b(le, &normal_exponent);
+ mov(result, Operand(0));
+ b(&done);
+
+ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
+
+ // Save the sign.
+ Register sign = result;
+ result = no_reg;
+ and_(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ orr(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ mov(input_high, Operand(input_high, LSL, scratch));
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ rsb(scratch, scratch, Operand(32), SetCC);
+ b(&pos_shift, ge);
+
+ // Negate scratch.
+ rsb(scratch, scratch, Operand(0));
+ mov(input_low, Operand(input_low, LSL, scratch));
+ b(&shift_done);
+
+ bind(&pos_shift);
+ mov(input_low, Operand(input_low, LSR, scratch));
+
+ bind(&shift_done);
+ orr(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ cmp(sign, Operand(0));
+ result = sign;
+ sign = no_reg;
+ rsb(result, input_high, Operand(0), LeaveCC, ne);
+ mov(result, input_high, LeaveCC, eq);
+ bind(&done);
+}
+
+
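
This routine is the slow half of ECMA-262 ToInt32: once a double is known not to fit in int32, it reconstitutes the integer's low 32 bits straight from the exponent and the two mantissa words, then reapplies the sign with an rsb. A behavioral C++ restatement (not the emitted code), assuming the fast conversion was already rejected, i.e. d is NaN, infinite, or |d| >= 2^31:

    #include <cstdint>
    #include <cstring>

    int32_t OutOfInt32RangeTruncate(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t input_high = static_cast<uint32_t>(bits >> 32);
      uint32_t input_low = static_cast<uint32_t>(bits);
      uint32_t exponent = (input_high >> 20) & 0x7ff;  // Biased exponent.
      if (exponent == 0x7ff) return 0;                 // Infinity and NaN.
      int delta = static_cast<int>(exponent) - (1023 + 52 + 31);
      if (delta > 0) return 0;  // Integer is a multiple of 2^32: low word 0.
      int shift = delta + 63;   // kShiftBase + kMantissaBits; 11..63 here.
      uint32_t sign = input_high & 0x80000000u;
      // Set the implicit 1 and shift mantissa bits into the top word; low
      // bits are pulled up from input_low, exponent/sign bits fall off.
      uint64_t words =
          (static_cast<uint64_t>(input_high | (1u << 20)) << 32) | input_low;
      uint32_t magnitude = static_cast<uint32_t>((words << shift) >> 32);
      return static_cast<int32_t>(sign ? 0u - magnitude : magnitude);  // rsb
    }
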
+void MacroAssembler::EmitECMATruncate(Register result,
+ DwVfpRegister double_input,
+ SwVfpRegister single_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
+ CpuFeatures::Scope scope(VFP3);
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch.is(result) &&
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
+ ASSERT(!single_scratch.is(double_input.low()) &&
+ !single_scratch.is(double_input.high()));
+
+ Label done;
+
+ // Clear cumulative exception flags.
+ ClearFPSCRBits(kVFPExceptionMask, scratch);
+ // Try a conversion to a signed integer.
+ vcvt_s32_f64(single_scratch, double_input);
+ vmov(result, single_scratch);
+  // Retrieve the FPSCR.
+ vmrs(scratch);
+ // Check for overflow and NaNs.
+ tst(scratch, Operand(kVFPOverflowExceptionBit |
+ kVFPUnderflowExceptionBit |
+ kVFPInvalidOpExceptionBit));
+ // If we had no exceptions we are done.
+ b(eq, &done);
+
+ // Load the double value and perform a manual truncation.
+ vmov(input_low, input_high, double_input);
+ EmitOutOfInt32RangeTruncate(result,
+ input_high,
+ input_low,
+ scratch);
+ bind(&done);
+}
+
+
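
EmitECMATruncate then composes the two pieces: attempt the plain vcvt (round-to-zero by default) and, only when the FPSCR reports overflow, underflow, or an invalid operation, redo the work manually. Behaviorally, reusing OutOfInt32RangeTruncate from the previous sketch:

    #include <cmath>
    #include <cstdint>

    int32_t EcmaTruncate(double d) {
      // Stand-in for the exception-free vcvt_s32_f64 fast path: the
      // truncated value fits in int32, so the C++ cast is well defined.
      if (!std::isnan(d) && d > -2147483649.0 && d < 2147483648.0) {
        return static_cast<int32_t>(d);  // Truncates toward zero.
      }
      return OutOfInt32RangeTruncate(d);
    }
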
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7)) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
@@ -1900,11 +2174,12 @@
int num_least_bits) {
and_(dst, src, Operand((1 << num_least_bits) - 1));
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -1916,11 +2191,11 @@
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- mov(r1, Operand(ExternalReference(f)));
+ mov(r1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
@@ -1928,13 +2203,13 @@
CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function)));
+ mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1);
stub.SaveDoubles();
CallStub(&stub);
}
@@ -1973,11 +2248,13 @@
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
@@ -2002,15 +2279,16 @@
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Call(r2);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
}
}
@@ -2068,27 +2346,27 @@
}
}
void MacroAssembler::Assert(Condition cond, const char* msg) {
- if (FLAG_debug_code)
+ if (emit_debug_code())
Check(cond, msg);
}
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
LoadRoot(ip, index);
cmp(reg, ip);
Check(eq, "Register did not match expected root");
}
}
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ASSERT(!elements.is(ip));
Label ok;
push(elements);
ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
@@ -2172,11 +2450,11 @@
// We should not have found a 'with' context by walking the context chain
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
cmp(dst, ip);
Check(eq, "Yo dawg, I heard you liked function contexts "
"so I put function contexts in all your contexts");
}
@@ -2197,11 +2475,11 @@
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
// Load the initial map. The global functions all have initial maps.
ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
b(&ok);
bind(&fail);
Abort("Global functions must have initial map");
@@ -2219,10 +2497,22 @@
tst(scratch, reg);
b(ne, not_power_of_two_or_zero);
}
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
+ Register reg,
+ Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two) {
+ sub(scratch, reg, Operand(1), SetCC);
+ b(mi, zero_and_neg);
+ tst(scratch, reg);
+ b(ne, not_power_of_two);
+}
+
+
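
Both power-of-two helpers lean on the classic identity: x & (x - 1) clears the lowest set bit, so the result is zero exactly when x is a power of two or zero, and the sub with SetCC lets the AndNeg variant peel off the zero-or-negative case first. In plain C++:

    #include <cstdint>

    // sub scratch, reg, #1 ; tst scratch, reg
    bool IsPowerOfTwo(uint32_t x) {
      return x != 0 && (x & (x - 1)) == 0;
    }
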
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
@@ -2269,13 +2559,11 @@
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
- ASSERT(!src.is(ip));
- LoadRoot(ip, root_value_index);
- cmp(src, ip);
+ CompareRoot(src, root_value_index);
Assert(eq, message);
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
@@ -2387,10 +2675,64 @@
str(tmp, FieldMemOperand(dst, i * kPointerSize));
}
}
+void MacroAssembler::CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch) {
+ Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+ // Align src before copying in word size chunks.
+ bind(&align_loop);
+ cmp(length, Operand(0));
+ b(eq, &done);
+ bind(&align_loop_1);
+ tst(src, Operand(kPointerSize - 1));
+ b(eq, &word_loop);
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ sub(length, length, Operand(1), SetCC);
+ b(ne, &byte_loop_1);
+
+ // Copy bytes in word size chunks.
+ bind(&word_loop);
+ if (emit_debug_code()) {
+ tst(src, Operand(kPointerSize - 1));
+ Assert(eq, "Expecting alignment for CopyBytes");
+ }
+ cmp(length, Operand(kPointerSize));
+ b(lt, &byte_loop);
+ ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+#if CAN_USE_UNALIGNED_ACCESSES
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+#else
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+#endif
+ sub(length, length, Operand(kPointerSize));
+ b(&word_loop);
+
+ // Copy the last bytes if any left.
+ bind(&byte_loop);
+ cmp(length, Operand(0));
+ b(eq, &done);
+ bind(&byte_loop_1);
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ sub(length, length, Operand(1), SetCC);
+ b(ne, &byte_loop_1);
+ bind(&done);
+}
+
+
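
CopyBytes byte-copies until src reaches word alignment, then moves word-sized chunks, and finishes any tail bytewise; dst may still be unaligned, so unless CAN_USE_UNALIGNED_ACCESSES the loaded word is stored one byte at a time, low byte first. A host-side sketch of that strategy for a 32-bit little-endian target (kPointerSize == 4):

    #include <cstdint>
    #include <cstring>

    void CopyBytesSketch(const uint8_t* src, uint8_t* dst, uint32_t length) {
      // align_loop: byte-copy until the source is word aligned.
      while (length > 0 && (reinterpret_cast<uintptr_t>(src) & 3) != 0) {
        *dst++ = *src++;
        --length;
      }
      // word_loop: aligned word loads, bytewise stores.
      while (length >= 4) {
        uint32_t scratch;
        std::memcpy(&scratch, src, 4);             // ldr (aligned source).
        src += 4;
        for (int i = 0; i < 4; ++i) {
          *dst++ = static_cast<uint8_t>(scratch);  // strb
          scratch >>= 8;                           // mov scratch, LSR #8
        }
        length -= 4;
      }
      // byte_loop: whatever is left.
      while (length > 0) {
        *dst++ = *src++;
        --length;
      }
    }
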
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
ASSERT(!zeros.is(source) || !source.is(scratch));
ASSERT(!zeros.is(scratch));
@@ -2452,15 +2794,18 @@
and_(scratch, type, Operand(kFlatAsciiStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag));
b(ne, failure);
}
+static const int kRegisterPassedArguments = 4;
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = ActivationFrameAlignment();
+
// Up to four simple arguments are passed in registers r0..r3.
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments;
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
@@ -2473,21 +2818,32 @@
}
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- mov(ip, Operand(function));
- CallCFunction(ip, num_arguments);
+ CallCFunctionHelper(no_reg, function, ip, num_arguments);
}
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_arguments);
+}
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if defined(V8_HOST_ARCH_ARM)
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
@@ -2502,12 +2858,17 @@
#endif
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ if (function.is(no_reg)) {
+ mov(scratch, Operand(function_reference));
+ function = scratch;
+ }
Call(function);
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments;
if (OS::ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
}
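
The newly named kRegisterPassedArguments captures the AAPCS rule both functions share: the first four arguments ride in r0-r3 and only the excess occupies stack slots; when the ABI demands more than word alignment, PrepareCallCFunction (in the part this diff elides) stores the original sp just above those slots so the ldr here can restore it. The slot arithmetic on its own (illustrative helper):

    // Stack words a C call with num_arguments arguments occupies.
    int StackPassedArguments(int num_arguments) {
      const int kRegisterPassedArguments = 4;  // r0..r3 per the AAPCS.
      return (num_arguments <= kRegisterPassedArguments)
                 ? 0
                 : num_arguments - kRegisterPassedArguments;
    }
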
@@ -2517,11 +2878,11 @@
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the instruction is a ldr reg, [pc + offset] .
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));
Check(eq, "The instruction to patch should be a load from pc.");
// Result was clobbered. Restore it.
@@ -2536,10 +2897,10 @@
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}