vendor/v8/src/x64/assembler-x64.h in mustang-0.0.1 vs vendor/v8/src/x64/assembler-x64.h in mustang-0.1.0

- lines prefixed with "-" are from the old version (mustang-0.0.1)
+ lines prefixed with "+" are from the new version (mustang-0.1.0)

@@ -28,11 +28,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // A lightweight X64 Assembler. #ifndef V8_X64_ASSEMBLER_X64_H_ #define V8_X64_ASSEMBLER_X64_H_ @@ -91,12 +91,12 @@ // The non-allocatable registers are: // rsp - stack pointer // rbp - frame pointer // rsi - context register // r10 - fixed scratch register + // r12 - smi constant register // r13 - root register - // r15 - smi constant register static const int kNumRegisters = 16; static const int kNumAllocatableRegisters = 10; static int ToAllocationIndex(Register reg) { return kAllocationIndexByRegisterCode[reg.code()]; @@ -118,11 +118,11 @@ "rdi", "r8", "r9", "r11", "r14", - "r12" + "r15" }; return names[index]; } static Register toRegister(int code) { @@ -393,10 +393,17 @@ // Checks whether either base or index register is the given register. // Does not check the "reg" part of the Operand. bool AddressUsesRegister(Register reg) const; + // Queries related to the size of the generated instruction. + // Whether the generated instruction will have a REX prefix. + bool requires_rex() const { return rex_ != 0; } + // Size of the ModR/M, SIB and displacement parts of the generated + // instruction. + int operand_size() const { return len_; } + private: byte rex_; byte buf_[6]; // The number of bytes of buf_ in use. byte len_; @@ -429,54 +436,87 @@ // } class CpuFeatures : public AllStatic { public: // Detect features of the target CPU. Set safe defaults if the serializer // is enabled (snapshots must be portable). - static void Probe(bool portable); + static void Probe(); + // Check whether a feature is supported by the target CPU. 
static bool IsSupported(CpuFeature f) { + ASSERT(initialized_); if (f == SSE2 && !FLAG_enable_sse2) return false; if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == CMOV && !FLAG_enable_cmov) return false; if (f == RDTSC && !FLAG_enable_rdtsc) return false; if (f == SAHF && !FLAG_enable_sahf) return false; return (supported_ & (V8_UINT64_C(1) << f)) != 0; } + +#ifdef DEBUG // Check whether a feature is currently enabled. static bool IsEnabled(CpuFeature f) { - return (enabled_ & (V8_UINT64_C(1) << f)) != 0; + ASSERT(initialized_); + Isolate* isolate = Isolate::UncheckedCurrent(); + if (isolate == NULL) { + // When no isolate is available, work as if we're running in + // release mode. + return IsSupported(f); + } + uint64_t enabled = isolate->enabled_cpu_features(); + return (enabled & (V8_UINT64_C(1) << f)) != 0; } +#endif + // Enable a specified feature within a scope. class Scope BASE_EMBEDDED { #ifdef DEBUG public: explicit Scope(CpuFeature f) { - uint64_t mask = (V8_UINT64_C(1) << f); + uint64_t mask = V8_UINT64_C(1) << f; ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0); - old_enabled_ = CpuFeatures::enabled_; - CpuFeatures::enabled_ |= mask; + ASSERT(!Serializer::enabled() || + (CpuFeatures::found_by_runtime_probing_ & mask) == 0); + isolate_ = Isolate::UncheckedCurrent(); + old_enabled_ = 0; + if (isolate_ != NULL) { + old_enabled_ = isolate_->enabled_cpu_features(); + isolate_->set_enabled_cpu_features(old_enabled_ | mask); + } } - ~Scope() { CpuFeatures::enabled_ = old_enabled_; } + ~Scope() { + ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); + if (isolate_ != NULL) { + isolate_->set_enabled_cpu_features(old_enabled_); + } + } private: + Isolate* isolate_; uint64_t old_enabled_; #else public: explicit Scope(CpuFeature f) {} #endif }; + private: // Safe defaults include SSE2 and CMOV for X64. It is always available, if // anyone checks, but they shouldn't need to check. 
+ // The required user mode extensions in X64 are (from AMD64 ABI Table A.1): + // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV); + +#ifdef DEBUG + static bool initialized_; +#endif static uint64_t supported_; - static uint64_t enabled_; static uint64_t found_by_runtime_probing_; + + DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; -class Assembler : public Malloced { +class Assembler : public AssemblerBase { private: // We check before assembling an instruction that there is sufficient // space to write an instruction and its relocation information. // The relocation writer's position must be kGap bytes above the end of // the generated instructions. This leaves enough space for the @@ -499,13 +539,16 @@ // // If the provided buffer is not NULL, the assembler uses the provided buffer // for code generation and assumes its size to be buffer_size. If the buffer // is too small, a fatal error occurs. No deallocation of the buffer is done // upon destruction of the assembler. - Assembler(void* buffer, int buffer_size); + Assembler(Isolate* isolate, void* buffer, int buffer_size); ~Assembler(); + // Overrides the default provided by FLAG_debug_code. + void set_emit_debug_code(bool value) { emit_debug_code_ = value; } + // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other // Assembler functions are invoked in between GetCode() calls. void GetCode(CodeDesc* desc); @@ -647,11 +690,11 @@ // position (after the move) to the destination. void movl(const Operand& dst, Label* src); // Move sign extended immediate to memory location. void movq(const Operand& dst, Immediate value); - // New x64 instructions to load a 64-bit immediate into a register. + // Instructions to load a 64-bit immediate into a register. // All 64-bit immediates must have a relocation mode. 
void movq(Register dst, void* ptr, RelocInfo::Mode rmode); void movq(Register dst, int64_t value, RelocInfo::Mode rmode); void movq(Register dst, const char* s, RelocInfo::Mode rmode); // Moves the address of the external reference into the register. @@ -672,11 +715,11 @@ void repmovsb(); void repmovsw(); void repmovsl(); void repmovsq(); - // New x64 instruction to load from an immediate 64-bit pointer into RAX. + // Instruction to load from an immediate 64-bit pointer into RAX. void load_rax(void* ptr, RelocInfo::Mode rmode); void load_rax(ExternalReference ext); // Conditional moves. void cmovq(Condition cc, Register dst, Register src); @@ -1107,10 +1150,11 @@ void bt(const Operand& dst, Register src); void bts(const Operand& dst, Register src); // Miscellaneous void clc(); + void cld(); void cpuid(); void hlt(); void int3(); void nop(); void nop(int n); @@ -1282,10 +1326,12 @@ void addsd(XMMRegister dst, XMMRegister src); void subsd(XMMRegister dst, XMMRegister src); void mulsd(XMMRegister dst, XMMRegister src); void divsd(XMMRegister dst, XMMRegister src); + void andpd(XMMRegister dst, XMMRegister src); + void orpd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, const Operand& src); @@ -1339,10 +1385,13 @@ // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; + protected: + bool emit_debug_code() const { return emit_debug_code_; } + private: byte* addr_at(int pos) { return buffer_ + pos; } byte byte_at(int pos) { return buffer_[pos]; } void set_byte_at(int pos, byte value) { buffer_[pos] = value; } uint32_t long_at(int pos) { @@ -1532,21 +1581,22 @@ // The buffer into which code and relocation info are generated. byte* buffer_; int buffer_size_; // True if the assembler owns the buffer, false if buffer is external. 
bool own_buffer_; - // A previously allocated buffer of kMinimalBufferSize bytes, or NULL. - static byte* spare_buffer_; // code generation byte* pc_; // the program counter; moves forward RelocInfoWriter reloc_info_writer; List< Handle<Code> > code_targets_; // push-pop elimination byte* last_pc_; PositionsRecorder positions_recorder_; + + bool emit_debug_code_; + friend class PositionsRecorder; }; // Helper class that ensures that there is enough space for generating