HELLO·Android
系统源代码
IT资讯
技术文章
我的收藏
注册
登录
-
我收藏的文章
创建代码块
我的代码块
我的账号
Nougat 7.1
|
7.1.1_r28
下载
查看原文件
收藏
根目录
art
compiler
optimizing
code_generator_x86_64.cc
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "code_generator_x86_64.h" #include "art_method.h" #include "code_generator_utils.h" #include "compiled_method.h" #include "entrypoints/quick/quick_entrypoints.h" #include "gc/accounting/card_table.h" #include "intrinsics.h" #include "intrinsics_x86_64.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/object_reference.h" #include "thread.h" #include "utils/assembler.h" #include "utils/stack_checks.h" #include "utils/x86_64/assembler_x86_64.h" #include "utils/x86_64/managed_register_x86_64.h" namespace art { template
class GcRoot; namespace x86_64 { static constexpr int kCurrentMethodStackOffset = 0; static constexpr Register kMethodRegisterArgument = RDI; // The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump // table version generates 7 instructions and num_entries literals. Compare/jump sequence will // generates less code/data with a small num_entries. static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5; static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 }; static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 }; static constexpr int kC2ConditionMask = 0x400; #define __ down_cast
(codegen->GetAssembler())-> #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x).Int32Value() class NullCheckSlowPathX86_64 : public SlowPathCode { public: explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { // Live registers will be restored in the catch block if caught. SaveLiveRegisters(codegen, instruction_->GetLocations()); } x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); } bool IsFatal() const OVERRIDE { return true; } const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64); }; class DivZeroCheckSlowPathX86_64 : public SlowPathCode { public: explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { // Live registers will be restored in the catch block if caught. SaveLiveRegisters(codegen, instruction_->GetLocations()); } x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); } bool IsFatal() const OVERRIDE { return true; } const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64); }; class DivRemMinusOneSlowPathX86_64 : public SlowPathCode { public: DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, Primitive::Type type, bool is_div) : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { __ Bind(GetEntryLabel()); if (type_ == Primitive::kPrimInt) { if (is_div_) { __ negl(cpu_reg_); } else { __ xorl(cpu_reg_, cpu_reg_); } } else { DCHECK_EQ(Primitive::kPrimLong, type_); if (is_div_) { __ negq(cpu_reg_); } else { __ xorl(cpu_reg_, cpu_reg_); } } __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "DivRemMinusOneSlowPathX86_64"; } private: const CpuRegister cpu_reg_; const Primitive::Type type_; const bool is_div_; DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86_64); }; class SuspendCheckSlowPathX86_64 : public SlowPathCode { public: SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor) : SlowPathCode(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, instruction_->GetLocations()); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); RestoreLiveRegisters(codegen, instruction_->GetLocations()); if (successor_ == nullptr) { __ jmp(GetReturnLabel()); } else { __ jmp(x86_64_codegen->GetLabelOf(successor_)); } } Label* GetReturnLabel() { DCHECK(successor_ == nullptr); return &return_label_; } HBasicBlock* GetSuccessor() const { return successor_; } const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; } private: HBasicBlock* const successor_; Label return_label_; DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86_64); }; class BoundsCheckSlowPathX86_64 : public SlowPathCode { public: explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); if (instruction_->CanThrowIntoCatchBlock()) { // Live registers will be restored in the catch block if caught. SaveLiveRegisters(codegen, instruction_->GetLocations()); } // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt, locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); } bool IsFatal() const OVERRIDE { return true; } const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64); }; class LoadClassSlowPathX86_64 : public SlowPathCode { public: LoadClassSlowPathX86_64(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit) : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { DCHECK(at->IsLoadClass() || at->IsClinitCheck()); } void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = at_->GetLocations(); CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex())); x86_64_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage) : QUICK_ENTRY_POINT(pInitializeType), at_, dex_pc_, this); if (do_clinit_) { CheckEntrypointTypes
(); } else { CheckEntrypointTypes
(); } Location out = locations->Out(); // Move the class to the desired location. if (out.IsValid()) { DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); x86_64_codegen->Move(out, Location::RegisterLocation(RAX)); } RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathX86_64"; } private: // The class this slow path will load. HLoadClass* const cls_; // The instruction where this slow path is happening. // (Might be the load class or an initialization check). HInstruction* const at_; // The dex PC of `at_`. const uint32_t dex_pc_; // Whether to initialize the class. const bool do_clinit_; DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64); }; class LoadStringSlowPathX86_64 : public SlowPathCode { public: explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex(); __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(string_index)); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64); }; class TypeCheckSlowPathX86_64 : public SlowPathCode { public: TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal) : SlowPathCode(instruction), is_fatal_(is_fatal) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out(); uint32_t dex_pc = instruction_->GetDexPc(); DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); if (!is_fatal_) { SaveLiveRegisters(codegen, locations); } // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. InvokeRuntimeCallingConvention calling_convention; codegen->EmitParallelMoves( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot, object_class, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this); CheckEntrypointTypes< kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>(); } else { DCHECK(instruction_->IsCheckCast()); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this); CheckEntrypointTypes
(); } if (!is_fatal_) { if (instruction_->IsInstanceOf()) { x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); } RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } } const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; } bool IsFatal() const OVERRIDE { return is_fatal_; } private: const bool is_fatal_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64); }; class DeoptimizationSlowPathX86_64 : public SlowPathCode { public: explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, instruction_->GetLocations()); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); } const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64); }; class ArraySetSlowPathX86_64 : public SlowPathCode { public: explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove( locations->InAt(0), Location::RegisterLocation(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot, nullptr); parallel_move.AddMove( locations->InAt(1), Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt, nullptr); parallel_move.AddMove( locations->InAt(2), Location::RegisterLocation(calling_convention.GetRegisterAt(2)), Primitive::kPrimNot, nullptr); codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; } private: DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64); }; // Slow path marking an object during a read barrier. class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode { public: ReadBarrierMarkSlowPathX86_64(HInstruction* instruction, Location out, Location obj) : SlowPathCode(instruction), out_(out), obj_(obj) { DCHECK(kEmitCompilerReadBarrier); } const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathX86_64"; } void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister
(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); DCHECK(instruction_->IsInstanceFieldGet() || instruction_->IsStaticFieldGet() || instruction_->IsArrayGet() || instruction_->IsLoadClass() || instruction_->IsLoadString() || instruction_->IsInstanceOf() || instruction_->IsCheckCast()) << "Unexpected instruction in read barrier marking slow path: " << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); x86_64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), obj_); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
(); x86_64_codegen->Move(out_, Location::RegisterLocation(RAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } private: const Location out_; const Location obj_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86_64); }; // Slow path generating a read barrier for a heap reference. class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { public: ReadBarrierForHeapReferenceSlowPathX86_64(HInstruction* instruction, Location out, Location ref, Location obj, uint32_t offset, Location index) : SlowPathCode(instruction), out_(out), ref_(ref), obj_(obj), offset_(offset), index_(index) { DCHECK(kEmitCompilerReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial // object has been overwritten by (or after) the heap object // reference load to be instrumented, e.g.: // // __ movl(out, Address(out, offset)); // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset); // // In that case, we have lost the information about the original // object, and the emitted read barrier cannot work properly. DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out; DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; } void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); LocationSummary* locations = instruction_->GetLocations(); CpuRegister reg_out = out_.AsRegister
(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.AsRegister())) << out_; DCHECK(!instruction_->IsInvoke() || (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified())) << "Unexpected instruction in read barrier for heap reference slow path: " << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); // We may have to change the index's value, but as `index_` is a // constant member (like other "inputs" of this slow path), // introduce a copy of it, `index`. Location index = index_; if (index_.IsValid()) { // Handle `index_` for HArrayGet and intrinsic UnsafeGetObject. if (instruction_->IsArrayGet()) { // Compute real offset and store it in index_. Register index_reg = index_.AsRegister
().AsRegister(); DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg)); if (codegen->IsCoreCalleeSaveRegister(index_reg)) { // We are about to change the value of `index_reg` (see the // calls to art::x86_64::X86_64Assembler::shll and // art::x86_64::X86_64Assembler::AddImmediate below), but it // has not been saved by the previous call to // art::SlowPathCode::SaveLiveRegisters, as it is a // callee-save register -- // art::SlowPathCode::SaveLiveRegisters does not consider // callee-save registers, as it has been designed with the // assumption that callee-save registers are supposed to be // handled by the called function. So, as a callee-save // register, `index_reg` _would_ eventually be saved onto // the stack, but it would be too late: we would have // changed its value earlier. Therefore, we manually save // it here into another freely available register, // `free_reg`, chosen of course among the caller-save // registers (as a callee-save `free_reg` register would // exhibit the same problem). // // Note we could have requested a temporary register from // the register allocator instead; but we prefer not to, as // this is a slow path, and we know we can find a // caller-save register that is available. Register free_reg = FindAvailableCallerSaveRegister(codegen).AsRegister(); __ movl(CpuRegister(free_reg), CpuRegister(index_reg)); index_reg = free_reg; index = Location::RegisterLocation(index_reg); } else { // The initial register stored in `index_` has already been // saved in the call to art::SlowPathCode::SaveLiveRegisters // (as it is not a callee-save register), so we can freely // use it. } // Shifting the index value contained in `index_reg` by the // scale factor (2) cannot overflow in practice, as the // runtime is unable to allocate object arrays with a size // larger than 2^26 - 1 (that is, 2^28 - 4 bytes). __ shll(CpuRegister(index_reg), Immediate(TIMES_4)); static_assert( sizeof(mirror::HeapReference
) == sizeof(int32_t), "art::mirror::HeapReference
and int32_t have different sizes."); __ AddImmediate(CpuRegister(index_reg), Immediate(offset_)); } else { DCHECK(instruction_->IsInvoke()); DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) || (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)) << instruction_->AsInvoke()->GetIntrinsic(); DCHECK_EQ(offset_, 0U); DCHECK(index_.IsRegister()); } } // We're moving two or three locations to locations that could // overlap, so we need a parallel move resolver. InvokeRuntimeCallingConvention calling_convention; HParallelMove parallel_move(codegen->GetGraph()->GetArena()); parallel_move.AddMove(ref_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot, nullptr); parallel_move.AddMove(obj_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot, nullptr); if (index.IsValid()) { parallel_move.AddMove(index, Location::RegisterLocation(calling_convention.GetRegisterAt(2)), Primitive::kPrimInt, nullptr); codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); } else { codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); __ movl(CpuRegister(calling_convention.GetRegisterAt(2)), Immediate(offset_)); } x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes< kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>(); x86_64_codegen->Move(out_, Location::RegisterLocation(RAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86_64"; } private: CpuRegister FindAvailableCallerSaveRegister(CodeGenerator* codegen) { size_t ref = static_cast
(ref_.AsRegister
().AsRegister()); size_t obj = static_cast
(obj_.AsRegister
().AsRegister()); for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) { if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) { return static_cast
(i); } } // We shall never fail to find a free caller-save register, as // there are more than two core caller-save registers on x86-64 // (meaning it is possible to find one which is different from // `ref` and `obj`). DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u); LOG(FATAL) << "Could not find a free caller-save register"; UNREACHABLE(); } const Location out_; const Location ref_; const Location obj_; const uint32_t offset_; // An additional location containing an index to an array. // Only used for HArrayGet and the UnsafeGetObject & // UnsafeGetObjectVolatile intrinsics. const Location index_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathX86_64); }; // Slow path generating a read barrier for a GC root. class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root) : SlowPathCode(instruction), out_(out), root_(root) { DCHECK(kEmitCompilerReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg())); DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) << "Unexpected instruction in read barrier for GC root slow path: " << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; CodeGeneratorX86_64* x86_64_codegen = down_cast
(codegen); x86_64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_); x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes
*>(); x86_64_codegen->Move(out_, Location::RegisterLocation(RAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; } private: const Location out_; const Location root_; DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86_64); }; #undef __ #define __ down_cast
(GetAssembler())-> inline Condition X86_64IntegerCondition(IfCondition cond) { switch (cond) { case kCondEQ: return kEqual; case kCondNE: return kNotEqual; case kCondLT: return kLess; case kCondLE: return kLessEqual; case kCondGT: return kGreater; case kCondGE: return kGreaterEqual; case kCondB: return kBelow; case kCondBE: return kBelowEqual; case kCondA: return kAbove; case kCondAE: return kAboveEqual; } LOG(FATAL) << "Unreachable"; UNREACHABLE(); } // Maps FP condition to x86_64 name. inline Condition X86_64FPCondition(IfCondition cond) { switch (cond) { case kCondEQ: return kEqual; case kCondNE: return kNotEqual; case kCondLT: return kBelow; case kCondLE: return kBelowEqual; case kCondGT: return kAbove; case kCondGE: return kAboveEqual; default: break; // should not happen }; LOG(FATAL) << "Unreachable"; UNREACHABLE(); } HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch( const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, MethodReference target_method ATTRIBUTE_UNUSED) { switch (desired_dispatch_info.code_ptr_location) { case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup: case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect: // For direct code, we actually prefer to call via the code pointer from ArtMethod*. return HInvokeStaticOrDirect::DispatchInfo { desired_dispatch_info.method_load_kind, HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod, desired_dispatch_info.method_load_data, 0u }; default: return desired_dispatch_info; } } void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) { // All registers are assumed to be correctly set up. Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp. switch (invoke->GetMethodLoadKind()) { case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: // temp = thread->string_init_entrypoint __ gs()->movq(temp.AsRegister
(), Address::Absolute(invoke->GetStringInitOffset(), /* no_rip */ true)); break; case HInvokeStaticOrDirect::MethodLoadKind::kRecursive: callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()); break; case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress: __ movq(temp.AsRegister
(), Immediate(invoke->GetMethodAddress())); break; case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup: __ movl(temp.AsRegister
(), Immediate(0)); // Placeholder. method_patches_.emplace_back(invoke->GetTargetMethod()); __ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn. break; case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: { __ movq(temp.AsRegister
(), Address::Absolute(kDummy32BitOffset, /* no_rip */ false)); // Bind a new fixup label at the end of the "movl" insn. uint32_t offset = invoke->GetDexCacheArrayOffset(); __ Bind(NewPcRelativeDexCacheArrayPatch(*invoke->GetTargetMethod().dex_file, offset)); break; } case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: { Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()); Register method_reg; CpuRegister reg = temp.AsRegister
(); if (current_method.IsRegister()) { method_reg = current_method.AsRegister
(); } else { DCHECK(invoke->GetLocations()->Intrinsified()); DCHECK(!current_method.IsValid()); method_reg = reg.AsRegister(); __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset)); } // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_; __ movq(reg, Address(CpuRegister(method_reg), ArtMethod::DexCacheResolvedMethodsOffset(kX86_64PointerSize).SizeValue())); // temp = temp[index_in_cache]; // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file. uint32_t index_in_cache = invoke->GetDexMethodIndex(); __ movq(reg, Address(reg, CodeGenerator::GetCachePointerOffset(index_in_cache))); break; } } switch (invoke->GetCodePtrLocation()) { case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf: __ call(&frame_entry_label_); break; case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: { relative_call_patches_.emplace_back(invoke->GetTargetMethod()); Label* label = &relative_call_patches_.back().label; __ call(label); // Bind to the patch label, override at link time. __ Bind(label); // Bind the label at the end of the "call" insn. break; } case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup: case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect: // Filtered out by GetSupportedInvokeStaticOrDirectDispatch(). LOG(FATAL) << "Unsupported"; UNREACHABLE(); case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod: // (callee_method + offset_of_quick_compiled_code)() __ call(Address(callee_method.AsRegister
(), ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86_64WordSize).SizeValue())); break; } DCHECK(!IsLeafMethod()); } void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) { CpuRegister temp = temp_in.AsRegister
(); size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue(); // Use the calling convention instead of the location of the receiver, as // intrinsics may have put the receiver in a different register. In the intrinsics // slow path, the arguments have been moved to the right place, so here we are // guaranteed that the receiver is the first register of the calling convention. InvokeDexCallingConvention calling_convention; Register receiver = calling_convention.GetRegisterAt(0); size_t class_offset = mirror::Object::ClassOffset().SizeValue(); // /* HeapReference
*/ temp = receiver->klass_ __ movl(temp, Address(CpuRegister(receiver), class_offset)); MaybeRecordImplicitNullCheck(invoke); // Instead of simply (possibly) unpoisoning `temp` here, we should // emit a read barrier for the previous class reference load. // However this is not required in practice, as this is an // intermediate/temporary reference and because the current // concurrent copying collector keeps the from-space memory // intact/accessible until the end of the marking phase (the // concurrent copying collector may not in the future). __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetMethodAt(method_offset); __ movq(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86_64WordSize).SizeValue())); } void CodeGeneratorX86_64::RecordSimplePatch() { if (GetCompilerOptions().GetIncludePatchInformation()) { simple_patches_.emplace_back(); __ Bind(&simple_patches_.back()); } } void CodeGeneratorX86_64::RecordStringPatch(HLoadString* load_string) { string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex()); __ Bind(&string_patches_.back().label); } Label* CodeGeneratorX86_64::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset) { // Add a patch entry and return the label. pc_relative_dex_cache_patches_.emplace_back(dex_file, element_offset); return &pc_relative_dex_cache_patches_.back().label; } void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector
* linker_patches) { DCHECK(linker_patches->empty()); size_t size = method_patches_.size() + relative_call_patches_.size() + pc_relative_dex_cache_patches_.size() + simple_patches_.size() + string_patches_.size(); linker_patches->reserve(size); // The label points to the end of the "movl" insn but the literal offset for method // patch needs to point to the embedded constant which occupies the last 4 bytes. constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u; for (const MethodPatchInfo
& info : method_patches_) { uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment; linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset, info.target_method.dex_file, info.target_method.dex_method_index)); } for (const MethodPatchInfo
& info : relative_call_patches_) { uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment; linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset, info.target_method.dex_file, info.target_method.dex_method_index)); } for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) { uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment; linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset, &info.target_dex_file, info.label.Position(), info.element_offset)); } for (const Label& label : simple_patches_) { uint32_t literal_offset = label.Position() - kLabelPositionToLiteralOffsetAdjustment; linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset)); } for (const StringPatchInfo
& info : string_patches_) { // These are always PC-relative, see GetSupportedLoadStringKind(). uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment; linker_patches->push_back(LinkerPatch::RelativeStringPatch(literal_offset, &info.dex_file, info.label.Position(), info.string_index)); } } void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const { stream << Register(reg); } void CodeGeneratorX86_64::DumpFloatingPointRegister(std::ostream& stream, int reg) const { stream << FloatRegister(reg); } size_t CodeGeneratorX86_64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) { __ movq(Address(CpuRegister(RSP), stack_index), CpuRegister(reg_id)); return kX86_64WordSize; } size_t CodeGeneratorX86_64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) { __ movq(CpuRegister(reg_id), Address(CpuRegister(RSP), stack_index)); return kX86_64WordSize; } size_t CodeGeneratorX86_64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) { __ movsd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id)); return kX86_64WordSize; } size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) { __ movsd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index)); return kX86_64WordSize; } void CodeGeneratorX86_64::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { InvokeRuntime(GetThreadOffset
(entrypoint).Int32Value(), instruction, dex_pc, slow_path); } void CodeGeneratorX86_64::InvokeRuntime(int32_t entry_point_offset, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { ValidateInvokeRuntime(instruction, slow_path); __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true)); RecordPcInfo(instruction, dex_pc, slow_path); } static constexpr int kNumberOfCpuRegisterPairs = 0; // Use a fake return address register to mimic Quick. static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1); CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph, const X86_64InstructionSetFeatures& isa_features, const CompilerOptions& compiler_options, OptimizingCompilerStats* stats) : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfFloatRegisters, kNumberOfCpuRegisterPairs, ComputeRegisterMask(reinterpret_cast
(kCoreCalleeSaves), arraysize(kCoreCalleeSaves)) | (1 << kFakeReturnRegister), ComputeRegisterMask(reinterpret_cast
(kFpuCalleeSaves), arraysize(kFpuCalleeSaves)), compiler_options, stats), block_labels_(nullptr), location_builder_(graph, this), instruction_visitor_(graph, this), move_resolver_(graph->GetArena(), this), assembler_(graph->GetArena()), isa_features_(isa_features), constant_area_start_(0), method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)), fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister)); } InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen) : InstructionCodeGenerator(graph, codegen), assembler_(codegen->GetAssembler()), codegen_(codegen) {} void CodeGeneratorX86_64::SetupBlockedRegisters() const { // Stack register is always reserved. blocked_core_registers_[RSP] = true; // Block the register used as TMP. blocked_core_registers_[TMP] = true; } static dwarf::Reg DWARFReg(Register reg) { return dwarf::Reg::X86_64Core(static_cast
(reg)); } static dwarf::Reg DWARFReg(FloatRegister reg) { return dwarf::Reg::X86_64Fp(static_cast
(reg)); } void CodeGeneratorX86_64::GenerateFrameEntry() { __ cfi().SetCurrentCFAOffset(kX86_64WordSize); // return address __ Bind(&frame_entry_label_); bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64); DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); if (!skip_overflow_check) { __ testq(CpuRegister(RAX), Address( CpuRegister(RSP), -static_cast
(GetStackOverflowReservedBytes(kX86_64)))); RecordPcInfo(nullptr, 0); } if (HasEmptyFrame()) { return; } for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) { Register reg = kCoreCalleeSaves[i]; if (allocated_registers_.ContainsCoreRegister(reg)) { __ pushq(CpuRegister(reg)); __ cfi().AdjustCFAOffset(kX86_64WordSize); __ cfi().RelOffset(DWARFReg(reg), 0); } } int adjust = GetFrameSize() - GetCoreSpillSize(); __ subq(CpuRegister(RSP), Immediate(adjust)); __ cfi().AdjustCFAOffset(adjust); uint32_t xmm_spill_location = GetFpuSpillStart(); size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize(); for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) { if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) { int offset = xmm_spill_location + (xmm_spill_slot_size * i); __ movsd(Address(CpuRegister(RSP), offset), XmmRegister(kFpuCalleeSaves[i])); __ cfi().RelOffset(DWARFReg(kFpuCalleeSaves[i]), offset); } } __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(kMethodRegisterArgument)); } void CodeGeneratorX86_64::GenerateFrameExit() { __ cfi().RememberState(); if (!HasEmptyFrame()) { uint32_t xmm_spill_location = GetFpuSpillStart(); size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize(); for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) { if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) { int offset = xmm_spill_location + (xmm_spill_slot_size * i); __ movsd(XmmRegister(kFpuCalleeSaves[i]), Address(CpuRegister(RSP), offset)); __ cfi().Restore(DWARFReg(kFpuCalleeSaves[i])); } } int adjust = GetFrameSize() - GetCoreSpillSize(); __ addq(CpuRegister(RSP), Immediate(adjust)); __ cfi().AdjustCFAOffset(-adjust); for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) { Register reg = kCoreCalleeSaves[i]; if (allocated_registers_.ContainsCoreRegister(reg)) { __ popq(CpuRegister(reg)); __ cfi().AdjustCFAOffset(-static_cast
(kX86_64WordSize)); __ cfi().Restore(DWARFReg(reg)); } } } __ ret(); __ cfi().RestoreState(); __ cfi().DefCFAOffset(GetFrameSize()); } void CodeGeneratorX86_64::Bind(HBasicBlock* block) { __ Bind(GetLabelOf(block)); } void CodeGeneratorX86_64::Move(Location destination, Location source) { if (source.Equals(destination)) { return; } if (destination.IsRegister()) { CpuRegister dest = destination.AsRegister
(); if (source.IsRegister()) { __ movq(dest, source.AsRegister
()); } else if (source.IsFpuRegister()) { __ movd(dest, source.AsFpuRegister
()); } else if (source.IsStackSlot()) { __ movl(dest, Address(CpuRegister(RSP), source.GetStackIndex())); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); if (constant->IsLongConstant()) { Load64BitValue(dest, constant->AsLongConstant()->GetValue()); } else { Load32BitValue(dest, GetInt32ValueOf(constant)); } } else { DCHECK(source.IsDoubleStackSlot()); __ movq(dest, Address(CpuRegister(RSP), source.GetStackIndex())); } } else if (destination.IsFpuRegister()) { XmmRegister dest = destination.AsFpuRegister
(); if (source.IsRegister()) { __ movd(dest, source.AsRegister
()); } else if (source.IsFpuRegister()) { __ movaps(dest, source.AsFpuRegister
()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); int64_t value = CodeGenerator::GetInt64ValueOf(constant); if (constant->IsFloatConstant()) { Load32BitValue(dest, static_cast
(value)); } else { Load64BitValue(dest, value); } } else if (source.IsStackSlot()) { __ movss(dest, Address(CpuRegister(RSP), source.GetStackIndex())); } else { DCHECK(source.IsDoubleStackSlot()); __ movsd(dest, Address(CpuRegister(RSP), source.GetStackIndex())); } } else if (destination.IsStackSlot()) { if (source.IsRegister()) { __ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), source.AsRegister
()); } else if (source.IsFpuRegister()) { __ movss(Address(CpuRegister(RSP), destination.GetStackIndex()), source.AsFpuRegister
()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); int32_t value = GetInt32ValueOf(constant); __ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), Immediate(value)); } else { DCHECK(source.IsStackSlot()) << source; __ movl(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex())); __ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP)); } } else { DCHECK(destination.IsDoubleStackSlot()); if (source.IsRegister()) { __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), source.AsRegister
()); } else if (source.IsFpuRegister()) { __ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()), source.AsFpuRegister
()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); int64_t value; if (constant->IsDoubleConstant()) { value = bit_cast
(constant->AsDoubleConstant()->GetValue()); } else { DCHECK(constant->IsLongConstant()); value = constant->AsLongConstant()->GetValue(); } Store64BitValueToStack(destination, value); } else { DCHECK(source.IsDoubleStackSlot()); __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex())); __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP)); } } } void CodeGeneratorX86_64::MoveConstant(Location location, int32_t value) { DCHECK(location.IsRegister()); Load64BitValue(location.AsRegister
(), static_cast
(value)); } void CodeGeneratorX86_64::MoveLocation( Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) { Move(dst, src); } void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary* locations) { if (location.IsRegister()) { locations->AddTemp(location); } else { UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location; } } void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) { DCHECK(!successor->IsExitBlock()); HBasicBlock* block = got->GetBlock(); HInstruction* previous = got->GetPrevious(); HLoopInformation* info = block->GetLoopInformation(); if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) { GenerateSuspendCheck(info->GetSuspendCheck(), successor); return; } if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); } if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) { __ jmp(codegen_->GetLabelOf(successor)); } } void LocationsBuilderX86_64::VisitGoto(HGoto* got) { got->SetLocations(nullptr); } void InstructionCodeGeneratorX86_64::VisitGoto(HGoto* got) { HandleGoto(got, got->GetSuccessor()); } void LocationsBuilderX86_64::VisitTryBoundary(HTryBoundary* try_boundary) { try_boundary->SetLocations(nullptr); } void InstructionCodeGeneratorX86_64::VisitTryBoundary(HTryBoundary* try_boundary) { HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor(); if (!successor->IsExitBlock()) { HandleGoto(try_boundary, successor); } } void LocationsBuilderX86_64::VisitExit(HExit* exit) { exit->SetLocations(nullptr); } void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } template
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label) { if (cond->IsFPConditionTrueIfNaN()) { __ j(kUnordered, true_label); } else if (cond->IsFPConditionFalseIfNaN()) { __ j(kUnordered, false_label); } __ j(X86_64FPCondition(cond->GetCondition()), true_label); } void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition) { LocationSummary* locations = condition->GetLocations(); Location left = locations->InAt(0); Location right = locations->InAt(1); Primitive::Type type = condition->InputAt(0)->GetType(); switch (type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimChar: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimNot: { CpuRegister left_reg = left.AsRegister
(); if (right.IsConstant()) { int32_t value = CodeGenerator::GetInt32ValueOf(right.GetConstant()); if (value == 0) { __ testl(left_reg, left_reg); } else { __ cmpl(left_reg, Immediate(value)); } } else if (right.IsStackSlot()) { __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ cmpl(left_reg, right.AsRegister
()); } break; } case Primitive::kPrimLong: { CpuRegister left_reg = left.AsRegister
(); if (right.IsConstant()) { int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); codegen_->Compare64BitValue(left_reg, value); } else if (right.IsDoubleStackSlot()) { __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ cmpq(left_reg, right.AsRegister
()); } break; } case Primitive::kPrimFloat: { if (right.IsFpuRegister()) { __ ucomiss(left.AsFpuRegister
(), right.AsFpuRegister
()); } else if (right.IsConstant()) { __ ucomiss(left.AsFpuRegister
(), codegen_->LiteralFloatAddress( right.GetConstant()->AsFloatConstant()->GetValue())); } else { DCHECK(right.IsStackSlot()); __ ucomiss(left.AsFpuRegister
(), Address(CpuRegister(RSP), right.GetStackIndex())); } break; } case Primitive::kPrimDouble: { if (right.IsFpuRegister()) { __ ucomisd(left.AsFpuRegister
(), right.AsFpuRegister
()); } else if (right.IsConstant()) { __ ucomisd(left.AsFpuRegister
(), codegen_->LiteralDoubleAddress( right.GetConstant()->AsDoubleConstant()->GetValue())); } else { DCHECK(right.IsDoubleStackSlot()); __ ucomisd(left.AsFpuRegister
(), Address(CpuRegister(RSP), right.GetStackIndex())); } break; } default: LOG(FATAL) << "Unexpected condition type " << type; } } template
void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HCondition* condition, LabelType* true_target_in, LabelType* false_target_in) { // Generated branching requires both targets to be explicit. If either of the // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead. LabelType fallthrough_target; LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in; LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in; // Generate the comparison to set the CC. GenerateCompareTest(condition); // Now generate the correct jump(s). Primitive::Type type = condition->InputAt(0)->GetType(); switch (type) { case Primitive::kPrimLong: { __ j(X86_64IntegerCondition(condition->GetCondition()), true_target); break; } case Primitive::kPrimFloat: { GenerateFPJumps(condition, true_target, false_target); break; } case Primitive::kPrimDouble: { GenerateFPJumps(condition, true_target, false_target); break; } default: LOG(FATAL) << "Unexpected condition type " << type; } if (false_target != &fallthrough_target) { __ jmp(false_target); } if (fallthrough_target.IsLinked()) { __ Bind(&fallthrough_target); } } static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) { // Moves may affect the eflags register (move zero uses xorl), so the EFLAGS // are set only strictly before `branch`. We can't use the eflags on long // conditions if they are materialized due to the complex branching. return cond->IsCondition() && cond->GetNext() == branch && !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType()); } template
void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruction, size_t condition_input_index, LabelType* true_target, LabelType* false_target) { HInstruction* cond = instruction->InputAt(condition_input_index); if (true_target == nullptr && false_target == nullptr) { // Nothing to do. The code always falls through. return; } else if (cond->IsIntConstant()) { // Constant condition, statically compared against "true" (integer value 1). if (cond->AsIntConstant()->IsTrue()) { if (true_target != nullptr) { __ jmp(true_target); } } else { DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue(); if (false_target != nullptr) { __ jmp(false_target); } } return; } // The following code generates these patterns: // (1) true_target == nullptr && false_target != nullptr // - opposite condition true => branch to false_target // (2) true_target != nullptr && false_target == nullptr // - condition true => branch to true_target // (3) true_target != nullptr && false_target != nullptr // - condition true => branch to true_target // - branch to false_target if (IsBooleanValueOrMaterializedCondition(cond)) { if (AreEflagsSetFrom(cond, instruction)) { if (true_target == nullptr) { __ j(X86_64IntegerCondition(cond->AsCondition()->GetOppositeCondition()), false_target); } else { __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target); } } else { // Materialized condition, compare against 0. Location lhs = instruction->GetLocations()->InAt(condition_input_index); if (lhs.IsRegister()) { __ testl(lhs.AsRegister
(), lhs.AsRegister
()); } else { __ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0)); } if (true_target == nullptr) { __ j(kEqual, false_target); } else { __ j(kNotEqual, true_target); } } } else { // Condition has not been materialized, use its inputs as the // comparison and its condition as the branch condition. HCondition* condition = cond->AsCondition(); // If this is a long or FP comparison that has been folded into // the HCondition, generate the comparison directly. Primitive::Type type = condition->InputAt(0)->GetType(); if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) { GenerateCompareTestAndBranch(condition, true_target, false_target); return; } Location lhs = condition->GetLocations()->InAt(0); Location rhs = condition->GetLocations()->InAt(1); if (rhs.IsRegister()) { __ cmpl(lhs.AsRegister
(), rhs.AsRegister
()); } else if (rhs.IsConstant()) { int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); codegen_->Compare32BitValue(lhs.AsRegister
(), constant); } else { __ cmpl(lhs.AsRegister
(), Address(CpuRegister(RSP), rhs.GetStackIndex())); } if (true_target == nullptr) { __ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target); } else { __ j(X86_64IntegerCondition(condition->GetCondition()), true_target); } } // If neither branch falls through (case 3), the conditional branch to `true_target` // was already emitted (case 2) and we need to emit a jump to `false_target`. if (true_target != nullptr && false_target != nullptr) { __ jmp(false_target); } } void LocationsBuilderX86_64::VisitIf(HIf* if_instr) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { locations->SetInAt(0, Location::Any()); } } void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) { HBasicBlock* true_successor = if_instr->IfTrueSuccessor(); HBasicBlock* false_successor = if_instr->IfFalseSuccessor(); Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ? nullptr : codegen_->GetLabelOf(true_successor); Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? nullptr : codegen_->GetLabelOf(false_successor); GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); } void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::Any()); } } void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath
(deoptimize); GenerateTestAndBranch
(deoptimize, /* condition_input_index */ 0, slow_path->GetEntryLabel(), /* false_target */ nullptr); } static bool SelectCanUseCMOV(HSelect* select) { // There are no conditional move instructions for XMMs. if (Primitive::IsFloatingPointType(select->GetType())) { return false; } // A FP condition doesn't generate the single CC that we need. HInstruction* condition = select->GetCondition(); if (condition->IsCondition() && Primitive::IsFloatingPointType(condition->InputAt(0)->GetType())) { return false; } // We can generate a CMOV for this Select. return true; } void LocationsBuilderX86_64::VisitSelect(HSelect* select) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select); if (Primitive::IsFloatingPointType(select->GetType())) { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); } else { locations->SetInAt(0, Location::RequiresRegister()); if (SelectCanUseCMOV(select)) { if (select->InputAt(1)->IsConstant()) { locations->SetInAt(1, Location::RequiresRegister()); } else { locations->SetInAt(1, Location::Any()); } } else { locations->SetInAt(1, Location::Any()); } } if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) { locations->SetInAt(2, Location::RequiresRegister()); } locations->SetOut(Location::SameAsFirstInput()); } void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) { LocationSummary* locations = select->GetLocations(); if (SelectCanUseCMOV(select)) { // If both the condition and the source types are integer, we can generate // a CMOV to implement Select. CpuRegister value_false = locations->InAt(0).AsRegister
(); Location value_true_loc = locations->InAt(1); DCHECK(locations->InAt(0).Equals(locations->Out())); HInstruction* select_condition = select->GetCondition(); Condition cond = kNotEqual; // Figure out how to test the 'condition'. if (select_condition->IsCondition()) { HCondition* condition = select_condition->AsCondition(); if (!condition->IsEmittedAtUseSite()) { // This was a previously materialized condition. // Can we use the existing condition code? if (AreEflagsSetFrom(condition, select)) { // Materialization was the previous instruction. Condition codes are right. cond = X86_64IntegerCondition(condition->GetCondition()); } else { // No, we have to recreate the condition code. CpuRegister cond_reg = locations->InAt(2).AsRegister
(); __ testl(cond_reg, cond_reg); } } else { GenerateCompareTest(condition); cond = X86_64IntegerCondition(condition->GetCondition()); } } else { // Must be a boolean condition, which needs to be compared to 0. CpuRegister cond_reg = locations->InAt(2).AsRegister
(); __ testl(cond_reg, cond_reg); } // If the condition is true, overwrite the output, which already contains false. // Generate the correct sized CMOV. bool is_64_bit = Primitive::Is64BitType(select->GetType()); if (value_true_loc.IsRegister()) { __ cmov(cond, value_false, value_true_loc.AsRegister
(), is_64_bit); } else { __ cmov(cond, value_false, Address(CpuRegister(RSP), value_true_loc.GetStackIndex()), is_64_bit); } } else { NearLabel false_target; GenerateTestAndBranch
(select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target); codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType()); __ Bind(&false_target); } } void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) { new (GetGraph()->GetArena()) LocationSummary(info); } void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo*) { // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile. } void CodeGeneratorX86_64::GenerateNop() { __ nop(); } void LocationsBuilderX86_64::HandleCondition(HCondition* cond) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); // Handle the long/FP comparisons made in instruction simplification. switch (cond->InputAt(0)->GetType()) { case Primitive::kPrimLong: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); break; case Primitive::kPrimFloat: case Primitive::kPrimDouble: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); break; default: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); break; } if (!cond->IsEmittedAtUseSite()) { locations->SetOut(Location::RequiresRegister()); } } void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) { if (cond->IsEmittedAtUseSite()) { return; } LocationSummary* locations = cond->GetLocations(); Location lhs = locations->InAt(0); Location rhs = locations->InAt(1); CpuRegister reg = locations->Out().AsRegister
(); NearLabel true_label, false_label; switch (cond->InputAt(0)->GetType()) { default: // Integer case. // Clear output register: setcc only sets the low byte. __ xorl(reg, reg); if (rhs.IsRegister()) { __ cmpl(lhs.AsRegister
(), rhs.AsRegister
()); } else if (rhs.IsConstant()) { int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); codegen_->Compare32BitValue(lhs.AsRegister
(), constant); } else { __ cmpl(lhs.AsRegister
(), Address(CpuRegister(RSP), rhs.GetStackIndex())); } __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); return; case Primitive::kPrimLong: // Clear output register: setcc only sets the low byte. __ xorl(reg, reg); if (rhs.IsRegister()) { __ cmpq(lhs.AsRegister
(), rhs.AsRegister
()); } else if (rhs.IsConstant()) { int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue(); codegen_->Compare64BitValue(lhs.AsRegister
(), value); } else { __ cmpq(lhs.AsRegister
(), Address(CpuRegister(RSP), rhs.GetStackIndex())); } __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); return; case Primitive::kPrimFloat: { XmmRegister lhs_reg = lhs.AsFpuRegister
(); if (rhs.IsConstant()) { float value = rhs.GetConstant()->AsFloatConstant()->GetValue(); __ ucomiss(lhs_reg, codegen_->LiteralFloatAddress(value)); } else if (rhs.IsStackSlot()) { __ ucomiss(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex())); } else { __ ucomiss(lhs_reg, rhs.AsFpuRegister
()); } GenerateFPJumps(cond, &true_label, &false_label); break; } case Primitive::kPrimDouble: { XmmRegister lhs_reg = lhs.AsFpuRegister
(); if (rhs.IsConstant()) { double value = rhs.GetConstant()->AsDoubleConstant()->GetValue(); __ ucomisd(lhs_reg, codegen_->LiteralDoubleAddress(value)); } else if (rhs.IsDoubleStackSlot()) { __ ucomisd(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex())); } else { __ ucomisd(lhs_reg, rhs.AsFpuRegister
()); } GenerateFPJumps(cond, &true_label, &false_label); break; } } // Convert the jumps into the result. NearLabel done_label; // False case: result = 0. __ Bind(&false_label); __ xorl(reg, reg); __ jmp(&done_label); // True case: result = 1. __ Bind(&true_label); __ movl(reg, Immediate(1)); __ Bind(&done_label); } void LocationsBuilderX86_64::VisitEqual(HEqual* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitEqual(HEqual* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitNotEqual(HNotEqual* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitNotEqual(HNotEqual* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitLessThan(HLessThan* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitLessThan(HLessThan* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitLessThanOrEqual(HLessThanOrEqual* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitLessThanOrEqual(HLessThanOrEqual* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitGreaterThan(HGreaterThan* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitGreaterThan(HGreaterThan* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitBelow(HBelow* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitBelow(HBelow* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitAbove(HAbove* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitAbove(HAbove* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) { HandleCondition(comp); } void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) { HandleCondition(comp); } void LocationsBuilderX86_64::VisitCompare(HCompare* compare) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); switch (compare->InputAt(0)->GetType()) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimChar: case Primitive::kPrimInt: case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } case Primitive::kPrimFloat: case Primitive::kPrimDouble: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::RequiresRegister()); break; } default: LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType(); } } void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) { LocationSummary* locations = compare->GetLocations(); CpuRegister out = locations->Out().AsRegister
(); Location left = locations->InAt(0); Location right = locations->InAt(1); NearLabel less, greater, done; Primitive::Type type = compare->InputAt(0)->GetType(); Condition less_cond = kLess; switch (type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimChar: case Primitive::kPrimInt: { CpuRegister left_reg = left.AsRegister
(); if (right.IsConstant()) { int32_t value = right.GetConstant()->AsIntConstant()->GetValue(); codegen_->Compare32BitValue(left_reg, value); } else if (right.IsStackSlot()) { __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ cmpl(left_reg, right.AsRegister
()); } break; } case Primitive::kPrimLong: { CpuRegister left_reg = left.AsRegister
(); if (right.IsConstant()) { int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); codegen_->Compare64BitValue(left_reg, value); } else if (right.IsDoubleStackSlot()) { __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ cmpq(left_reg, right.AsRegister
()); } break; } case Primitive::kPrimFloat: { XmmRegister left_reg = left.AsFpuRegister
(); if (right.IsConstant()) { float value = right.GetConstant()->AsFloatConstant()->GetValue(); __ ucomiss(left_reg, codegen_->LiteralFloatAddress(value)); } else if (right.IsStackSlot()) { __ ucomiss(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ ucomiss(left_reg, right.AsFpuRegister
()); } __ j(kUnordered, compare->IsGtBias() ? &greater : &less); less_cond = kBelow; // ucomis{s,d} sets CF break; } case Primitive::kPrimDouble: { XmmRegister left_reg = left.AsFpuRegister
(); if (right.IsConstant()) { double value = right.GetConstant()->AsDoubleConstant()->GetValue(); __ ucomisd(left_reg, codegen_->LiteralDoubleAddress(value)); } else if (right.IsDoubleStackSlot()) { __ ucomisd(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); } else { __ ucomisd(left_reg, right.AsFpuRegister
()); } __ j(kUnordered, compare->IsGtBias() ? &greater : &less); less_cond = kBelow; // ucomis{s,d} sets CF break; } default: LOG(FATAL) << "Unexpected compare type " << type; } __ movl(out, Immediate(0)); __ j(kEqual, &done); __ j(less_cond, &less); __ Bind(&greater); __ movl(out, Immediate(1)); __ jmp(&done); __ Bind(&less); __ movl(out, Immediate(-1)); __ Bind(&done); } void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall); locations->SetOut(Location::ConstantLocation(constant)); } void InstructionCodeGeneratorX86_64::VisitDoubleConstant( HDoubleConstant* constant ATTRIBUTE_UNUSED) { // Will be generated at use site. } void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { memory_barrier->SetLocations(nullptr); } void InstructionCodeGeneratorX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind()); } void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) { ret->SetLocations(nullptr); } void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) { codegen_->GenerateFrameExit(); } void LocationsBuilderX86_64::VisitReturn(HReturn* ret) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall); switch (ret->InputAt(0)->GetType()) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimChar: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimNot: case Primitive::kPrimLong: locations->SetInAt(0, Location::RegisterLocation(RAX)); break; case Primitive::kPrimFloat: case Primitive::kPrimDouble: locations->SetInAt(0, Location::FpuRegisterLocation(XMM0)); break; default: LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType(); } } void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) { if (kIsDebugBuild) { switch (ret->InputAt(0)->GetType()) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimChar: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimNot: case Primitive::kPrimLong: DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister
().AsRegister(), RAX); break; case Primitive::kPrimFloat: case Primitive::kPrimDouble: DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister
().AsFloatRegister(), XMM0); break; default: LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType(); } } codegen_->GenerateFrameExit(); } Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(Primitive::Type type) const { switch (type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimChar: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimNot: case Primitive::kPrimLong: return Location::RegisterLocation(RAX); case Primitive::kPrimVoid: return Location::NoLocation(); case Primitive::kPrimDouble: case Primitive::kPrimFloat: return Location::FpuRegisterLocation(XMM0); } UNREACHABLE(); } Location InvokeDexCallingConventionVisitorX86_64::GetMethodLocation() const { return Location::RegisterLocation(kMethodRegisterArgument); } Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Type type) { switch (type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: case Primitive::kPrimChar: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimNot: { uint32_t index = gp_index_++; stack_index_++; if (index < calling_convention.GetNumberOfRegisters()) { return Location::RegisterLocation(calling_convention.GetRegisterAt(index)); } else { return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1)); } } case Primitive::kPrimLong: { uint32_t index = gp_index_; stack_index_ += 2; if (index < calling_convention.GetNumberOfRegisters()) { gp_index_ += 1; return Location::RegisterLocation(calling_convention.GetRegisterAt(index)); } else { gp_index_ += 2; return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2)); } } case Primitive::kPrimFloat: { uint32_t index = float_index_++; stack_index_++; if (index < calling_convention.GetNumberOfFpuRegisters()) { return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index)); } else { return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1)); } } case Primitive::kPrimDouble: { uint32_t index = float_index_++; stack_index_ += 2; if (index < calling_convention.GetNumberOfFpuRegisters()) { return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index)); } else { return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2)); } } case Primitive::kPrimVoid: LOG(FATAL) << "Unexpected parameter type " << type; break; } return Location::NoLocation(); } void LocationsBuilderX86_64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { // The trampoline uses the same calling convention as dex calling conventions, // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain // the method_idx. HandleInvoke(invoke); } void InstructionCodeGeneratorX86_64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke); } void LocationsBuilderX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { // Explicit clinit checks triggered by static invokes must have been pruned by // art::PrepareForRegisterAllocation. DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); IntrinsicLocationsBuilderX86_64 intrinsic(codegen_); if (intrinsic.TryDispatch(invoke)) { return; } HandleInvoke(invoke); } static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorX86_64* codegen) { if (invoke->GetLocations()->Intrinsified()) { IntrinsicCodeGeneratorX86_64 intrinsic(codegen); intrinsic.Dispatch(invoke); return true; } return false; } void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { // Explicit clinit checks triggered by static invokes must have been pruned by // art::PrepareForRegisterAllocation. DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); if (TryGenerateIntrinsicCode(invoke, codegen_)) { return; } LocationSummary* locations = invoke->GetLocations(); codegen_->GenerateStaticOrDirectCall( invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation()); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) { InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor; CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor); } void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) { IntrinsicLocationsBuilderX86_64 intrinsic(codegen_); if (intrinsic.TryDispatch(invoke)) { return; } HandleInvoke(invoke); } void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) { if (TryGenerateIntrinsicCode(invoke, codegen_)) { return; } codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0)); DCHECK(!codegen_->IsLeafMethod()); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) { HandleInvoke(invoke); // Add the hidden argument. invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX)); } void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) { // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. LocationSummary* locations = invoke->GetLocations(); CpuRegister temp = locations->GetTemp(0).AsRegister
(); CpuRegister hidden_reg = locations->GetTemp(1).AsRegister
(); Location receiver = locations->InAt(0); size_t class_offset = mirror::Object::ClassOffset().SizeValue(); // Set the hidden argument. This is safe to do this here, as RAX // won't be modified thereafter, before the `call` instruction. DCHECK_EQ(RAX, hidden_reg.AsRegister()); codegen_->Load64BitValue(hidden_reg, invoke->GetDexMethodIndex()); if (receiver.IsStackSlot()) { __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex())); // /* HeapReference
*/ temp = temp->klass_ __ movl(temp, Address(temp, class_offset)); } else { // /* HeapReference
*/ temp = receiver->klass_ __ movl(temp, Address(receiver.AsRegister
(), class_offset)); } codegen_->MaybeRecordImplicitNullCheck(invoke); // Instead of simply (possibly) unpoisoning `temp` here, we should // emit a read barrier for the previous class reference load. // However this is not required in practice, as this is an // intermediate/temporary reference and because the current // concurrent copying collector keeps the from-space memory // intact/accessible until the end of the marking phase (the // concurrent copying collector may not in the future). __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetAddressOfIMT() __ movq(temp, Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value())); // temp = temp->GetImtEntryAt(method_offset); uint32_t method_offset = static_cast
(ImTable::OffsetOfElement( invoke->GetImtIndex() % ImTable::kSize, kX86_64PointerSize)); // temp = temp->GetImtEntryAt(method_offset); __ movq(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize).SizeValue())); DCHECK(!codegen_->IsLeafMethod()); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } void LocationsBuilderX86_64::VisitNeg(HNeg* neg) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); switch (neg->GetResultType()) { case Primitive::kPrimInt: case Primitive::kPrimLong: locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::SameAsFirstInput()); break; case Primitive::kPrimFloat: case Primitive::kPrimDouble: locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresFpuRegister()); break; default: LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); } } void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) { LocationSummary* locations = neg->GetLocations(); Location out = locations->Out(); Location in = locations->InAt(0); switch (neg->GetResultType()) { case Primitive::kPrimInt: DCHECK(in.IsRegister()); DCHECK(in.Equals(out)); __ negl(out.AsRegister
()); break; case Primitive::kPrimLong: DCHECK(in.IsRegister()); DCHECK(in.Equals(out)); __ negq(out.AsRegister
()); break; case Primitive::kPrimFloat: { DCHECK(in.Equals(out)); XmmRegister mask = locations->GetTemp(0).AsFpuRegister
(); // Implement float negation with an exclusive or with value // 0x80000000 (mask for bit 31, representing the sign of a // single-precision floating-point number). __ movss(mask, codegen_->LiteralInt32Address(0x80000000)); __ xorps(out.AsFpuRegister
(), mask); break; } case Primitive::kPrimDouble: { DCHECK(in.Equals(out)); XmmRegister mask = locations->GetTemp(0).AsFpuRegister
(); // Implement double negation with an exclusive or with value // 0x8000000000000000 (mask for bit 63, representing the sign of // a double-precision floating-point number). __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000))); __ xorpd(out.AsFpuRegister
(), mask); break; } default: LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); } } void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall); Primitive::Type result_type = conversion->GetResultType(); Primitive::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); // The Java language does not allow treating boolean as an integral type but // our bit representation makes it safe. switch (result_type) { case Primitive::kPrimByte: switch (input_type) { case Primitive::kPrimLong: // Type conversion from long to byte is a result of code transformations. case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-byte' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimShort: switch (input_type) { case Primitive::kPrimLong: // Type conversion from long to short is a result of code transformations. case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-short' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimInt: switch (input_type) { case Primitive::kPrimLong: // Processing a Dex `long-to-int' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; case Primitive::kPrimFloat: // Processing a Dex `float-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimLong: switch (input_type) { case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-long' instruction. // TODO: We would benefit from a (to-be-implemented) // Location::RegisterOrStackSlot requirement for this input. locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); break; case Primitive::kPrimFloat: // Processing a Dex `float-to-long' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-long' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimChar: switch (input_type) { case Primitive::kPrimLong: // Type conversion from long to char is a result of code transformations. case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: // Processing a Dex `int-to-char' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimFloat: switch (input_type) { case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-float' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimLong: // Processing a Dex `long-to-float' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-float' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; }; break; case Primitive::kPrimDouble: switch (input_type) { case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimLong: // Processing a Dex `long-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister()); break; case Primitive::kPrimFloat: // Processing a Dex `float-to-double' instruction. locations->SetInAt(0, Location::Any()); locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } } void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conversion) { LocationSummary* locations = conversion->GetLocations(); Location out = locations->Out(); Location in = locations->InAt(0); Primitive::Type result_type = conversion->GetResultType(); Primitive::Type input_type = conversion->GetInputType(); DCHECK_NE(result_type, input_type); switch (result_type) { case Primitive::kPrimByte: switch (input_type) { case Primitive::kPrimLong: // Type conversion from long to byte is a result of code transformations. case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-byte' instruction. if (in.IsRegister()) { __ movsxb(out.AsRegister
(), in.AsRegister
()); } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) { __ movsxb(out.AsRegister
(), Address(CpuRegister(RSP), in.GetStackIndex())); } else { __ movl(out.AsRegister
(), Immediate(static_cast
(Int64FromConstant(in.GetConstant())))); } break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimShort: switch (input_type) { case Primitive::kPrimLong: // Type conversion from long to short is a result of code transformations. case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-short' instruction. if (in.IsRegister()) { __ movsxw(out.AsRegister
(), in.AsRegister
()); } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) { __ movsxw(out.AsRegister
(), Address(CpuRegister(RSP), in.GetStackIndex())); } else { __ movl(out.AsRegister
(), Immediate(static_cast
(Int64FromConstant(in.GetConstant())))); } break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimInt: switch (input_type) { case Primitive::kPrimLong: // Processing a Dex `long-to-int' instruction. if (in.IsRegister()) { __ movl(out.AsRegister
(), in.AsRegister
()); } else if (in.IsDoubleStackSlot()) { __ movl(out.AsRegister
(), Address(CpuRegister(RSP), in.GetStackIndex())); } else { DCHECK(in.IsConstant()); DCHECK(in.GetConstant()->IsLongConstant()); int64_t value = in.GetConstant()->AsLongConstant()->GetValue(); __ movl(out.AsRegister
(), Immediate(static_cast
(value))); } break; case Primitive::kPrimFloat: { // Processing a Dex `float-to-int' instruction. XmmRegister input = in.AsFpuRegister
(); CpuRegister output = out.AsRegister
(); NearLabel done, nan; __ movl(output, Immediate(kPrimIntMax)); // if input >= (float)INT_MAX goto done __ comiss(input, codegen_->LiteralFloatAddress(kPrimIntMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); // output = float-to-int-truncate(input) __ cvttss2si(output, input, false); __ jmp(&done); __ Bind(&nan); // output = 0 __ xorl(output, output); __ Bind(&done); break; } case Primitive::kPrimDouble: { // Processing a Dex `double-to-int' instruction. XmmRegister input = in.AsFpuRegister
(); CpuRegister output = out.AsRegister
(); NearLabel done, nan; __ movl(output, Immediate(kPrimIntMax)); // if input >= (double)INT_MAX goto done __ comisd(input, codegen_->LiteralDoubleAddress(kPrimIntMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); // output = double-to-int-truncate(input) __ cvttsd2si(output, input); __ jmp(&done); __ Bind(&nan); // output = 0 __ xorl(output, output); __ Bind(&done); break; } default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimLong: switch (input_type) { DCHECK(out.IsRegister()); case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-long' instruction. DCHECK(in.IsRegister()); __ movsxd(out.AsRegister
(), in.AsRegister
()); break; case Primitive::kPrimFloat: { // Processing a Dex `float-to-long' instruction. XmmRegister input = in.AsFpuRegister
(); CpuRegister output = out.AsRegister
(); NearLabel done, nan; codegen_->Load64BitValue(output, kPrimLongMax); // if input >= (float)LONG_MAX goto done __ comiss(input, codegen_->LiteralFloatAddress(kPrimLongMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); // output = float-to-long-truncate(input) __ cvttss2si(output, input, true); __ jmp(&done); __ Bind(&nan); // output = 0 __ xorl(output, output); __ Bind(&done); break; } case Primitive::kPrimDouble: { // Processing a Dex `double-to-long' instruction. XmmRegister input = in.AsFpuRegister
(); CpuRegister output = out.AsRegister
(); NearLabel done, nan; codegen_->Load64BitValue(output, kPrimLongMax); // if input >= (double)LONG_MAX goto done __ comisd(input, codegen_->LiteralDoubleAddress(kPrimLongMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); // output = double-to-long-truncate(input) __ cvttsd2si(output, input, true); __ jmp(&done); __ Bind(&nan); // output = 0 __ xorl(output, output); __ Bind(&done); break; } default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimChar: switch (input_type) { case Primitive::kPrimLong: // Type conversion from long to char is a result of code transformations. case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: // Processing a Dex `int-to-char' instruction. if (in.IsRegister()) { __ movzxw(out.AsRegister
(), in.AsRegister
()); } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) { __ movzxw(out.AsRegister
(), Address(CpuRegister(RSP), in.GetStackIndex())); } else { __ movl(out.AsRegister
(), Immediate(static_cast
(Int64FromConstant(in.GetConstant())))); } break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } break; case Primitive::kPrimFloat: switch (input_type) { case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-float' instruction. if (in.IsRegister()) { __ cvtsi2ss(out.AsFpuRegister
(), in.AsRegister
(), false); } else if (in.IsConstant()) { int32_t v = in.GetConstant()->AsIntConstant()->GetValue(); XmmRegister dest = out.AsFpuRegister
(); codegen_->Load32BitValue(dest, static_cast
(v)); } else { __ cvtsi2ss(out.AsFpuRegister
(), Address(CpuRegister(RSP), in.GetStackIndex()), false); } break; case Primitive::kPrimLong: // Processing a Dex `long-to-float' instruction. if (in.IsRegister()) { __ cvtsi2ss(out.AsFpuRegister
(), in.AsRegister
(), true); } else if (in.IsConstant()) { int64_t v = in.GetConstant()->AsLongConstant()->GetValue(); XmmRegister dest = out.AsFpuRegister
(); codegen_->Load32BitValue(dest, static_cast
(v)); } else { __ cvtsi2ss(out.AsFpuRegister
(), Address(CpuRegister(RSP), in.GetStackIndex()), true); } break; case Primitive::kPrimDouble: // Processing a Dex `double-to-float' instruction. if (in.IsFpuRegister()) { __ cvtsd2ss(out.AsFpuRegister
(), in.AsFpuRegister
()); } else if (in.IsConstant()) { double v = in.GetConstant()->AsDoubleConstant()->GetValue(); XmmRegister dest = out.AsFpuRegister
(); codegen_->Load32BitValue(dest, static_cast
(v)); } else { __ cvtsd2ss(out.AsFpuRegister
(), Address(CpuRegister(RSP), in.GetStackIndex())); } break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; }; break; case Primitive::kPrimDouble: switch (input_type) { case Primitive::kPrimBoolean: // Boolean input is a result of code transformations. case Primitive::kPrimByte: case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimChar: // Processing a Dex `int-to-double' instruction. if (in.IsRegister()) { __ cvtsi2sd(out.AsFpuRegister
(), in.AsRegister
(), false); } else if (in.IsConstant()) { int32_t v = in.GetConstant()->AsIntConstant()->GetValue(); XmmRegister dest = out.AsFpuRegister
(); codegen_->Load64BitValue(dest, static_cast
(v)); } else { __ cvtsi2sd(out.AsFpuRegister
(), Address(CpuRegister(RSP), in.GetStackIndex()), false); } break; case Primitive::kPrimLong: // Processing a Dex `long-to-double' instruction. if (in.IsRegister()) { __ cvtsi2sd(out.AsFpuRegister
(), in.AsRegister
(), true); } else if (in.IsConstant()) { int64_t v = in.GetConstant()->AsLongConstant()->GetValue(); XmmRegister dest = out.AsFpuRegister
(); codegen_->Load64BitValue(dest, static_cast
(v)); } else { __ cvtsi2sd(out.AsFpuRegister
(), Address(CpuRegister(RSP), in.GetStackIndex()), true); } break; case Primitive::kPrimFloat: // Processing a Dex `float-to-double' instruction. if (in.IsFpuRegister()) { __ cvtss2sd(out.AsFpuRegister
(), in.AsFpuRegister
()); } else if (in.IsConstant()) { float v = in.GetConstant()->AsFloatConstant()->GetValue(); XmmRegister dest = out.AsFpuRegister
(); codegen_->Load64BitValue(dest, static_cast
(v)); } else { __ cvtss2sd(out.AsFpuRegister
(), Address(CpuRegister(RSP), in.GetStackIndex())); } break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; }; break; default: LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; } } void LocationsBuilderX86_64::VisitAdd(HAdd* add) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); switch (add->GetResultType()) { case Primitive::kPrimInt: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); // We can use a leaq or addq if the constant can fit in an immediate. locations->SetInAt(1, Location::RegisterOrInt32Constant(add->InputAt(1))); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } case Primitive::kPrimDouble: case Primitive::kPrimFloat: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } default: LOG(FATAL) << "Unexpected add type " << add->GetResultType(); } } void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) { LocationSummary* locations = add->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); Location out = locations->Out(); switch (add->GetResultType()) { case Primitive::kPrimInt: { if (second.IsRegister()) { if (out.AsRegister
() == first.AsRegister
()) { __ addl(out.AsRegister
(), second.AsRegister
()); } else if (out.AsRegister
() == second.AsRegister
()) { __ addl(out.AsRegister
(), first.AsRegister
()); } else { __ leal(out.AsRegister
(), Address( first.AsRegister
(), second.AsRegister
(), TIMES_1, 0)); } } else if (second.IsConstant()) { if (out.AsRegister
() == first.AsRegister
()) { __ addl(out.AsRegister
(), Immediate(second.GetConstant()->AsIntConstant()->GetValue())); } else { __ leal(out.AsRegister
(), Address( first.AsRegister
(), second.GetConstant()->AsIntConstant()->GetValue())); } } else { DCHECK(first.Equals(locations->Out())); __ addl(first.AsRegister
(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; } case Primitive::kPrimLong: { if (second.IsRegister()) { if (out.AsRegister
() == first.AsRegister
()) { __ addq(out.AsRegister
(), second.AsRegister
()); } else if (out.AsRegister
() == second.AsRegister
()) { __ addq(out.AsRegister
(), first.AsRegister
()); } else { __ leaq(out.AsRegister
(), Address( first.AsRegister
(), second.AsRegister
(), TIMES_1, 0)); } } else { DCHECK(second.IsConstant()); int64_t value = second.GetConstant()->AsLongConstant()->GetValue(); int32_t int32_value = Low32Bits(value); DCHECK_EQ(int32_value, value); if (out.AsRegister
() == first.AsRegister
()) { __ addq(out.AsRegister
(), Immediate(int32_value)); } else { __ leaq(out.AsRegister
(), Address( first.AsRegister
(), int32_value)); } } break; } case Primitive::kPrimFloat: { if (second.IsFpuRegister()) { __ addss(first.AsFpuRegister
(), second.AsFpuRegister
()); } else if (second.IsConstant()) { __ addss(first.AsFpuRegister
(), codegen_->LiteralFloatAddress( second.GetConstant()->AsFloatConstant()->GetValue())); } else { DCHECK(second.IsStackSlot()); __ addss(first.AsFpuRegister
(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; } case Primitive::kPrimDouble: { if (second.IsFpuRegister()) { __ addsd(first.AsFpuRegister
(), second.AsFpuRegister
()); } else if (second.IsConstant()) { __ addsd(first.AsFpuRegister
(), codegen_->LiteralDoubleAddress( second.GetConstant()->AsDoubleConstant()->GetValue())); } else { DCHECK(second.IsDoubleStackSlot()); __ addsd(first.AsFpuRegister
(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; } default: LOG(FATAL) << "Unexpected add type " << add->GetResultType(); } } void LocationsBuilderX86_64::VisitSub(HSub* sub) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); switch (sub->GetResultType()) { case Primitive::kPrimInt: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrInt32Constant(sub->InputAt(1))); locations->SetOut(Location::SameAsFirstInput()); break; } case Primitive::kPrimFloat: case Primitive::kPrimDouble: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } default: LOG(FATAL) << "Unexpected sub type " << sub->GetResultType(); } } void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) { LocationSummary* locations = sub->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); DCHECK(first.Equals(locations->Out())); switch (sub->GetResultType()) { case Primitive::kPrimInt: { if (second.IsRegister()) { __ subl(first.AsRegister
(), second.AsRegister
()); } else if (second.IsConstant()) { Immediate imm(second.GetConstant()->AsIntConstant()->GetValue()); __ subl(first.AsRegister
(), imm); } else { __ subl(first.AsRegister
(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; } case Primitive::kPrimLong: { if (second.IsConstant()) { int64_t value = second.GetConstant()->AsLongConstant()->GetValue(); DCHECK(IsInt<32>(value)); __ subq(first.AsRegister
(), Immediate(static_cast
(value))); } else { __ subq(first.AsRegister
(), second.AsRegister
()); } break; } case Primitive::kPrimFloat: { if (second.IsFpuRegister()) { __ subss(first.AsFpuRegister
(), second.AsFpuRegister
()); } else if (second.IsConstant()) { __ subss(first.AsFpuRegister
(), codegen_->LiteralFloatAddress( second.GetConstant()->AsFloatConstant()->GetValue())); } else { DCHECK(second.IsStackSlot()); __ subss(first.AsFpuRegister
(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; } case Primitive::kPrimDouble: { if (second.IsFpuRegister()) { __ subsd(first.AsFpuRegister
(), second.AsFpuRegister
()); } else if (second.IsConstant()) { __ subsd(first.AsFpuRegister
(), codegen_->LiteralDoubleAddress( second.GetConstant()->AsDoubleConstant()->GetValue())); } else { DCHECK(second.IsDoubleStackSlot()); __ subsd(first.AsFpuRegister
(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; } default: LOG(FATAL) << "Unexpected sub type " << sub->GetResultType(); } } void LocationsBuilderX86_64::VisitMul(HMul* mul) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); switch (mul->GetResultType()) { case Primitive::kPrimInt: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); if (mul->InputAt(1)->IsIntConstant()) { // Can use 3 operand multiply. locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } else { locations->SetOut(Location::SameAsFirstInput()); } break; } case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); if (mul->InputAt(1)->IsLongConstant() && IsInt<32>(mul->InputAt(1)->AsLongConstant()->GetValue())) { // Can use 3 operand multiply. locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } else { locations->SetOut(Location::SameAsFirstInput()); } break; } case Primitive::kPrimFloat: case Primitive::kPrimDouble: { locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetInAt(1, Location::Any()); locations->SetOut(Location::SameAsFirstInput()); break; } default: LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); } } void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { LocationSummary* locations = mul->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); Location out = locations->Out(); switch (mul->GetResultType()) { case Primitive::kPrimInt: // The constant may have ended up in a register, so test explicitly to avoid // problems where the output may not be the same as the first operand. if (mul->InputAt(1)->IsIntConstant()) { Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); __ imull(out.AsRegister
(), first.AsRegister