diff --git a/Android.mk b/Android.mk index 15e8308ba0b..8c78580949f 100644 --- a/Android.mk +++ b/Android.mk @@ -124,6 +124,8 @@ ifdef TARGET_2ND_ARCH ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so endif +ifdef DISABLED_FOR_XPOSED + ######################################################################## # test rules @@ -290,6 +292,8 @@ test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) endif +endif # DISABLED_FOR_XPOSED + ######################################################################## # oat-target and oat-target-sync rules diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk index 6ce38777f53..db3611fcbcb 100644 --- a/build/Android.common_build.mk +++ b/build/Android.common_build.mk @@ -29,21 +29,15 @@ include art/build/Android.common.mk # Beware that tests may use the non-debug build for performance, notable 055-enum-performance # ART_BUILD_TARGET_NDEBUG ?= true -ART_BUILD_TARGET_DEBUG ?= true -ART_BUILD_HOST_NDEBUG ?= true -ART_BUILD_HOST_DEBUG ?= true +ART_BUILD_TARGET_DEBUG ?= false +ART_BUILD_HOST_NDEBUG = false +ART_BUILD_HOST_DEBUG = false ifeq ($(ART_BUILD_TARGET_NDEBUG),false) $(info Disabling ART_BUILD_TARGET_NDEBUG) endif -ifeq ($(ART_BUILD_TARGET_DEBUG),false) -$(info Disabling ART_BUILD_TARGET_DEBUG) -endif -ifeq ($(ART_BUILD_HOST_NDEBUG),false) -$(info Disabling ART_BUILD_HOST_NDEBUG) -endif -ifeq ($(ART_BUILD_HOST_DEBUG),false) -$(info Disabling ART_BUILD_HOST_DEBUG) +ifneq ($(ART_BUILD_TARGET_DEBUG),false) +$(info Enabling ART_BUILD_TARGET_DEBUG) endif # @@ -201,7 +195,7 @@ art_target_non_debug_cflags := \ ifeq ($(HOST_OS),linux) # Larger frame-size for host clang builds today art_host_non_debug_cflags += -Wframe-larger-than=3000 - art_target_non_debug_cflags += -Wframe-larger-than=1728 + art_target_non_debug_cflags += -Wframe-larger-than=1800 endif # FIXME: upstream LLVM has a vectorizer bug that needs to be fixed 
diff --git a/build/Android.executable.mk b/build/Android.executable.mk index 412f2ddfee2..6fd6b2bfff7 100644 --- a/build/Android.executable.mk +++ b/build/Android.executable.mk @@ -50,6 +50,7 @@ define build-art-executable art_target_or_host := $(5) art_ndebug_or_debug := $(6) art_multilib := $(7) + art_static_libraries := $(8) include $(CLEAR_VARS) LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) @@ -57,6 +58,7 @@ define build-art-executable LOCAL_SRC_FILES := $$(art_source) LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime $$(art_c_includes) LOCAL_SHARED_LIBRARIES += $$(art_shared_libraries) + LOCAL_STATIC_LIBRARIES += $$(art_static_libraries) LOCAL_WHOLE_STATIC_LIBRARIES += libsigchain ifeq ($$(art_ndebug_or_debug),ndebug) diff --git a/compiler/Android.mk b/compiler/Android.mk index b0ec8fb6caf..350d5a962a1 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -259,6 +259,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk ifeq ($$(art_target_or_host),target) LOCAL_SHARED_LIBRARIES += libcutils libvixl + ifeq ($(TARGET_HAVE_QC_PERF),true) + LOCAL_WHOLE_STATIC_LIBRARIES += libqc-art-compiler + endif include $(BUILD_SHARED_LIBRARY) else # host LOCAL_STATIC_LIBRARIES += libcutils libvixl @@ -282,13 +285,6 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT endef -# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target. 
-ifeq ($(ART_BUILD_NDEBUG),true) - $(eval $(call build-libart-compiler,host,ndebug)) -endif -ifeq ($(ART_BUILD_DEBUG),true) - $(eval $(call build-libart-compiler,host,debug)) -endif ifeq ($(ART_BUILD_TARGET_NDEBUG),true) $(eval $(call build-libart-compiler,target,ndebug)) endif diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h index 2769b4b33d9..052cc64117f 100644 --- a/compiler/dex/bb_optimizations.h +++ b/compiler/dex/bb_optimizations.h @@ -274,6 +274,17 @@ class BBOptimizations : public PassME { void Start(PassDataHolder* data) const; }; +// dummy pass, for placeholder only +class DummyPass : public PassME { + public: + DummyPass() : PassME("DummyPass", kNoNodes, "") { + } + + bool Gate(const PassDataHolder* data) const { + return false; + } +}; + } // namespace art #endif // ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_ diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h index dcc67c39865..daff79d8388 100644 --- a/compiler/dex/compiler_enums.h +++ b/compiler/dex/compiler_enums.h @@ -241,6 +241,7 @@ enum MIROptimizationFlagPositions { kMIRInlinedPred, // Invoke is inlined via prediction. kMIRCallee, // Instruction is inlined from callee. kMIRIgnoreSuspendCheck, + kMIRIgnoreZeroDivCheck, kMIRDup, kMIRMark, // Temporary node mark. kMIRLastMIRFlag, @@ -538,6 +539,7 @@ enum FixupKind { kFixupMovImmLST, // kThumb2MovImm16LST. kFixupMovImmHST, // kThumb2MovImm16HST. kFixupAlign4, // Align to 4-byte boundary. + kFixupA53Erratum835769, // Cortex A53 Erratum 835769. 
}; std::ostream& operator<<(std::ostream& os, const FixupKind& kind); diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc index 45fc19e6645..e6553f81927 100644 --- a/compiler/dex/frontend.cc +++ b/compiler/dex/frontend.cc @@ -55,7 +55,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi // (1 << kMatch) | // (1 << kPromoteCompilerTemps) | // (1 << kSuppressExceptionEdges) | - // (1 << kSuppressMethodInlining) | + (1 << kSuppressMethodInlining) | 0; static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc index b82c5c7f00e..fbda48030f4 100644 --- a/compiler/dex/mir_dataflow.cc +++ b/compiler/dex/mir_dataflow.cc @@ -472,10 +472,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C, // 93 DIV_INT vAA, vBB, vCC - DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C, + DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C | DF_ZERO_DIV_CHECK, // 94 REM_INT vAA, vBB, vCC - DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C, + DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C | DF_ZERO_DIV_CHECK, // 95 AND_INT vAA, vBB, vCC DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C, @@ -505,10 +505,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C, // 9E DIV_LONG vAA, vBB, vCC - DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C, + DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C | DF_ZERO_DIV_CHECK, // 9F REM_LONG vAA, vBB, vCC - DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C, + DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C | DF_ZERO_DIV_CHECK, // 
A0 AND_LONG vAA, vBB, vCC DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C, @@ -538,10 +538,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C, // A9 DIV_FLOAT vAA, vBB, vCC - DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C, + DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C | DF_ZERO_DIV_CHECK, // AA REM_FLOAT vAA, vBB, vCC - DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C, + DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C | DF_ZERO_DIV_CHECK, // AB ADD_DOUBLE vAA, vBB, vCC DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C, @@ -553,10 +553,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C, // AE DIV_DOUBLE vAA, vBB, vCC - DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C, + DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C | DF_ZERO_DIV_CHECK, // AF REM_DOUBLE vAA, vBB, vCC - DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C, + DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C | DF_ZERO_DIV_CHECK, // B0 ADD_INT_2ADDR vA, vB DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B, @@ -568,10 +568,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B, // B3 DIV_INT_2ADDR vA, vB - DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B, + DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B | DF_ZERO_DIV_CHECK, // B4 REM_INT_2ADDR vA, vB - DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B, + DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B | DF_ZERO_DIV_CHECK, // B5 AND_INT_2ADDR vA, vB DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B, @@ -601,10 +601,10 @@ const uint64_t 
MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B, // BE DIV_LONG_2ADDR vA, vB - DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B, + DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B | DF_ZERO_DIV_CHECK, // BF REM_LONG_2ADDR vA, vB - DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B, + DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B | DF_ZERO_DIV_CHECK, // C0 AND_LONG_2ADDR vA, vB DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B, @@ -634,10 +634,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B, // C9 DIV_FLOAT_2ADDR vA, vB - DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B, + DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B | DF_ZERO_DIV_CHECK, // CA REM_FLOAT_2ADDR vA, vB - DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B, + DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B | DF_ZERO_DIV_CHECK, // CB ADD_DOUBLE_2ADDR vA, vB DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B, @@ -649,10 +649,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = { DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B, // CE DIV_DOUBLE_2ADDR vA, vB - DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B, + DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B | DF_ZERO_DIV_CHECK, // CF REM_DOUBLE_2ADDR vA, vB - DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B, + DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B | DF_ZERO_DIV_CHECK, // D0 ADD_INT_LIT16 vA, vB, #+CCCC DF_DA | DF_UB | DF_CORE_A | DF_CORE_B, diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 2aafb747fcb..046d311e7e3 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -124,7 +124,8 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena) 
ifield_lowering_infos_(arena, 0u), sfield_lowering_infos_(arena, 0u), method_lowering_infos_(arena, 0u), - gen_suspend_test_list_(arena, 0u) { + gen_suspend_test_list_(arena, 0u), + qcm(nullptr) { try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */); max_available_special_compiler_temps_ = std::abs(static_cast(kVRegNonSpecialTempBaseReg)) - std::abs(static_cast(kVRegTempBaseReg)); @@ -132,6 +133,11 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena) MIRGraph::~MIRGraph() { STLDeleteElements(&m_units_); + CleanupGraphData(); +} + +void MIRGraph::CleanupGraphData() +{ } /* @@ -869,6 +875,11 @@ uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) { return GetDataFlowAttributes(opcode); } + +const char * MIRGraph::GetExtendedMirOpName(int index){ + return extended_mir_op_names_[index]; +} + // TODO: use a configurable base prefix, and adjust callers to supply pass name. /* Dump the CFG into a DOT graph */ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suffix) { @@ -916,7 +927,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff if (opcode > kMirOpSelect && opcode < kMirOpLast) { if (opcode == kMirOpConstVector) { fprintf(file, " {%04x %s %d %d %d %d %d %d\\l}%s\\\n", mir->offset, - extended_mir_op_names_[kMirOpConstVector - kMirOpFirst], + MIRGraph::GetExtendedMirOpName(kMirOpConstVector - kMirOpFirst), mir->dalvikInsn.vA, mir->dalvikInsn.vB, mir->dalvikInsn.arg[0], @@ -926,7 +937,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff mir->next ? " | " : " "); } else { fprintf(file, " {%04x %s %d %d %d\\l}%s\\\n", mir->offset, - extended_mir_op_names_[opcode - kMirOpFirst], + MIRGraph::GetExtendedMirOpName(opcode - kMirOpFirst), mir->dalvikInsn.vA, mir->dalvikInsn.vB, mir->dalvikInsn.vC, @@ -937,7 +948,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff mir->ssa_rep ? 
GetDalvikDisassembly(mir) : !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ? Instruction::Name(mir->dalvikInsn.opcode) : - extended_mir_op_names_[opcode - kMirOpFirst], + MIRGraph::GetExtendedMirOpName(opcode - kMirOpFirst), (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ", (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ", (mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ", @@ -1226,7 +1237,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { // Handle special cases. if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) { - str.append(extended_mir_op_names_[opcode - kMirOpFirst]); + str.append(MIRGraph::GetExtendedMirOpName(opcode - kMirOpFirst)); str.append(": "); // Recover the original Dex instruction. insn = mir->meta.throw_insn->dalvikInsn; @@ -1243,7 +1254,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { } if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) { - str.append(extended_mir_op_names_[opcode - kMirOpFirst]); + str.append(MIRGraph::GetExtendedMirOpName(opcode - kMirOpFirst)); } else { dalvik_format = Instruction::FormatOf(insn.opcode); flags = Instruction::FlagsOf(insn.opcode); @@ -1506,6 +1517,9 @@ void MIRGraph::InitializeMethodUses() { int num_ssa_regs = GetNumSSARegs(); use_counts_.Resize(num_ssa_regs + 32); raw_use_counts_.Resize(num_ssa_regs + 32); + // reset both lists to restart fresh + use_counts_.Reset(); + raw_use_counts_.Reset(); // Initialize list. 
for (int i = 0; i < num_ssa_regs; i++) { use_counts_.Insert(0); diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index 5817f921469..0feb28a6e7c 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -32,6 +32,12 @@ #include "reg_location.h" #include "reg_storage.h" +#ifdef QC_STRONG +#define QC_WEAK +#else +#define QC_WEAK __attribute__((weak)) +#endif + namespace art { class GlobalValueNumbering; @@ -107,6 +113,7 @@ enum DataFlowAttributePos { kUsesIField, // Accesses an instance field (IGET/IPUT). kUsesSField, // Accesses a static field (SGET/SPUT). kDoLVN, // Worth computing local value numbers. + kZeroDivCheck, // check for zero divider }; #define DF_NOP UINT64_C(0) @@ -146,6 +153,7 @@ enum DataFlowAttributePos { #define DF_IFIELD (UINT64_C(1) << kUsesIField) #define DF_SFIELD (UINT64_C(1) << kUsesSField) #define DF_LVN (UINT64_C(1) << kDoLVN) +#define DF_ZERO_DIV_CHECK (UINT64_C(1) << kZeroDivCheck) #define DF_HAS_USES (DF_UA | DF_UB | DF_UC) @@ -193,6 +201,7 @@ enum OatMethodAttributes { #define MIR_INLINED_PRED (1 << kMIRInlinedPred) #define MIR_CALLEE (1 << kMIRCallee) #define MIR_IGNORE_SUSPEND_CHECK (1 << kMIRIgnoreSuspendCheck) +#define MIR_IGNORE_ZERO_DIV_CHECK (1 << kMIRIgnoreZeroDivCheck) #define MIR_DUP (1 << kMIRDup) #define BLOCK_NAME_LEN 80 @@ -261,6 +270,8 @@ struct SSARepresentation { static uint32_t GetStartUseIndex(Instruction::Code opcode); }; +struct ExtendedMIR; + /* * The Midlevel Intermediate Representation node, which may be largely considered a * wrapper around a Dalvik byte code. @@ -369,7 +380,7 @@ struct MIR { } meta; explicit MIR():offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId), - next(nullptr), ssa_rep(nullptr) { + next(nullptr), ssa_rep(nullptr) , extraData(nullptr){ memset(&meta, 0, sizeof(meta)); } @@ -384,6 +395,9 @@ struct MIR { return arena->Alloc(sizeof(MIR), kArenaAllocMIR); } static void operator delete(void* p) {} // Nop. 
+ + ExtendedMIR* extraData; + }; struct SuccessorBlockInfo; @@ -536,6 +550,9 @@ struct CallInfo { const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, RegStorage(), INVALID_SREG, INVALID_SREG}; + +class QCMIRGraph; + class MIRGraph { public: MIRGraph(CompilationUnit* cu, ArenaAllocator* arena); @@ -732,6 +749,12 @@ class MIRGraph { Low32Bits(static_cast(constant_values_[loc.orig_sreg])); } + int64_t ConstantValueWide(int32_t s_reg) const { + DCHECK(IsConst(s_reg)); + return (static_cast(constant_values_[s_reg + 1]) << 32) | + Low32Bits(static_cast(constant_values_[s_reg])); + } + bool IsConstantNullRef(RegLocation loc) const { return loc.ref && loc.is_const && (ConstantValue(loc) == 0); } @@ -1025,7 +1048,7 @@ class MIRGraph { */ void CountUses(struct BasicBlock* bb); - static uint64_t GetDataFlowAttributes(Instruction::Code opcode); + static uint64_t GetDataFlowAttributes(Instruction::Code opcode) QC_WEAK; static uint64_t GetDataFlowAttributes(MIR* mir); /** @@ -1060,6 +1083,7 @@ class MIRGraph { static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst]; static const uint32_t analysis_attributes_[kMirOpLast]; + static const char * GetExtendedMirOpName(int index) QC_WEAK; void HandleSSADef(int* defs, int dalvik_reg, int reg_index); bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed); @@ -1077,6 +1101,8 @@ class MIRGraph { */ bool HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id); + void CleanupGraphData() QC_WEAK; + protected: int FindCommonParent(int block1, int block2); void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1, @@ -1205,6 +1231,12 @@ class MIRGraph { friend class GlobalValueNumberingTest; friend class LocalValueNumberingTest; friend class TopologicalSortOrderTest; + + friend class QCMIRGraph; + + public: + QCMIRGraph* qcm; + }; } // namespace art diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h index bd8f53cd5a1..bc5913c0431 100644 --- 
a/compiler/dex/pass_driver.h +++ b/compiler/dex/pass_driver.h @@ -21,6 +21,12 @@ #include "pass.h" #include "safe_map.h" +#ifdef QC_STRONG +#define QC_WEAK +#else +#define QC_WEAK __attribute__((weak)) +#endif + // Forward Declarations. class Pass; class PassDriver; @@ -35,6 +41,8 @@ const Pass* GetPassInstance() { return &pass; } +const Pass* GetMorePassInstance() QC_WEAK; + // Empty holder for the constructor. class PassDriverDataHolder { }; diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc index c72a4a667e0..d003cf918df 100644 --- a/compiler/dex/pass_driver_me_opts.cc +++ b/compiler/dex/pass_driver_me_opts.cc @@ -23,6 +23,10 @@ namespace art { +const Pass* GetMorePassInstance() { + static const DummyPass pass; + return &pass; +} /* * Create the pass list. These passes are immutable and are shared across the threads. * @@ -42,6 +46,7 @@ const Pass* const PassDriver::g_passes[] = { GetPassInstance(), GetPassInstance(), GetPassInstance(), + GetMorePassInstance(), }; // The number of the passes in the initial list of Passes (g_passes). diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h index 6272555983c..a54e8b7e33a 100644 --- a/compiler/dex/quick/arm/arm_lir.h +++ b/compiler/dex/quick/arm/arm_lir.h @@ -542,6 +542,9 @@ enum ArmOpcode { kThumb2LdrdPcRel8, // ldrd rt, rt2, pc +-/1024. kThumb2LdrdI8, // ldrd rt, rt2, [rn +-/1024]. kThumb2StrdI8, // strd rt, rt2, [rn +-/1024]. 
+ + kThumb2Mls, + kArmLast, }; diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index 35c3597491e..12d06eb6664 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1035,6 +1035,10 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = { kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE_OFF4, "strd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone), + ENCODING_MAP(kThumb2Mls, 0xfb000010, + kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, + kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0 | REG_USE1 | REG_USE2 | REG_USE3, + "mls", "!0C, !1C, !2C, !3C", 4, kFixupNone), }; // new_lir replaces orig_lir in the pcrel_fixup list. @@ -1067,6 +1071,17 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) { */ #define PADDING_MOV_R5_R5 0x1C2D +uint32_t ArmMir2Lir::ProcessMoreEncodings(const ArmEncodingMap* encoder, int i, uint32_t operand) { + LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind; + uint32_t value=0; + return value; +} + +const ArmEncodingMap * ArmMir2Lir::GetEncoder(int opcode) { + const ArmEncodingMap *encoder = &EncodingMap[opcode]; + return encoder; +} + uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) { for (; lir != NULL; lir = NEXT_LIR(lir)) { if (!lir->flags.is_nop) { @@ -1081,7 +1096,7 @@ uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) { } } } else if (LIKELY(!lir->flags.is_nop)) { - const ArmEncodingMap *encoder = &EncodingMap[lir->opcode]; + const ArmEncodingMap *encoder = GetEncoder(lir->opcode); uint32_t bits = encoder->skeleton; for (int i = 0; i < 4; i++) { uint32_t operand; @@ -1184,7 +1199,8 @@ uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) { } break; default: - LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind; + bits |= ProcessMoreEncodings(encoder, i, operand); + break; } } } @@ -1294,7 +1310,7 @@ void ArmMir2Lir::AssembleLIR() { base_reg, 0, 0, 0, 0, 
lir->target); new_adr->offset = lir->offset; new_adr->flags.fixup = kFixupAdr; - new_adr->flags.size = EncodingMap[kThumb2Adr].size; + new_adr->flags.size = GetEncoder(kThumb2Adr)->size; InsertLIRBefore(lir, new_adr); lir->offset += new_adr->flags.size; offset_adjustment += new_adr->flags.size; @@ -1309,7 +1325,7 @@ void ArmMir2Lir::AssembleLIR() { } else if (lir->opcode == kThumb2LdrdPcRel8) { lir->opcode = kThumb2LdrdI8; } - lir->flags.size = EncodingMap[lir->opcode].size; + lir->flags.size = GetEncoder(lir->opcode)->size; offset_adjustment += lir->flags.size; // Change the load to be relative to the new Adr base. if (lir->opcode == kThumb2LdrdI8) { @@ -1359,13 +1375,13 @@ void ArmMir2Lir::AssembleLIR() { /* operand[0] is src1 in both cb[n]z & CmpRI8 */ lir->operands[1] = 0; lir->target = 0; - lir->flags.size = EncodingMap[lir->opcode].size; + lir->flags.size = GetEncoder(lir->opcode)->size; // Add back the new size. offset_adjustment += lir->flags.size; // Set up the new following inst. new_inst->offset = lir->offset + lir->flags.size; new_inst->flags.fixup = kFixupCondBranch; - new_inst->flags.size = EncodingMap[new_inst->opcode].size; + new_inst->flags.size = GetEncoder(new_inst->opcode)->size; offset_adjustment += new_inst->flags.size; // lir no longer pcrel, unlink and link in new_inst. @@ -1415,7 +1431,7 @@ void ArmMir2Lir::AssembleLIR() { if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) { offset_adjustment -= lir->flags.size; lir->opcode = kThumb2BCond; - lir->flags.size = EncodingMap[lir->opcode].size; + lir->flags.size = GetEncoder(lir->opcode)->size; // Fixup kind remains the same. 
offset_adjustment += lir->flags.size; res = kRetryAll; @@ -1451,7 +1467,7 @@ void ArmMir2Lir::AssembleLIR() { offset_adjustment -= lir->flags.size; lir->opcode = kThumb2BUncond; lir->operands[0] = 0; - lir->flags.size = EncodingMap[lir->opcode].size; + lir->flags.size = GetEncoder(lir->opcode)->size; lir->flags.fixup = kFixupT2Branch; offset_adjustment += lir->flags.size; res = kRetryAll; @@ -1513,7 +1529,7 @@ void ArmMir2Lir::AssembleLIR() { LIR *new_mov16L = RawLIR(lir->dalvik_offset, kThumb2MovImm16LST, lir->operands[0], 0, WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target); - new_mov16L->flags.size = EncodingMap[new_mov16L->opcode].size; + new_mov16L->flags.size = GetEncoder(new_mov16L->opcode)->size; new_mov16L->flags.fixup = kFixupMovImmLST; new_mov16L->offset = lir->offset; // Link the new instruction, retaining lir. @@ -1525,7 +1541,7 @@ void ArmMir2Lir::AssembleLIR() { LIR *new_mov16H = RawLIR(lir->dalvik_offset, kThumb2MovImm16HST, lir->operands[0], 0, WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target); - new_mov16H->flags.size = EncodingMap[new_mov16H->opcode].size; + new_mov16H->flags.size = GetEncoder(new_mov16H->opcode)->size; new_mov16H->flags.fixup = kFixupMovImmHST; new_mov16H->offset = lir->offset; // Link the new instruction, retaining lir. @@ -1542,7 +1558,7 @@ void ArmMir2Lir::AssembleLIR() { lir->opcode = kThumbAddRRHH; } lir->operands[1] = rs_rARM_PC.GetReg(); - lir->flags.size = EncodingMap[lir->opcode].size; + lir->flags.size = GetEncoder(lir->opcode)->size; offset_adjustment += lir->flags.size; // Must stay in fixup list and have offset updated; will be used by LST/HSP pair. lir->flags.fixup = kFixupNone; @@ -1630,7 +1646,7 @@ void ArmMir2Lir::AssembleLIR() { size_t ArmMir2Lir::GetInsnSize(LIR* lir) { DCHECK(!IsPseudoLirOp(lir->opcode)); - return EncodingMap[lir->opcode].size; + return GetEncoder(lir->opcode)->size; } // Encode instruction bit pattern and assign offsets. 
@@ -1642,8 +1658,8 @@ uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offse if (!lir->flags.is_nop) { if (lir->flags.fixup != kFixupNone) { if (!IsPseudoLirOp(lir->opcode)) { - lir->flags.size = EncodingMap[lir->opcode].size; - lir->flags.fixup = EncodingMap[lir->opcode].fixup; + lir->flags.size = GetEncoder(lir->opcode)->size; + lir->flags.fixup = GetEncoder(lir->opcode)->fixup; } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) { lir->flags.size = (offset & 0x2); lir->flags.fixup = kFixupAlign4; diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index cd6c9cc1e15..2185864a473 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -20,11 +20,19 @@ #include "arm_lir.h" #include "dex/compiler_internals.h" +#ifdef QC_STRONG +#define QC_WEAK +#else +#define QC_WEAK __attribute__((weak)) +#endif + namespace art { +class QCArmMir2Lir; class ArmMir2Lir FINAL : public Mir2Lir { public: ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena); + ~ArmMir2Lir(); // Required for target - codegen helpers. 
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src, @@ -147,6 +155,7 @@ class ArmMir2Lir FINAL : public Mir2Lir { LIR* OpMem(OpKind op, RegStorage r_base, int disp); LIR* OpPcRelLoad(RegStorage reg, LIR* target); LIR* OpReg(OpKind op, RegStorage r_dest_src); + LIR* OpBkpt(); void OpRegCopy(RegStorage r_dest, RegStorage r_src); LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src); LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value); @@ -186,6 +195,10 @@ class ArmMir2Lir FINAL : public Mir2Lir { LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE; size_t GetInstructionOffset(LIR* lir); + void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE; + void GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) QC_WEAK; + void Cleanup() QC_WEAK; + //void MachineSpecificPreprocessMIR(BasicBlock* bb, MIR* mir); private: void GenNegLong(RegLocation rl_dest, RegLocation rl_src); @@ -211,9 +224,26 @@ class ArmMir2Lir FINAL : public Mir2Lir { bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops); void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops); + + + static uint32_t ProcessMoreEncodings(const ArmEncodingMap* encoder, int i, uint32_t operand) QC_WEAK; + + static const ArmEncodingMap * GetEncoder(int opcode) QC_WEAK; + static constexpr ResourceMask GetRegMaskArm(RegStorage reg); static constexpr ResourceMask EncodeArmRegList(int reg_list); static constexpr ResourceMask EncodeArmRegFpcsList(int reg_list); + + virtual void ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) QC_WEAK; + + void CompilerPostInitializeRegAlloc() QC_WEAK; + void ArmMir2LirPostInit(ArmMir2Lir* mir_to_lir) QC_WEAK; + + friend class QCArmMir2Lir; + + public: + QCArmMir2Lir * qcm2l ; + }; } // namespace art diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index b299d9bed4f..94654546df3 100644 --- 
a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -720,8 +720,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStora RegStorage temp = AllocTemp(); OpRegRegReg(kOpDiv, temp, reg1, reg2); - OpRegReg(kOpMul, temp, reg2); - OpRegRegReg(kOpSub, rl_result.reg, reg1, temp); + NewLIR4(kThumb2Mls, rl_result.reg.GetReg(), temp.GetReg(), reg2.GetReg(), reg1.GetReg()); FreeTemp(temp); } diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index c853c490993..719dd711fda 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -139,11 +139,18 @@ ResourceMask ArmMir2Lir::GetRegMaskCommon(const RegStorage& reg) const { return GetRegMaskArm(reg); } +void ArmMir2Lir::CompilerPostInitializeRegAlloc() +{ + //nothing here +} + constexpr ResourceMask ArmMir2Lir::GetRegMaskArm(RegStorage reg) { - return reg.IsDouble() + return (reg.IsQuad()) + ? (ResourceMask::FourBits((reg.GetRegNum() * 4) + kArmFPReg0)) + : (reg.IsDouble() /* Each double register is equal to a pair of single-precision FP registers */ ? ResourceMask::TwoBits(reg.GetRegNum() * 2 + kArmFPReg0) - : ResourceMask::Bit(reg.IsSingle() ? reg.GetRegNum() + kArmFPReg0 : reg.GetRegNum()); + : ResourceMask::Bit(reg.IsSingle() ? 
reg.GetRegNum() + kArmFPReg0 : reg.GetRegNum())); } constexpr ResourceMask ArmMir2Lir::EncodeArmRegList(int reg_list) { @@ -559,6 +566,20 @@ ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* << static_cast(ArmMir2Lir::EncodingMap[i].opcode); } } + qcm2l = nullptr; + ArmMir2LirPostInit(this); +} + +void ArmMir2Lir::Cleanup() +{ +} + +ArmMir2Lir::~ArmMir2Lir() +{ + Cleanup(); +} + +void ArmMir2Lir::ArmMir2LirPostInit(ArmMir2Lir* mir_to_lir) { } Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, @@ -606,6 +627,8 @@ void ArmMir2Lir::CompilerInitializeRegAlloc() { reg_pool_->next_core_reg_ = 2; reg_pool_->next_sp_reg_ = 0; reg_pool_->next_dp_reg_ = 0; + + CompilerPostInitializeRegAlloc(); } /* @@ -735,17 +758,17 @@ LIR* ArmMir2Lir::CheckSuspendUsingLoad() { uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) { DCHECK(!IsPseudoLirOp(opcode)); - return ArmMir2Lir::EncodingMap[opcode].flags; + return GetEncoder(opcode)->flags; } const char* ArmMir2Lir::GetTargetInstName(int opcode) { DCHECK(!IsPseudoLirOp(opcode)); - return ArmMir2Lir::EncodingMap[opcode].name; + return GetEncoder(opcode)->name; } const char* ArmMir2Lir::GetTargetInstFmt(int opcode) { DCHECK(!IsPseudoLirOp(opcode)); - return ArmMir2Lir::EncodingMap[opcode].fmt; + return GetEncoder(opcode)->fmt; } /* @@ -825,4 +848,17 @@ RegStorage ArmMir2Lir::AllocPreservedSingle(int s_reg) { return res; } +void ArmMir2Lir::GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir){ + // nothing here +} + +void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { + + GenMoreMachineSpecificExtendedMethodMIR(bb, mir); +} + + +void ArmMir2Lir::ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) { +} + } // namespace art diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index a3b099c620d..530c613c453 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ 
b/compiler/dex/quick/arm/utility_arm.cc @@ -235,6 +235,11 @@ LIR* ArmMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) { return NewLIR1(opcode, r_dest_src.GetReg()); } +LIR* ArmMir2Lir::OpBkpt() { + LOG(ERROR) << "Inserting breakpoint"; + return NewLIR0(kThumbBkpt); +} + LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) { bool thumb_form = @@ -352,15 +357,15 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_s break; } DCHECK(!IsPseudoLirOp(opcode)); - if (EncodingMap[opcode].flags & IS_BINARY_OP) { + if (GetEncoder(opcode)->flags & IS_BINARY_OP) { return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg()); - } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) { - if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) { + } else if (GetEncoder(opcode)->flags & IS_TERTIARY_OP) { + if (GetEncoder(opcode)->field_loc[2].kind == kFmtShift) { return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift); } else { return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg()); } - } else if (EncodingMap[opcode].flags & IS_QUAD_OP) { + } else if (GetEncoder(opcode)->flags & IS_QUAD_OP) { return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift); } else { LOG(FATAL) << "Unexpected encoding operand count"; @@ -448,10 +453,10 @@ LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src break; } DCHECK(!IsPseudoLirOp(opcode)); - if (EncodingMap[opcode].flags & IS_QUAD_OP) { + if (GetEncoder(opcode)->flags & IS_QUAD_OP) { return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift); } else { - DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP); + DCHECK(GetEncoder(opcode)->flags & IS_TERTIARY_OP); return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg()); } } @@ -587,7 +592,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, in } else { RegStorage 
r_scratch = AllocTemp(); LoadConstant(r_scratch, value); - if (EncodingMap[alt_opcode].flags & IS_QUAD_OP) + if (GetEncoder(alt_opcode)->flags & IS_QUAD_OP) res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0); else res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg()); diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h index a449cbd4f7d..ff4543de976 100644 --- a/compiler/dex/quick/arm64/arm64_lir.h +++ b/compiler/dex/quick/arm64/arm64_lir.h @@ -317,8 +317,10 @@ enum ArmOpcode { kA64Mov2rr, // mov [00101010000] rm[20-16] [000000] [11111] rd[4-0]. kA64Mvn2rr, // mov [00101010001] rm[20-16] [000000] [11111] rd[4-0]. kA64Mul3rrr, // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0]. + kA64Madd4rrrr, // madd[s0011011000] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0]. kA64Msub4rrrr, // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0]. kA64Neg3rro, // neg alias of "sub arg0, rzr, arg1, arg2". + kA64Nop0, // nop alias of "hint #0" [11010101000000110010000000011111]. kA64Orr3Rrl, // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0]. kA64Orr4rrro, // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0]. kA64Ret, // ret [11010110010111110000001111000000]. @@ -331,7 +333,7 @@ enum ArmOpcode { kA64Scvtf2fw, // scvtf [000111100s100010000000] rn[9-5] rd[4-0]. kA64Scvtf2fx, // scvtf [100111100s100010000000] rn[9-5] rd[4-0]. kA64Sdiv3rrr, // sdiv[s0011010110] rm[20-16] [000011] rn[9-5] rd[4-0]. - kA64Smaddl4xwwx, // smaddl [10011011001] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0]. + kA64Smull3xww, // smull [10011011001] rm[20-16] [011111] rn[9-5] rd[4-0]. kA64Smulh3xxx, // smulh [10011011010] rm[20-16] [011111] rn[9-5] rd[4-0]. kA64Stp4ffXD, // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0]. kA64Stp4rrXD, // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0]. 
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc index 15c89f2f188..72236de08f4 100644 --- a/compiler/dex/quick/arm64/assemble_arm64.cc +++ b/compiler/dex/quick/arm64/assemble_arm64.cc @@ -14,8 +14,9 @@ * limitations under the License. */ -#include "arm64_lir.h" #include "codegen_arm64.h" + +#include "arm64_lir.h" #include "dex/quick/mir_to_lir-inl.h" namespace art { @@ -462,14 +463,22 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = { kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12, "mul", "!0r, !1r, !2r", kFixupNone), - ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000), + ENCODING_MAP(WIDE(kA64Madd4rrrr), SF_VARIANTS(0x1b000000), kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10, kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123, - "msub", "!0r, !1r, !3r, !2r", kFixupNone), + "madd", "!0r, !1r, !3r, !2r", kFixupNone), + ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000), + kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16, + kFmtRegR, 14, 10, IS_QUAD_OP | REG_DEF0_USE123 | NEEDS_FIXUP, + "msub", "!0r, !1r, !2r, !3r", kFixupA53Erratum835769), ENCODING_MAP(WIDE(kA64Neg3rro), SF_VARIANTS(0x4b0003e0), kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtShift, -1, -1, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1, "neg", "!0r, !1r!2o", kFixupNone), + ENCODING_MAP(kA64Nop0, NO_VARIANTS(0xd503201f), + kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1, + kFmtUnused, -1, -1, NO_OPERAND, + "nop", "", kFixupNone), ENCODING_MAP(WIDE(kA64Orr3Rrl), SF_VARIANTS(0x32000000), kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1, @@ -518,10 +527,10 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = { kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12, "sdiv", "!0r, !1r, !2r", kFixupNone), - ENCODING_MAP(WIDE(kA64Smaddl4xwwx), 
NO_VARIANTS(0x9b200000), + ENCODING_MAP(kA64Smull3xww, NO_VARIANTS(0x9b207c00), kFmtRegX, 4, 0, kFmtRegW, 9, 5, kFmtRegW, 20, 16, - kFmtRegX, 14, 10, IS_QUAD_OP | REG_DEF0_USE123, - "smaddl", "!0x, !1w, !2w, !3x", kFixupNone), + kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12, + "smull", "!0x, !1w, !2w", kFixupNone), ENCODING_MAP(kA64Smulh3xxx, NO_VARIANTS(0x9b407c00), kFmtRegX, 4, 0, kFmtRegX, 9, 5, kFmtRegX, 20, 16, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12, @@ -646,20 +655,33 @@ void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) } } +const ArmEncodingMap* Arm64Mir2Lir::GetEncoder(int opcode) { + const ArmEncodingMap* encoder = &EncodingMap[opcode]; + return encoder; +} + /* Nop, used for aligning code. Nop is an alias for hint #0. */ #define PADDING_NOP (UINT32_C(0xd503201f)) +uint32_t Arm64Mir2Lir::ProcessMoreEncodings(const ArmEncodingMap *encoder, + int i, uint32_t operand) { + LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind; + uint32_t value = 0; + return value; +} + uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) { for (; lir != nullptr; lir = NEXT_LIR(lir)) { bool opcode_is_wide = IS_WIDE(lir->opcode); ArmOpcode opcode = UNWIDE(lir->opcode); + bool extendedOpcode = false; if (UNLIKELY(IsPseudoLirOp(opcode))) { continue; } if (LIKELY(!lir->flags.is_nop)) { - const ArmEncodingMap *encoder = &EncodingMap[opcode]; + const ArmEncodingMap *encoder = GetEncoder(opcode); // Select the right variant of the skeleton. uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton; @@ -788,8 +810,9 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) { bits |= value; break; default: - LOG(FATAL) << "Bad fmt for arg. 
" << i << " in " << encoder->name - << " (" << kind << ")"; + bits |= ProcessMoreEncodings(encoder, i, operand); + extendedOpcode = true; + break; } } } @@ -810,6 +833,20 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) { // are better set directly from the code (they will require no more than 2 instructions). #define ALIGNED_DATA_OFFSET(offset) (((offset) + 0x7) & ~0x7) +/* + * Get the LIR which emits the instruction preceding the given LIR. + * Returns nullptr, if no previous emitting insn found. + */ +static LIR* GetPrevEmittingLIR(LIR* lir) { + DCHECK(lir != nullptr); + LIR* prev_lir = lir->prev; + while ((prev_lir != nullptr) && + (prev_lir->flags.is_nop || Mir2Lir::IsPseudoLirOp(prev_lir->opcode))) { + prev_lir = prev_lir->prev; + } + return prev_lir; +} + // Assemble the LIR into binary instruction format. void Arm64Mir2Lir::AssembleLIR() { LIR* lir; @@ -902,8 +939,35 @@ void Arm64Mir2Lir::AssembleLIR() { lir->operands[1] = delta; break; } + case kFixupA53Erratum835769: + // Avoid emitting code that could trigger Cortex A53's erratum 835769. + // This fixup should be carried out for all multiply-accumulate instructions: madd, msub, + // smaddl, smsubl, umaddl and umsubl. + if (cu_->GetInstructionSetFeatures().NeedFix835769()) { + // Check that this is a 64-bit multiply-accumulate. + if (IS_WIDE(lir->opcode)) { + LIR* prev_insn = GetPrevEmittingLIR(lir); + if (prev_insn == nullptr) { + break; + } + uint64_t prev_insn_flags = EncodingMap[UNWIDE(prev_insn->opcode)].flags; + // Check that the instruction preceding the multiply-accumulate is a load or store. + if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) { + // insert a NOP between the load/store and the multiply-accumulate. 
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL); + new_lir->offset = lir->offset; + new_lir->flags.fixup = kFixupNone; + new_lir->flags.size = EncodingMap[kA64Nop0].size; + InsertLIRBefore(lir, new_lir); + lir->offset += new_lir->flags.size; + offset_adjustment += new_lir->flags.size; + res = kRetryAll; + } + } + } + break; default: - LOG(FATAL) << "Unexpected case " << lir->flags.fixup; + LOG(FATAL) << "Unexpected case: opcode: " << lir->opcode << ", fixup: " << lir->flags.fixup; } prev_lir = lir; lir = lir->u.a.pcrel_next; @@ -953,7 +1017,7 @@ void Arm64Mir2Lir::AssembleLIR() { size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) { ArmOpcode opcode = UNWIDE(lir->opcode); DCHECK(!IsPseudoLirOp(opcode)); - return EncodingMap[opcode].size; + return GetEncoder(opcode)->size; } // Encode instruction bit pattern and assign offsets. @@ -966,8 +1030,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off if (!lir->flags.is_nop) { if (lir->flags.fixup != kFixupNone) { if (!IsPseudoLirOp(opcode)) { - lir->flags.size = EncodingMap[opcode].size; - lir->flags.fixup = EncodingMap[opcode].fixup; + lir->flags.size = GetEncoder(opcode)->size; + lir->flags.fixup = GetEncoder(opcode)->fixup; } else { DCHECK_NE(static_cast(opcode), kPseudoPseudoAlign4); lir->flags.size = 0; diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index 3e1c18baf4f..390cbb61291 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -22,8 +22,14 @@ #include -namespace art { +#ifdef QC_STRONG +#define QC_WEAK +#else +#define QC_WEAK __attribute__((weak)) +#endif +namespace art { +class QCArm64Mir2Lir; class Arm64Mir2Lir FINAL : public Mir2Lir { protected: // TODO: consolidate 64-bit target support. 
@@ -61,6 +67,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { public: Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena); + ~Arm64Mir2Lir(); // Required for target - codegen helpers. bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src, @@ -259,7 +266,11 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE; - private: + virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE; + void GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) QC_WEAK; + void Cleanup() QC_WEAK; + +private: /** * @brief Given register xNN (dNN), returns register wNN (sNN). * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2). @@ -394,6 +405,18 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { InToRegStorageMapping in_to_reg_storage_mapping_; static const ArmEncodingMap EncodingMap[kA64Last]; + +private: + static uint32_t ProcessMoreEncodings(const ArmEncodingMap* encoder, int i, uint32_t operand) QC_WEAK; + static const ArmEncodingMap* GetEncoder(int opcode) QC_WEAK; + + virtual void ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) QC_WEAK; + + void CompilerPostInitializeRegAlloc() QC_WEAK; + void Arm64Mir2LirPostInit(Arm64Mir2Lir* mir_to_lir) QC_WEAK; + + friend class QCArm64Mir2Lir; + QCArm64Mir2Lir* qcm2l; }; } // namespace art diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc index 1e97a3246c2..0d3e38ad3e4 100644 --- a/compiler/dex/quick/arm64/int_arm64.cc +++ b/compiler/dex/quick/arm64/int_arm64.cc @@ -419,8 +419,7 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d rl_src = LoadValue(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); RegStorage r_long_mul = AllocTemp(); - NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(), - r_magic.GetReg(), 
rl_src.reg.GetReg(), rxzr); + NewLIR3(kA64Smull3xww, As64BitReg(r_long_mul).GetReg(), r_magic.GetReg(), rl_src.reg.GetReg()); switch (pattern) { case Divide3: OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32); @@ -635,7 +634,7 @@ RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegS } OpRegRegReg(kOpDiv, temp, r_src1, r_src2); NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(), - r_src1.GetReg(), r_src2.GetReg()); + r_src2.GetReg(), r_src1.GetReg()); FreeTemp(temp); } return rl_result; @@ -1116,11 +1115,6 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } - // If index is constant, just fold it into the data offset - if (constant_index) { - data_offset += mir_graph_->ConstantValue(rl_index) << scale; - } - /* null object? */ GenNullCheck(rl_array.reg, opt_flags); @@ -1134,43 +1128,23 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, } else { ForceImplicitNullCheck(rl_array.reg, opt_flags); } - if (rl_dest.wide || rl_dest.fp || constant_index) { - RegStorage reg_ptr; - if (constant_index) { - reg_ptr = rl_array.reg; // NOTE: must not alter reg_ptr in constant case. - } else { - // No special indexed operation, lea + load w/ displacement - reg_ptr = AllocTempRef(); - OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg), - EncodeShift(kA64Lsl, scale)); - FreeTemp(rl_index.reg); - } + if (constant_index) { rl_result = EvalLoc(rl_dest, reg_class, true); if (needs_range_check) { - if (constant_index) { - GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len); - } else { - GenArrayBoundsCheck(rl_index.reg, reg_len); - } + GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len); FreeTemp(reg_len); } + // Fold the constant index into the data offset. 
+ data_offset += mir_graph_->ConstantValue(rl_index) << scale; if (rl_result.ref) { - LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile); + LoadRefDisp(rl_array.reg, data_offset, rl_result.reg, kNotVolatile); } else { - LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile); + LoadBaseDisp(rl_array.reg, data_offset, rl_result.reg, size, kNotVolatile); } MarkPossibleNullPointerException(opt_flags); - if (!constant_index) { - FreeTemp(reg_ptr); - } - if (rl_dest.wide) { - StoreValueWide(rl_dest, rl_result); - } else { - StoreValue(rl_dest, rl_result); - } } else { - // Offset base, then use indexed load + // Offset base, then use indexed load. RegStorage reg_ptr = AllocTempRef(); OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset); FreeTemp(rl_array.reg); @@ -1181,12 +1155,16 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, FreeTemp(reg_len); } if (rl_result.ref) { - LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale); + LoadRefIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale); } else { - LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size); + LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size); } MarkPossibleNullPointerException(opt_flags); FreeTemp(reg_ptr); + } + if (rl_dest.wide) { + StoreValueWide(rl_dest, rl_result); + } else { StoreValue(rl_dest, rl_result); } } @@ -1208,11 +1186,6 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value(); } - // If index is constant, just fold it into the data offset. 
- if (constant_index) { - data_offset += mir_graph_->ConstantValue(rl_index) << scale; - } - rl_array = LoadValue(rl_array, kRefReg); if (!constant_index) { rl_index = LoadValue(rl_index, kCoreReg); @@ -1245,24 +1218,18 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, ForceImplicitNullCheck(rl_array.reg, opt_flags); } /* at this point, reg_ptr points to array, 2 live temps */ - if (rl_src.wide || rl_src.fp || constant_index) { - if (rl_src.wide) { - rl_src = LoadValueWide(rl_src, reg_class); - } else { - rl_src = LoadValue(rl_src, reg_class); - } - if (!constant_index) { - OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg), - EncodeShift(kA64Lsl, scale)); - } + if (rl_src.wide) { + rl_src = LoadValueWide(rl_src, reg_class); + } else { + rl_src = LoadValue(rl_src, reg_class); + } + if (constant_index) { if (needs_range_check) { - if (constant_index) { - GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len); - } else { - GenArrayBoundsCheck(rl_index.reg, reg_len); - } + GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len); FreeTemp(reg_len); } + // Fold the constant index into the data offset. 
+ data_offset += mir_graph_->ConstantValue(rl_index) << scale; if (rl_src.ref) { StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile); } else { @@ -1272,15 +1239,14 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, } else { /* reg_ptr -> array data */ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset); - rl_src = LoadValue(rl_src, reg_class); if (needs_range_check) { GenArrayBoundsCheck(rl_index.reg, reg_len); FreeTemp(reg_len); } if (rl_src.ref) { - StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale); + StoreRefIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale); } else { - StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size); + StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size); } MarkPossibleNullPointerException(opt_flags); } diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc index 16a1078553d..180062ce77e 100644 --- a/compiler/dex/quick/arm64/target_arm64.cc +++ b/compiler/dex/quick/arm64/target_arm64.cc @@ -138,6 +138,11 @@ RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) { return res_reg; } +void Arm64Mir2Lir::CompilerPostInitializeRegAlloc() +{ + //nothing here +} + /* * Decode the register id. This routine makes assumptions on the encoding made by RegStorage. 
*/ @@ -587,6 +592,21 @@ Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAlloca << static_cast(Arm64Mir2Lir::EncodingMap[i].opcode); } } + + qcm2l = nullptr; + Arm64Mir2LirPostInit(this); +} + +void Arm64Mir2Lir::Cleanup() +{ +} + +Arm64Mir2Lir::~Arm64Mir2Lir() +{ + Cleanup(); +} + +void Arm64Mir2Lir::Arm64Mir2LirPostInit(Arm64Mir2Lir* mir_to_lir) { } Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, @@ -633,6 +653,8 @@ void Arm64Mir2Lir::CompilerInitializeRegAlloc() { reg_pool_->next_core_reg_ = 2; reg_pool_->next_sp_reg_ = 0; reg_pool_->next_dp_reg_ = 0; + + CompilerPostInitializeRegAlloc(); } /* @@ -773,17 +795,17 @@ LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() { uint64_t Arm64Mir2Lir::GetTargetInstFlags(int opcode) { DCHECK(!IsPseudoLirOp(opcode)); - return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].flags; + return GetEncoder(UNWIDE(opcode))->flags; } const char* Arm64Mir2Lir::GetTargetInstName(int opcode) { DCHECK(!IsPseudoLirOp(opcode)); - return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].name; + return GetEncoder(UNWIDE(opcode))->name; } const char* Arm64Mir2Lir::GetTargetInstFmt(int opcode) { DCHECK(!IsPseudoLirOp(opcode)); - return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].fmt; + return GetEncoder(UNWIDE(opcode))->fmt; } RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(bool is_double_or_float, @@ -1200,4 +1222,14 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, return call_state; } +void Arm64Mir2Lir::GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { +} + +void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { + GenMoreMachineSpecificExtendedMethodMIR(bb, mir); +} + +void Arm64Mir2Lir::ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) { +} + } // namespace art diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc index f58f83070be..ab3ad03af41 100644 --- 
a/compiler/dex/quick/arm64/utility_arm64.cc +++ b/compiler/dex/quick/arm64/utility_arm64.cc @@ -91,7 +91,7 @@ size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) { bool opcode_is_wide = IS_WIDE(lir->opcode); ArmOpcode opcode = UNWIDE(lir->opcode); DCHECK(!IsPseudoLirOp(opcode)); - const ArmEncodingMap *encoder = &EncodingMap[opcode]; + const ArmEncodingMap *encoder = GetEncoder(opcode); uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton; return (bits >> 30); } @@ -617,11 +617,11 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r } DCHECK(!IsPseudoLirOp(opcode)); - if (EncodingMap[opcode].flags & IS_BINARY_OP) { + if (GetEncoder(opcode)->flags & IS_BINARY_OP) { DCHECK_EQ(shift, ENCODE_NO_SHIFT); return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg()); - } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) { - ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind; + } else if (GetEncoder(opcode)->flags & IS_TERTIARY_OP) { + ArmEncodingKind kind = GetEncoder(opcode)->field_loc[2].kind; if (kind == kFmtShift) { return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift); } @@ -654,8 +654,8 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage } DCHECK(!IsPseudoLirOp(opcode)); - if (EncodingMap[opcode].flags & IS_TERTIARY_OP) { - ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind; + if (GetEncoder(opcode)->flags & IS_TERTIARY_OP) { + ArmEncodingKind kind = GetEncoder(opcode)->field_loc[2].kind; if (kind == kFmtExtend) { return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), EncodeExtend(ext, amount)); @@ -750,11 +750,11 @@ LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_s ArmOpcode widened_opcode = r_dest.Is64Bit() ? 
WIDE(opcode) : opcode; CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit()); CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit()); - if (EncodingMap[opcode].flags & IS_QUAD_OP) { + if (GetEncoder(opcode)->flags & IS_QUAD_OP) { DCHECK(!IsExtendEncoding(shift)); return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift); } else { - DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP); + DCHECK(GetEncoder(opcode)->flags & IS_TERTIARY_OP); DCHECK_EQ(shift, ENCODE_NO_SHIFT); return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg()); } @@ -924,7 +924,7 @@ LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1 r_scratch = AllocTemp(); LoadConstant(r_scratch, value); } - if (EncodingMap[alt_opcode].flags & IS_QUAD_OP) + if (GetEncoder(alt_opcode)->flags & IS_QUAD_OP) res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info); else res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg()); @@ -998,7 +998,7 @@ LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) if (UNLIKELY(neg)) opcode = neg_opcode; - if (EncodingMap[opcode].flags & IS_QUAD_OP) + if (GetEncoder(opcode)->flags & IS_QUAD_OP) return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0); else @@ -1092,7 +1092,7 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto if (UNLIKELY(expected_scale == 0)) { // This is a tertiary op (e.g. ldrb, ldrsb), it does not not support scale. - DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U); + DCHECK_NE(GetEncoder(UNWIDE(opcode))->flags & IS_TERTIARY_OP, 0U); DCHECK_EQ(scale, 0); load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg()); } else { @@ -1173,7 +1173,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt if (UNLIKELY(expected_scale == 0)) { // This is a tertiary op (e.g. 
strb), it does not not support scale. - DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U); + DCHECK_NE(GetEncoder(UNWIDE(opcode))->flags & IS_TERTIARY_OP, 0U); DCHECK_EQ(scale, 0); store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg()); } else { @@ -1259,7 +1259,9 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor // TODO: cleaner support for index/displacement registers? Not a reference, but must match width. RegStorage r_scratch = AllocTempWide(); LoadConstantWide(r_scratch, displacement); - load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size); + load = LoadBaseIndexed(r_base, r_scratch, + (size == kReference) ? As64BitReg(r_dest) : r_dest, + 0, size); FreeTemp(r_scratch); } @@ -1350,7 +1352,9 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto // Use long sequence. RegStorage r_scratch = AllocTempWide(); LoadConstantWide(r_scratch, displacement); - store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size); + store = StoreBaseIndexed(r_base, r_scratch, + (size == kReference) ? 
As64BitReg(r_src) : r_src, + 0, size); FreeTemp(r_scratch); } diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index cb5fbb3d0c9..0a18b835530 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -1056,6 +1056,8 @@ void Mir2Lir::Materialize() { CodegenDump(); } } + + Cleanup(); } CompiledMethod* Mir2Lir::GetCompiledMethod() { diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index d451401af0b..48bf803d18e 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -1419,7 +1419,8 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2) { + RegLocation rl_src1, RegLocation rl_src2, + int opt_flags) { DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64); OpKind op = kOpBkpt; bool is_div_rem = false; @@ -1450,14 +1451,14 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, break; case Instruction::DIV_INT: case Instruction::DIV_INT_2ADDR: - check_zero = true; + check_zero = (opt_flags & MIR_IGNORE_ZERO_DIV_CHECK) ? false : true; op = kOpDiv; is_div_rem = true; break; /* NOTE: returns in kArg1 */ case Instruction::REM_INT: case Instruction::REM_INT_2ADDR: - check_zero = true; + check_zero = (opt_flags & MIR_IGNORE_ZERO_DIV_CHECK) ? 
false : true; op = kOpRem; is_div_rem = true; break; diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h index 2e4e2921bd2..941dd7f33d0 100644 --- a/compiler/dex/quick/mir_to_lir-inl.h +++ b/compiler/dex/quick/mir_to_lir-inl.h @@ -143,7 +143,9 @@ inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1 inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) { DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0); DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg; - *mask = mask->Union(reginfo_map_.Get(reg)->DefUseMask()); + if (reginfo_map_.Get(reg)) { + *mask = mask->Union(reginfo_map_.Get(reg)->DefUseMask()); + } } /* diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 6e0fe02af55..35f3deeda92 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -879,7 +879,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list case Instruction::NEG_INT: case Instruction::NOT_INT: - GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]); + GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags); break; case Instruction::NEG_LONG: @@ -944,7 +944,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1].orig_sreg)); } else { - GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]); + GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags); } break; @@ -964,7 +964,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) { GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1])); } else { - GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]); + GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags); } break; @@ -1188,6 +1188,7 @@ bool 
Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { work_half->meta.throw_insn = mir; } + MachineSpecificPreprocessMIR(bb, mir); if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) { HandleExtendedMethodMIR(bb, mir); continue; @@ -1199,6 +1200,8 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { if (head_lir) { // Eliminate redundant loads/stores and delay stores into later slots. ApplyLocalOptimizations(head_lir, last_lir_insn_); + // Apply architecture-specific optimizations + ApplyArchOptimizations(head_lir, last_lir_insn_, bb); } return false; } diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index bfd78606841..e13dd09d2be 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -149,6 +149,7 @@ class DexFileMethodInliner; class MIRGraph; class Mir2Lir; + typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, const MethodReference& target_method, uint32_t method_idx, uintptr_t direct_code, @@ -348,6 +349,8 @@ class Mir2Lir : public Backend { static const uint32_t kLowSingleStorageMask = 0x00000001; static const uint32_t kHighSingleStorageMask = 0x00000002; static const uint32_t k64SoloStorageMask = 0x00000003; + static const uint32_t kLowDoubleStorageMask = 0x00000003; + static const uint32_t kHighDoubleStorageMask = 0x0000000c; static const uint32_t k128SoloStorageMask = 0x0000000f; static const uint32_t k256SoloStorageMask = 0x000000ff; static const uint32_t k512SoloStorageMask = 0x0000ffff; @@ -723,11 +726,13 @@ class Mir2Lir : public Backend { void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir); void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir); virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir); + virtual void ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) { return; }; // Shared by all targets - implemented in ralloc_util.cc int GetSRegHi(int lowSreg); bool LiveOut(int s_reg); void SimpleRegAlloc(); + void Cleanup(); void ResetRegPool(); void 
CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num); void DumpRegPool(GrowableArray* regs); @@ -783,6 +788,8 @@ class Mir2Lir : public Backend { void MarkClean(RegLocation loc); void MarkDirty(RegLocation loc); void MarkInUse(RegStorage reg); + void MarkFree(RegStorage reg); + void MarkDead(RegStorage reg); bool CheckCorePoolSanity(); virtual RegLocation UpdateLoc(RegLocation loc); virtual RegLocation UpdateLocWide(RegLocation loc); @@ -880,7 +887,7 @@ class Mir2Lir : public Backend { // This will be overridden by x86 implementation. virtual void GenConstWide(RegLocation rl_dest, int64_t value); virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_src1, RegLocation rl_src2); + RegLocation rl_src1, RegLocation rl_src2, int opt_flags); // Shared by all targets - implemented in gen_invoke.cc. LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc, @@ -980,6 +987,7 @@ class Mir2Lir : public Backend { bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile); bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object, bool is_volatile, bool is_ordered); + virtual int LoadArgRegs(CallInfo* info, int call_state, NextCallInsn next_call_insn, const MethodReference& target_method, @@ -1331,6 +1339,9 @@ class Mir2Lir : public Backend { */ virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir); + /* non virtual so it doesn't have to be implemented */ + virtual void MachineSpecificPreprocessMIR(BasicBlock* bb, MIR* mir) { }; + /** * @brief Lowers the kMirOpSelect MIR into LIR. * @param bb The basic block in which the MIR is from. 
@@ -1395,6 +1406,9 @@ class Mir2Lir : public Backend { virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0; virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0; virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0; + virtual LIR* OpBkpt() { // not abstract so it doesn't have to be implemented for other platforms + return NULL; + }; virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0; virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0; virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0; diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc index bed86d86a3c..eb3fbc7ba78 100644 --- a/compiler/dex/quick/ralloc_util.cc +++ b/compiler/dex/quick/ralloc_util.cc @@ -944,6 +944,24 @@ void Mir2Lir::MarkInUse(RegStorage reg) { } } +void Mir2Lir::MarkFree(RegStorage reg) { + if (reg.IsPair()) { + GetRegInfo(reg.GetLow())->MarkFree(); + GetRegInfo(reg.GetHigh())->MarkFree(); + } else { + GetRegInfo(reg)->MarkFree(); + } +} + +void Mir2Lir::MarkDead(RegStorage reg) { + if (reg.IsPair()) { + GetRegInfo(reg.GetLow())->MarkDead(); + GetRegInfo(reg.GetHigh())->MarkDead(); + } else { + GetRegInfo(reg)->MarkDead(); + } +} + bool Mir2Lir::CheckCorePoolSanity() { GrowableArray::Iterator it(&tempreg_info_); for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { @@ -1407,6 +1425,9 @@ void Mir2Lir::SimpleRegAlloc() { frame_size_ = ComputeFrameSize(); } +void Mir2Lir::Cleanup() { +} + /* * Get the "real" sreg number associated with an s_reg slot. In general, * s_reg values passed through codegen are the SSA names created by diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h index 436cdb54404..48e2555e58a 100644 --- a/compiler/dex/quick/resource_mask.h +++ b/compiler/dex/quick/resource_mask.h @@ -85,6 +85,14 @@ class ResourceMask { start_bit >= 64u ? UINT64_C(3) << (start_bit - 64u) : 0u); } + // Four consecutive bits. 
The start_bit must be even. + static constexpr ResourceMask FourBits(size_t start_bit) { + return + DCHECK_CONSTEXPR((start_bit & 1u) == 0u, << start_bit << " isn't even", Bit(0)) + ResourceMask(start_bit >= 64u ? 0u : UINT64_C(0xf) << start_bit, + start_bit >= 64u ? UINT64_C(0xf) << (start_bit - 64u) : 0u); + } + static constexpr ResourceMask NoBits() { return ResourceMask(UINT64_C(0), UINT64_C(0)); } diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index d74caae0c21..4755b6aff53 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -305,7 +305,8 @@ class X86Mir2Lir : public Mir2Lir { * @param rl_rhs Right hand operand. */ void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs, - RegLocation rl_rhs) OVERRIDE; + RegLocation rl_rhs, int opt_flags) OVERRIDE; + /* * @brief Load the Method* of a dex method into the register. diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index baa72a0e9c5..7d20ca92e5f 100755 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -2690,7 +2690,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, } void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_lhs, RegLocation rl_rhs) { + RegLocation rl_lhs, RegLocation rl_rhs, int opt_flags) { OpKind op = kOpBkpt; bool is_div_rem = false; bool unary = false; diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h index 706933a1b47..6178bee5700 100644 --- a/compiler/dex/reg_storage.h +++ b/compiler/dex/reg_storage.h @@ -158,6 +158,10 @@ class RegStorage { return ((reg_ & kShapeMask) == k64BitSolo); } + constexpr bool Is128BitSolo() const { + return ((reg_ & kShapeMask) == k128BitSolo); + } + constexpr bool IsPair() const { return ((reg_ & kShapeMask) == k64BitPair); } @@ -174,6 +178,12 @@ class RegStorage { (reg_ & (kFloatingPoint | 
k64BitMask)) == (kFloatingPoint | k64Bits); } + constexpr bool IsQuad() const { + return + DCHECK_CONSTEXPR(Valid(), , false) + (reg_ & (kFloatingPoint | k128BitSolo)) == (kFloatingPoint | k128BitSolo); + } + constexpr bool IsSingle() const { return DCHECK_CONSTEXPR(Valid(), , false) @@ -188,6 +198,10 @@ class RegStorage { return (reg & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits); } + static constexpr bool IsQuad(uint16_t reg) { + return (reg & (kFloatingPoint | k128BitSolo)) == (kFloatingPoint | k128BitSolo); + } + static constexpr bool IsSingle(uint16_t reg) { return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint; } @@ -229,24 +243,60 @@ class RegStorage { return ((reg_ & kRegTypeMask) | k32BitSolo); } + // Retrieve the low register num of a pair + int GetLowRegNum() const { + DCHECK(IsPair()); + return (reg_ & kRegNumMask); + } + // Create a stand-alone RegStorage from the low reg of a pair. RegStorage GetLow() const { DCHECK(IsPair()); return RegStorage(k32BitSolo, reg_ & kRegTypeMask); } + // Create a stand-alone RegStorage from the low 32bit of 64bit float solo. + RegStorage GetLowFromFloatSolo64() const { + DCHECK(IsFloat() && Is64BitSolo()); + return RegStorage(k32BitSolo, ((reg_ & kRegNumMask) << 1) | kFloatingPoint); + } + + // Create a stand-alone RegStorage from the low 64bit of 128bit float solo. + RegStorage GetLowFromFloatSolo128() const { + DCHECK(IsFloat() && Is128BitSolo()); + return RegStorage(k64BitSolo, ((reg_ & kRegNumMask) << 1) | kFloatingPoint); + } + // Retrieve the most significant register of a pair. int GetHighReg() const { DCHECK(IsPair()); return k32BitSolo | ((reg_ & kHighRegMask) >> kHighRegShift) | (reg_ & kFloatingPoint); } + // Retrieve the high register num of a pair. + int GetHighRegNum() const { + DCHECK(IsPair()); + return ((reg_ & kHighRegMask) >> kHighRegShift); + } + // Create a stand-alone RegStorage from the high reg of a pair. 
RegStorage GetHigh() const { DCHECK(IsPair()); return RegStorage(kValid | GetHighReg()); } + // Create a stand-alone RegStorage from the high 32bit of 64bit float solo. + RegStorage GetHighFromFloatSolo64() const { + DCHECK(IsFloat() && Is64BitSolo()); + return RegStorage(k32BitSolo, (((reg_ & kRegNumMask) << 1) +1) | kFloatingPoint); + } + + // Create a stand-alone RegStorage from the high 64bit of 128bit float solo. + RegStorage GetHighFromFloatSolo128() const { + DCHECK(IsFloat() && Is128BitSolo()); + return RegStorage(k64BitSolo, (((reg_ & kRegNumMask) << 1) +1) | kFloatingPoint); + } + void SetHighReg(int reg) { DCHECK(IsPair()); reg_ = (reg_ & ~kHighRegMask) | ((reg & kHighRegNumMask) << kHighRegShift); @@ -309,6 +359,11 @@ class RegStorage { return RegStorage(k64BitSolo, (reg_num & kRegNumMask) | kFloatingPoint); } + // Create a floating point 128-bit solo. + static RegStorage FloatSolo128(int reg_num) { + return RegStorage(k128BitSolo, (reg_num & kRegNumMask) | kFloatingPoint); + } + static constexpr RegStorage InvalidReg() { return RegStorage(kInvalid); } diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h index 4e51b0e150b..9b20b58c419 100644 --- a/compiler/driver/compiler_driver-inl.h +++ b/compiler/driver/compiler_driver-inl.h @@ -97,6 +97,15 @@ inline std::pair CompilerDriver::IsFastInstanceField( mirror::ArtField* resolved_field, uint16_t field_idx) { DCHECK(!resolved_field->IsStatic()); mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + // Keep these classes in sync with prepareSubclassReplacement() calls in libxposed-art. 
+ mirror::Class* super_class = fields_class->GetSuperClass(); + while (super_class != nullptr) { + if (super_class->DescriptorEquals("Landroid/content/res/TypedArray;")) { + VLOG(compiler) << "Preventing fast access to " << PrettyField(resolved_field); + return std::make_pair(false, false); + } + super_class = super_class->GetSuperClass(); + } bool fast_get = referrer_class != nullptr && referrer_class->CanAccessResolvedField(fields_class, resolved_field, dex_cache, field_idx); diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index b99aca17a50..1e32c2e9a13 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -1184,6 +1184,9 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType // TODO: support patching on all architectures. use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_); } + // Direct branching to the method's code offset means that Xposed hooks are not considered + // So we always need to go through the dex cache/ArtMethod + use_dex_cache = true; mirror::Class* declaring_class = method->GetDeclaringClass(); bool method_code_in_boot = (declaring_class->GetClassLoader() == nullptr); if (!use_dex_cache) { diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index ed1175b48bc..9be294bfc90 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -168,6 +168,7 @@ class JniCompilerTest : public CommonCompilerTest { void CheckParameterAlignImpl(); void MaxParamNumberImpl(); void WithoutImplementationImpl(); + void WithoutImplementationRefReturnImpl(); void StackArgsIntsFirstImpl(); void StackArgsFloatsFirstImpl(); void StackArgsMixedImpl(); @@ -1530,6 +1531,20 @@ void JniCompilerTest::WithoutImplementationImpl() { JNI_TEST(WithoutImplementation) +void JniCompilerTest::WithoutImplementationRefReturnImpl() { + // This will lead to error messages in the log. 
+ ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(false, "withoutImplementationRefReturn", "()Ljava/lang/Object;", nullptr); + + env_->CallObjectMethod(jobj_, jmethod_); + + EXPECT_TRUE(Thread::Current()->IsExceptionPending()); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); +} + +JNI_TEST(WithoutImplementationRefReturn) + void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3, jint i4, jint i5, jint i6, jint i7, jint i8, jint i9, jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4, diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h index a1a3312576e..e93ee866053 100644 --- a/compiler/utils/growable_array.h +++ b/compiler/utils/growable_array.h @@ -166,6 +166,9 @@ class GrowableArray { } // We should either have found the element, or it was the last (unscanned) element. DCHECK(found || (element == elem_list_[num_used_ - 1])); + // if element is not in array, don't touch anything + if(!found && element != elem_list_[num_used_ - 1]) + return; num_used_--; }; diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc index 67794c8e055..e31072b5295 100644 --- a/dalvikvm/dalvikvm.cc +++ b/dalvikvm/dalvikvm.cc @@ -168,7 +168,6 @@ static int dalvikvm(int argc, char** argv) { init_args.version = JNI_VERSION_1_6; init_args.options = options.get(); init_args.nOptions = curr_opt; - init_args.ignoreUnrecognized = JNI_FALSE; // Start the runtime. The current thread becomes the main thread. JavaVM* vm = NULL; diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk index 28db7115d1e..c24f6d6a3b1 100644 --- a/dex2oat/Android.mk +++ b/dex2oat/Android.mk @@ -35,11 +35,3 @@ endif ifeq ($(ART_BUILD_TARGET_DEBUG),true) $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libartd-compiler,art/compiler,target,debug,$(dex2oat_arch))) endif - -# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target. 
-ifeq ($(ART_BUILD_NDEBUG),true) - $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug)) -endif -ifeq ($(ART_BUILD_DEBUG),true) - $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug)) -endif diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index c41147265f9..0bfe0e800e3 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -70,6 +70,8 @@ namespace art { static int original_argc; static char** original_argv; +static uint32_t original_oat_checksum = 0; +static bool is_recompiling = false; static std::string CommandLine() { std::vector command; @@ -504,6 +506,10 @@ class Dex2Oat { return true; } + void SetRuntimeRecompiling(bool new_value) { + runtime_->SetRecompiling(new_value); + } + private: explicit Dex2Oat(const CompilerOptions* compiler_options, Compiler::Kind compiler_kind, @@ -604,7 +610,40 @@ static size_t OpenDexFiles(const std::vector& dex_filenames, LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'"; continue; } - if (!DexFile::Open(dex_filename, dex_location, &error_msg, &dex_files)) { + if (EndsWith(dex_filename, ".oat") || EndsWith(dex_filename, ".odex")) { + std::unique_ptr file(OS::OpenFileForReading(dex_filename)); + if (file.get() == nullptr) { + LOG(WARNING) << "Failed to open file '" << dex_filename << "': " << strerror(errno);; + ++failure_count; + continue; + } + std::unique_ptr elf_file(ElfFile::Open(file.release(), PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg)); + if (elf_file.get() == nullptr) { + LOG(WARNING) << "Failed to open ELF file from '" << dex_filename << "': " << error_msg; + ++failure_count; + continue; + } + const OatFile* oat_file = OatFile::OpenWithElfFile(elf_file.release(), dex_filename, &error_msg); + if (oat_file == nullptr) { + LOG(WARNING) << "Failed to open oat file from '" << dex_filename << "': " << error_msg; + ++failure_count; + continue; + } else { + for (const 
OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) { + CHECK(oat_dex_file != nullptr); + std::unique_ptr dex_file(oat_dex_file->OpenDexFile(&error_msg)); + if (dex_file.get() != nullptr) { + dex_files.push_back(dex_file.release()); + } else { + LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation() + << "' from file '" << dex_filename << "': " << error_msg; + ++failure_count; + } + } + } + is_recompiling = true; + original_oat_checksum = oat_file->GetOatHeader().GetChecksum(); + } else if (!DexFile::Open(dex_filename, dex_location, &error_msg, &dex_files)) { LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg; ++failure_count; } @@ -751,6 +790,12 @@ static InstructionSetFeatures ParseFeatureList(std::string str) { } else if (feature == "nolpae") { // Turn off support for Large Physical Address Extension. result.SetHasLpae(false); + } else if (feature == "needfix_835769") { + // Need to apply the fix for Cortex-A53 erratum 835769. + result.SetFix835769(true); + } else if (feature == "noneedfix_835769") { + // No need to apply the fix for Cortex-A53 erratum 835769. + result.SetFix835769(false); } else { Usage("Unknown instruction set feature: '%s'", feature.c_str()); } @@ -853,6 +898,7 @@ static int dex2oat(int argc, char** argv) { std::vector dex_locations; int zip_fd = -1; std::string zip_location; + std::string odex_filename; std::string oat_filename; std::string oat_symbols; std::string oat_location; @@ -1104,8 +1150,6 @@ static int dex2oat(int argc, char** argv) { if (swap_fd < 0) { Usage("--swap-fd passed a negative value %d", swap_fd); } - } else { - Usage("Unknown argument %s", option.data()); } } @@ -1377,6 +1421,10 @@ static int dex2oat(int argc, char** argv) { } std::unique_ptr dex2oat(p_dex2oat); + if (is_recompiling) { + dex2oat->SetRuntimeRecompiling(true); + } + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, // give it away now so that we don't starve GC. 
Thread* self = Thread::Current(); @@ -1435,6 +1483,16 @@ static int dex2oat(int argc, char** argv) { if (boot_image_option.empty()) { dex_files = Runtime::Current()->GetClassLinker()->GetBootClassPath(); } else { + if (dex_filenames.empty()) { + odex_filename = DexFilenameToOdexFilename(zip_location, instruction_set); + if (OS::FileExists(odex_filename.c_str())) { + LOG(INFO) << "Using '" << odex_filename << "' instead of file descriptor"; + dex_filenames.push_back(odex_filename.data()); + dex_locations.push_back(odex_filename.data()); + is_recompiling = true; + dex2oat->SetRuntimeRecompiling(true); + } + } if (dex_filenames.empty()) { ATRACE_BEGIN("Opening zip archive from file descriptor"); std::string error_msg; @@ -1463,6 +1521,9 @@ static int dex2oat(int argc, char** argv) { oat_file->Erase(); return EXIT_FAILURE; } + if (is_recompiling) { + dex2oat->SetRuntimeRecompiling(true); + } } const bool kSaveDexInput = false; @@ -1486,7 +1547,7 @@ static int dex2oat(int argc, char** argv) { } // Ensure opened dex files are writable for dex-to-dex transformations. for (const auto& dex_file : dex_files) { - if (!dex_file->EnableWrite()) { + if (!is_recompiling && !dex_file->EnableWrite()) { PLOG(ERROR) << "Failed to make .dex file writeable '" << dex_file->GetLocation() << "'\n"; } } @@ -1536,6 +1597,10 @@ static int dex2oat(int argc, char** argv) { oss << kRuntimeISA; key_value_store->Put(OatHeader::kDex2OatHostKey, oss.str()); key_value_store->Put(OatHeader::kPicKey, compile_pic ? 
"true" : "false"); + key_value_store->Put(OatHeader::kXposedOatVersionKey, OatHeader::kXposedOatCurrentVersion); + if (image && original_oat_checksum != 0) { + key_value_store->Put(OatHeader::kOriginalOatChecksumKey, StringPrintf("0x%08x", original_oat_checksum)); + } } std::unique_ptr compiler(dex2oat->CreateOatFile(boot_image_option, diff --git a/disassembler/Android.mk b/disassembler/Android.mk index a0abc9e1f06..e87f818f09d 100644 --- a/disassembler/Android.mk +++ b/disassembler/Android.mk @@ -98,10 +98,3 @@ endif ifeq ($(ART_BUILD_TARGET_DEBUG),true) $(eval $(call build-libart-disassembler,target,debug)) endif -# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target. -ifeq ($(ART_BUILD_NDEBUG),true) - $(eval $(call build-libart-disassembler,host,ndebug)) -endif -ifeq ($(ART_BUILD_DEBUG),true) - $(eval $(call build-libart-disassembler,host,debug)) -endif diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index 54e77612a37..9ae66e6ee4d 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -1536,7 +1536,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) } else if ((op2 >> 3) == 6) { // 0110xxx // Multiply, multiply accumulate, and absolute difference op1 = (instr >> 20) & 0x7; - op2 = (instr >> 4) & 0x2; + op2 = (instr >> 4) & 0x1; ArmRegister Ra(instr, 12); ArmRegister Rn(instr, 16); ArmRegister Rm(instr, 0); diff --git a/oatdump/Android.mk b/oatdump/Android.mk index c35ff857c4a..68e8a2bbbe3 100644 --- a/oatdump/Android.mk +++ b/oatdump/Android.mk @@ -28,13 +28,6 @@ ifeq ($(ART_BUILD_TARGET_DEBUG),true) $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler,art/disassembler,target,debug)) endif -ifeq ($(ART_BUILD_HOST_NDEBUG),true) - $(eval $(call 
build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler,art/disassembler,host,ndebug)) -endif -ifeq ($(ART_BUILD_HOST_DEBUG),true) - $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler,art/disassembler,host,debug)) -endif - ######################################################################## # oatdump targets diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 4734a9c478a..b76b7020a9e 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -102,6 +102,10 @@ static void usage() { " --no-disassemble may be used to disable disassembly.\n" " Example: --no-disassemble\n" "\n"); + fprintf(stderr, + " --method-filter=: only dumps methods that contain the filter.\n" + " Example: --method-filter=foo\n" + "\n"); exit(EXIT_FAILURE); } @@ -123,18 +127,21 @@ class OatDumperOptions { bool dump_raw_gc_map, bool dump_vmap, bool disassemble_code, - bool absolute_addresses) + bool absolute_addresses, + const char* method_filter) : dump_raw_mapping_table_(dump_raw_mapping_table), dump_raw_gc_map_(dump_raw_gc_map), dump_vmap_(dump_vmap), disassemble_code_(disassemble_code), - absolute_addresses_(absolute_addresses) {} + absolute_addresses_(absolute_addresses), + method_filter_(method_filter) {} const bool dump_raw_mapping_table_; const bool dump_raw_gc_map_; const bool dump_vmap_; const bool disassemble_code_; const bool absolute_addresses_; + const char* const method_filter_; }; class OatDumper { @@ -450,8 +457,12 @@ class OatDumper { uint32_t dex_method_idx, const DexFile::CodeItem* code_item, uint32_t method_access_flags) { bool success = true; + std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true); + if (pretty_method.find(options_->method_filter_) == std::string::npos) { + return success; + } os << StringPrintf("%d: %s (dex_method_idx=%d)\n", - class_method_index, PrettyMethod(dex_method_idx, dex_file, true).c_str(), + class_method_index, pretty_method.c_str(), dex_method_idx); Indenter 
indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); std::unique_ptr indent1_os(new std::ostream(&indent1_filter)); @@ -1701,6 +1712,7 @@ static int oatdump(int argc, char** argv) { } const char* oat_filename = nullptr; + const char* method_filter_ = ""; const char* image_location = nullptr; const char* boot_image_location = nullptr; InstructionSet instruction_set = kRuntimeISA; @@ -1749,9 +1761,8 @@ static int oatdump(int argc, char** argv) { usage(); } os = out.get(); - } else { - fprintf(stderr, "Unknown argument %s\n", option.data()); - usage(); + } else if (option.starts_with("--method-filter=")) { + method_filter_ = option.substr(strlen("--method-filter=")).data(); } } @@ -1771,7 +1782,8 @@ static int oatdump(int argc, char** argv) { dump_raw_gc_map, dump_vmap, disassemble_code, - absolute_addresses)); + absolute_addresses, + method_filter_)); MemMap::Init(); if (oat_filename != nullptr) { std::string error_msg; diff --git a/patchoat/Android.mk b/patchoat/Android.mk index 8b6b9ad773b..1f59bbdc6ed 100644 --- a/patchoat/Android.mk +++ b/patchoat/Android.mk @@ -30,16 +30,8 @@ else endif ifeq ($(ART_BUILD_TARGET_NDEBUG),true) - $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),libcutils,art/compiler,target,ndebug,$(patchoat_arch))) + $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),libcutils,art/compiler,target,ndebug,$(patchoat_arch),libz)) endif ifeq ($(ART_BUILD_TARGET_DEBUG),true) - $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),libcutils,art/compiler,target,debug,$(patchoat_arch))) -endif - -# We always build patchoat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target. 
-ifeq ($(ART_BUILD_NDEBUG),true) - $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,ndebug)) -endif -ifeq ($(ART_BUILD_DEBUG),true) - $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,debug)) + $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),libcutils,art/compiler,target,debug,$(patchoat_arch),libz)) endif diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index b046ea1ef43..23d58b642c1 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -45,6 +45,7 @@ #include "runtime.h" #include "scoped_thread_state_change.h" #include "thread.h" +#include "zlib.h" #include "utils.h" namespace art { @@ -195,7 +196,7 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d File* output_oat, File* output_image, InstructionSet isa, TimingLogger* timings, bool output_oat_opened_from_fd, - bool new_oat_out) { + bool new_oat_out, bool input_oat_filename_dummy) { CHECK(Runtime::Current() == nullptr); CHECK(output_image != nullptr); CHECK_GE(output_image->Fd(), 0); @@ -284,11 +285,9 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d // Error logged by IsOatPic return false; } else if (is_oat_pic == PIC) { - // Do not need to do ELF-file patching. Create a symlink and skip the ELF patching. - if (!ReplaceOatFileWithSymlink(input_oat->GetPath(), - output_oat->GetPath(), - output_oat_opened_from_fd, - new_oat_out)) { + // Do not need to do ELF-file patching. Create a symlink or make a copy. + if (!SymlinkOrCopy(input_oat, output_oat, output_oat_opened_from_fd, + new_oat_out, input_oat_filename_dummy)) { // Errors already logged by above call. return false; } @@ -398,36 +397,70 @@ PatchOat::MaybePic PatchOat::IsOatPic(const ElfFile* oat_in) { return is_pic ? 
PIC : NOT_PIC; } -bool PatchOat::ReplaceOatFileWithSymlink(const std::string& input_oat_filename, - const std::string& output_oat_filename, - bool output_oat_opened_from_fd, - bool new_oat_out) { - // Need a file when we are PIC, since we symlink over it. Refusing to symlink into FD. - if (output_oat_opened_from_fd) { - // TODO: installd uses --output-oat-fd. Should we change class linking logic for PIC? - LOG(ERROR) << "No output oat filename specified, needs filename for when we are PIC"; - return false; - } +const size_t COPY_BUFLEN = 16384; + +bool PatchOat::SymlinkOrCopy(File* input_oat, + File* output_oat, + bool output_oat_opened_from_fd, + bool new_oat_out, bool make_copy) { + std::string output_oat_filename = output_oat->GetPath(); + std::string input_oat_filename = input_oat->GetPath(); + + if (make_copy) { + // Make a copy of the PIC oat file. + std::unique_ptr buf(new char[COPY_BUFLEN]); + int64_t len; + int64_t read_size = 0; + while (true) { + len = input_oat->Read(buf.get(), COPY_BUFLEN, read_size); + if (len <= 0) { + break; + } + if (!output_oat->WriteFully(buf.get(), len)) { + len = -1; + break; + } + read_size += len; + } - // Image was PIC. Create symlink where the oat is supposed to go. - if (!new_oat_out) { - LOG(ERROR) << "Oat file " << output_oat_filename << " already exists, refusing to overwrite"; - return false; - } + if (len < 0) { + int err = errno; + LOG(ERROR) << "Failed to copy " << input_oat_filename << " to " << output_oat_filename + << ": error(" << err << "): " << strerror(err); + return false; + } - // Delete the original file, since we won't need it. - TEMP_FAILURE_RETRY(unlink(output_oat_filename.c_str())); + if (kIsDebugBuild) { + LOG(INFO) << "Copied " << input_oat_filename << " -> " << output_oat_filename; + } + } else { + // Need a file when we are PIC, since we symlink over it. Refusing to symlink into FD. + if (output_oat_opened_from_fd) { + // TODO: installd uses --output-oat-fd. 
Should we change class linking logic for PIC? + LOG(ERROR) << "No output oat filename specified, needs filename for when we are PIC"; + return false; + } - // Create a symlink from the old oat to the new oat - if (symlink(input_oat_filename.c_str(), output_oat_filename.c_str()) < 0) { - int err = errno; - LOG(ERROR) << "Failed to create symlink at " << output_oat_filename - << " error(" << err << "): " << strerror(err); - return false; - } + // Image was PIC. Create symlink where the oat is supposed to go. + if (!new_oat_out) { + LOG(ERROR) << "Oat file " << output_oat_filename << " already exists, refusing to overwrite"; + return false; + } - if (kIsDebugBuild) { - LOG(INFO) << "Created symlink " << output_oat_filename << " -> " << input_oat_filename; + // Delete the original file, since we won't need it. + TEMP_FAILURE_RETRY(unlink(output_oat_filename.c_str())); + + // Create a symlink from the old oat to the new oat. + if (symlink(input_oat_filename.c_str(), output_oat_filename.c_str()) == 0) { + if (kIsDebugBuild) { + LOG(INFO) << "Created symlink " << output_oat_filename << " -> " << input_oat_filename; + } + } else { + int err = errno; + LOG(ERROR) << "Failed to create symlink at " << output_oat_filename + << " error(" << err << "): " << strerror(err); + return false; + } } return true; @@ -561,7 +594,8 @@ void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) { } bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogger* timings, - bool output_oat_opened_from_fd, bool new_oat_out) { + bool output_oat_opened_from_fd, bool new_oat_out, + bool input_oat_filename_dummy) { CHECK(input_oat != nullptr); CHECK(output_oat != nullptr); CHECK_GE(input_oat->Fd(), 0); @@ -581,12 +615,10 @@ bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogge // Error logged by IsOatPic return false; } else if (is_oat_pic == PIC) { - // Do not need to do ELF-file patching. Create a symlink and skip the rest. 
+ // Do not need to do ELF-file patching. Create a symlink or make a copy. // Any errors will be logged by the function call. - return ReplaceOatFileWithSymlink(input_oat->GetPath(), - output_oat->GetPath(), - output_oat_opened_from_fd, - new_oat_out); + return SymlinkOrCopy(input_oat, output_oat, output_oat_opened_from_fd, + new_oat_out, input_oat_filename_dummy); } else { CHECK(is_oat_pic == NOT_PIC); } @@ -800,6 +832,16 @@ static void Usage(const char *fmt, ...) { UsageError(" --input-oat-fd=: Specifies the file-descriptor of the oat file"); UsageError(" to be patched."); UsageError(""); + UsageError(" --input-oat-gz-file=: Specifies the exact filename of"); + UsageError(" the gzip-compressed oat file to be patched."); + UsageError(""); + UsageError(" --input-oat-gz-fd=: Specifies the file-descriptor of"); + UsageError(" the gzip-compressed oat file to be patched."); + UsageError(""); + UsageError(" --swap-file=: Specifies a temporary gzip file."); + UsageError(""); + UsageError(" --swap-fd=: Specifies a temporary gzip file descriptor."); + UsageError(""); UsageError(" --input-oat-location=: Specifies the 'location' to read the patched"); UsageError(" oat file from. If used one must also supply the --instruction-set"); UsageError(""); @@ -908,6 +950,67 @@ static bool FinishFile(File* file, bool close) { } } +static int Inflate(int input_oat_gz_fd, const std::string& input_oat_gz_filename, int tmp_fd, std::string* err) { + gzFile in_gzfile = input_oat_gz_fd != -1 ? gzdopen(input_oat_gz_fd, "rb") : + gzopen(input_oat_gz_filename.c_str(), "rb"); + if (in_gzfile == nullptr) { + *err = input_oat_gz_fd != -1 ? 
+ StringPrintf("Could not open gzip file fd=%d: %s", + input_oat_gz_fd, strerror(errno)) : + StringPrintf("Could not open gzip file %s: %s", + input_oat_gz_filename.c_str(), strerror(errno)); + return -1; + } + + int out_fd = dup(tmp_fd); + if (out_fd == -1) { + *err = strerror(errno); + return -1; + } + + constexpr size_t INFLATE_BUFLEN = 16384; + std::unique_ptr out_file(new File(out_fd, false)); + std::unique_ptr buf(new Byte[INFLATE_BUFLEN]); + int len; + + while (0 < (len = gzread(in_gzfile, buf.get(), INFLATE_BUFLEN))) { + if (!out_file->WriteFully(buf.get(), len)) { + *err = StringPrintf("Could not write to fd=%d: %s", tmp_fd, out_file->GetPath().c_str()); + gzclose(in_gzfile); + return -1; + } + } + + int errnum; + const char* gzerrstr = gzerror(in_gzfile, &errnum); + + if (len < 0 || errnum != Z_OK) { + *err = input_oat_gz_fd != -1 ? + StringPrintf("Could not inflate gzip file fd=%d: %s", + input_oat_gz_fd, gzerrstr) : + StringPrintf("Could not inflate gzip file %s: %s", + input_oat_gz_filename.c_str(), gzerrstr); + gzclose(in_gzfile); + return -1; + } + + if ((errnum = gzclose(in_gzfile)) != Z_OK) { + *err = input_oat_gz_fd != -1 ? 
+ StringPrintf("Could not close gzip file fd=%d: gzclose() returned %d", + input_oat_gz_fd, errnum) : + StringPrintf("Could not close gzip file %s: gzclose() returned %d", + input_oat_gz_filename.c_str(), errnum); + } + + if (out_file->Flush() != 0) { + *err = StringPrintf("Could not flush tmp file fd=%d", tmp_fd); + return -1; + } + + out_file->DisableAutoClose(); + return out_fd; +} + static int patchoat(int argc, char **argv) { InitLogging(argv); MemMap::Init(); @@ -932,8 +1035,13 @@ static int patchoat(int argc, char **argv) { bool isa_set = false; InstructionSet isa = kNone; std::string input_oat_filename; + bool input_oat_filename_dummy = false; + std::string input_oat_gz_filename; std::string input_oat_location; int input_oat_fd = -1; + int input_oat_gz_fd = -1; + std::string swap_file_name; + int swap_fd = -1; bool have_input_oat = false; std::string input_image_location; std::string output_oat_filename; @@ -968,19 +1076,29 @@ static int patchoat(int argc, char **argv) { } } else if (option.starts_with("--input-oat-location=")) { if (have_input_oat) { - Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used."); + Usage("Only one of --input-oat-file, --input-oat-gz-file, --input-oat-location, " + "--input-oat-fd and --input-oat-gz-fd may be used."); } have_input_oat = true; input_oat_location = option.substr(strlen("--input-oat-location=")).data(); } else if (option.starts_with("--input-oat-file=")) { if (have_input_oat) { - Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used."); + Usage("Only one of --input-oat-file, --input-oat-gz-file, --input-oat-location, " + "--input-oat-fd and --input-oat-gz-fd may be used."); } have_input_oat = true; input_oat_filename = option.substr(strlen("--input-oat-file=")).data(); + } else if (option.starts_with("--input-oat-gz-file=")) { + if (have_input_oat) { + Usage("Only one of --input-oat-file, --input-oat-gz-file, --input-oat-location, " + 
"--input-oat-fd and --input-oat-gz-fd may be used."); + } + have_input_oat = true; + input_oat_gz_filename = option.substr(strlen("--input-oat-gz-file=")).data(); } else if (option.starts_with("--input-oat-fd=")) { if (have_input_oat) { - Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used."); + Usage("Only one of --input-oat-file, --input-oat-gz-file, --input-oat-location, " + "--input-oat-fd and --input-oat-gz-fd may be used."); } have_input_oat = true; const char* oat_fd_str = option.substr(strlen("--input-oat-fd=")).data(); @@ -990,6 +1108,29 @@ static int patchoat(int argc, char **argv) { if (input_oat_fd < 0) { Usage("--input-oat-fd pass a negative value %d", input_oat_fd); } + } else if (option.starts_with("--input-oat-gz-fd=")) { + if (have_input_oat) { + Usage("Only one of --input-oat-file, --input-oat-gz-file, --input-oat-location, " + "--input-oat-fd and --input-oat-gz-fd may be used."); + } + have_input_oat = true; + const char* oat_gz_fd_str = option.substr(strlen("--input-oat-gz-fd=")).data(); + if (!ParseInt(oat_gz_fd_str, &input_oat_gz_fd)) { + Usage("Failed to parse --input-oat-fd argument '%s' as an integer", oat_gz_fd_str); + } + if (input_oat_gz_fd < 0) { + Usage("--input-oat-gz-fd pass a negative value %d", input_oat_gz_fd); + } + } else if (option.starts_with("--swap-file=")) { + swap_file_name = option.substr(strlen("--swap-file=")).data(); + } else if (option.starts_with("--swap-fd=")) { + const char* swap_fd_str = option.substr(strlen("--swap-fd=")).data(); + if (!ParseInt(swap_fd_str, &swap_fd)) { + Usage("Failed to parse --swap-fd argument '%s' as an integer", swap_fd_str); + } + if (swap_fd < 0) { + Usage("--swap-fd passed a negative value %d", swap_fd); + } } else if (option.starts_with("--input-image-location=")) { input_image_location = option.substr(strlen("--input-image-location=")).data(); } else if (option.starts_with("--output-oat-file=")) { @@ -1059,8 +1200,6 @@ static int patchoat(int argc, 
char **argv) { dump_timings = true; } else if (option == "--no-dump-timings") { dump_timings = false; - } else { - Usage("Unknown argument %s", option.data()); } } @@ -1088,6 +1227,11 @@ static int patchoat(int argc, char **argv) { Usage("Either both input and output image must be supplied or niether must be."); } + if ((input_oat_gz_fd != -1 || !input_oat_gz_filename.empty()) != + (swap_fd != -1 || !swap_file_name.empty())) { + Usage("Either both input gzip and swap must be supplied or niether must be."); + } + // We know we have both the input and output so rename for clarity. bool have_image_files = have_output_image; bool have_oat_files = have_output_oat; @@ -1111,6 +1255,26 @@ static int patchoat(int argc, char **argv) { LOG(INFO) << "Using input-oat-file " << input_oat_filename; } } + + // Swap file handling. + // + // If the swap fd is not -1, we assume this is the file descriptor of an open but unlinked file + // that we can use for swap. + // + // If the swap fd is -1 and we have a swap-file string, open the given file as a swap file. We + // will immediately unlink to satisfy the swap fd assumption. + std::unique_ptr swap_file; + if (swap_fd == -1 && !swap_file_name.empty()) { + swap_file.reset(OS::CreateEmptyFile(swap_file_name.c_str())); + if (swap_file.get() == nullptr) { + PLOG(ERROR) << "Failed to create swap file: " << swap_file_name; + return EXIT_FAILURE; + } + swap_fd = swap_file->Fd(); + swap_file->MarkUnchecked(); // We don't want to track this, it will be unlinked immediately. 
+ unlink(swap_file_name.c_str()); + } + if (!patched_image_location.empty()) { if (!isa_set) { Usage("specifying a location requires specifying an instruction set"); @@ -1189,9 +1353,17 @@ static int patchoat(int argc, char **argv) { } if (have_oat_files) { + if (input_oat_gz_fd != -1 || !input_oat_gz_filename.empty()) { + std::string err; + input_oat_fd = Inflate(input_oat_gz_fd, input_oat_gz_filename, swap_fd, &err); + if (input_oat_fd == -1) { + LOG(ERROR) << "Failed to inflate input file: " << err; + } + } if (input_oat_fd != -1) { if (input_oat_filename.empty()) { input_oat_filename = "input-oat-file"; + input_oat_filename_dummy = true; } input_oat.reset(new File(input_oat_fd, input_oat_filename, false)); if (input_oat == nullptr) { @@ -1284,7 +1456,7 @@ static int patchoat(int argc, char **argv) { ret = PatchOat::Patch(input_oat.get(), input_image_location, base_delta, output_oat.get(), output_image.get(), isa, &timings, output_oat_fd >= 0, // was it opened from FD? - new_oat_out); + new_oat_out, input_oat_filename_dummy); // The order here doesn't matter. If the first one is successfully saved and the second one // erased, ImageSpace will still detect a problem and not use the files. ret = ret && FinishFile(output_image.get(), ret); @@ -1293,7 +1465,7 @@ static int patchoat(int argc, char **argv) { TimingLogger::ScopedTiming pt("patch oat", &timings); ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings, output_oat_fd >= 0, // was it opened from FD? 
- new_oat_out); + new_oat_out, input_oat_filename_dummy); ret = ret && FinishFile(output_oat.get(), ret); } else if (have_image_files) { TimingLogger::ScopedTiming pt("patch image", &timings); diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index 03d915abddb..3a21d8514ff 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -44,7 +44,8 @@ class PatchOat { // Patch only the oat file static bool Patch(File* oat_in, off_t delta, File* oat_out, TimingLogger* timings, bool output_oat_opened_from_fd, // Was this using --oatput-oat-fd ? - bool new_oat_out); // Output oat was a new file created by us? + bool new_oat_out, // Output oat was a new file created by us? + bool input_oat_filename_dummy); // Input cannot be symlinked // Patch only the image (art file) static bool Patch(const std::string& art_location, off_t delta, File* art_out, InstructionSet isa, @@ -55,7 +56,8 @@ class PatchOat { off_t delta, File* oat_out, File* art_out, InstructionSet isa, TimingLogger* timings, bool output_oat_opened_from_fd, // Was this using --oatput-oat-fd ? - bool new_oat_out); // Output oat was a new file created by us? + bool new_oat_out, // Output oat was a new file created by us? + bool input_oat_filename_dummy); // Input cannot be symlinked private: // Takes ownership only of the ElfFile. All other pointers are only borrowed. @@ -87,10 +89,11 @@ class PatchOat { // Attempt to replace the file with a symlink // Returns false if it fails - static bool ReplaceOatFileWithSymlink(const std::string& input_oat_filename, - const std::string& output_oat_filename, - bool output_oat_opened_from_fd, - bool new_oat_out); // Output oat was newly created? + static bool SymlinkOrCopy(File* input_oat, + File* output_oat, + bool output_oat_opened_from_fd, + bool new_oat_out, // Output oat was newly created? 
+ bool make_copy); static void BitmapCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/Android.mk b/runtime/Android.mk index 84f2248ef12..6bd99b27d37 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -128,6 +128,7 @@ LIBART_COMMON_SRC_FILES := \ native/java_util_concurrent_atomic_AtomicLong.cc \ native/org_apache_harmony_dalvik_ddmc_DdmServer.cc \ native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \ + native/samsung.cc \ native/sun_misc_Unsafe.cc \ oat.cc \ oat_file.cc \ @@ -427,6 +428,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT ifeq ($$(art_target_or_host),target) LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils libsigchain LOCAL_STATIC_LIBRARIES := libziparchive libz + ifeq ($(TARGET_HAVE_QC_PERF),true) + LOCAL_WHOLE_STATIC_LIBRARIES += libqc-art + endif else # host LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils LOCAL_SHARED_LIBRARIES += libsigchain @@ -470,15 +474,6 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT art_ndebug_or_debug := endef -# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since -# they are used to cross compile for the target. -ifeq ($(ART_BUILD_NDEBUG),true) - $(eval $(call build-libart,host,ndebug)) -endif -ifeq ($(ART_BUILD_DEBUG),true) - $(eval $(call build-libart,host,debug)) -endif - ifeq ($(ART_BUILD_TARGET_NDEBUG),true) # $(error $(call build-libart,target,ndebug)) $(eval $(call build-libart,target,ndebug)) diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc index 3eb92c85565..0fd8a051c19 100644 --- a/runtime/arch/arm64/context_arm64.cc +++ b/runtime/arch/arm64/context_arm64.cc @@ -154,7 +154,7 @@ void Arm64Context::DoLongJump() { gprs[i] = gprs_[i] != nullptr ? 
*gprs_[i] : Arm64Context::kBadGprBase + i; } for (size_t i = 0; i < kNumberOfDRegisters; ++i) { - fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : Arm64Context::kBadGprBase + i; + fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : Arm64Context::kBadFprBase + i; } DCHECK_EQ(reinterpret_cast(Thread::Current()), gprs[TR]); art_quick_do_long_jump(gprs, fprs); diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 3c5db50f92d..300a6d63707 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -174,7 +174,7 @@ // Restore xSELF as it might be scratched. mov xSELF, xETR // ETR - ldr xETR, [sp, #16] + ldr xETR, [sp, #32] .cfi_restore x21 add sp, sp, #96 @@ -1060,6 +1060,8 @@ ENTRY art_quick_check_cast .cfi_adjust_cfa_offset -32 ret + .cfi_adjust_cfa_offset 32 // Reset unwind info so following code unwinds. + .Lthrow_class_cast_exception: // Restore ldp x0, x1, [sp] diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc index 789dbbb6d7d..ee472c4e458 100644 --- a/runtime/arch/mips/context_mips.cc +++ b/runtime/arch/mips/context_mips.cc @@ -108,7 +108,7 @@ void MipsContext::DoLongJump() { gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : MipsContext::kBadGprBase + i; } for (size_t i = 0; i < kNumberOfFRegisters; ++i) { - fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : MipsContext::kBadGprBase + i; + fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : MipsContext::kBadFprBase + i; } art_quick_do_long_jump(gprs, fprs); } diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 6a10755d758..6d5b0402719 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -629,6 +629,8 @@ DEFINE_FUNCTION art_quick_check_cast addl LITERAL(12), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-12) ret + + CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds. 
1: POP eax // pop arguments POP ecx diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 0de8dfd8549..81183ef7230 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -930,6 +930,8 @@ DEFINE_FUNCTION art_quick_check_cast CFI_ADJUST_CFA_OFFSET(-16) ret + + CFI_ADJUST_CFA_OFFSET(16 + 4 * 8) // Reset unwind info so following code unwinds. 1: RESTORE_FP_CALLEE_SAVE_FRAME POP rsi // Pop arguments diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc index 1b9022e1705..69abdece757 100644 --- a/runtime/base/bit_vector.cc +++ b/runtime/base/bit_vector.cc @@ -380,6 +380,10 @@ bool BitVector::IsBitSet(const uint32_t* storage, uint32_t num) { return (val != 0); } +#if defined(__clang__) && defined(__ARM_64BIT_STATE) +// b/19180814 When POPCOUNT is inlined, boot up failed on arm64 devices. +__attribute__((optnone)) +#endif uint32_t BitVector::NumSetBits(const uint32_t* storage, uint32_t end) { uint32_t word_end = end >> 5; uint32_t partial_word_bits = end & 0x1f; diff --git a/runtime/base/logging.h b/runtime/base/logging.h index caeb946ff0b..eb91c8a46ed 100644 --- a/runtime/base/logging.h +++ b/runtime/base/logging.h @@ -133,6 +133,7 @@ #endif #define LOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, -1).stream() +#define XLOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity|LOG_XPOSED, -1).stream() #define PLOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, errno).stream() #define LG LOG(INFO) diff --git a/runtime/base/logging_android.cc b/runtime/base/logging_android.cc index 9b1ac58461f..97d48409e79 100644 --- a/runtime/base/logging_android.cc +++ b/runtime/base/logging_android.cc @@ -32,7 +32,12 @@ static const int kLogSeverityToAndroidLogPriority[] = { void LogMessage::LogLine(const LogMessageData& data, const char* message) { const char* tag = ProgramInvocationShortName(); - int priority = 
kLogSeverityToAndroidLogPriority[data.severity]; + LogSeverity severity = data.severity; + if ((severity & LOG_XPOSED) != 0) { + tag = "Xposed"; + severity &= ~LOG_XPOSED; + } + int priority = kLogSeverityToAndroidLogPriority[severity]; if (priority == ANDROID_LOG_FATAL) { LOG_PRI(priority, tag, "%s:%d] %s", data.file, data.line_number, message); } else { diff --git a/runtime/base/macros.h b/runtime/base/macros.h index fae9271d9e4..b1eab92f0c0 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -151,7 +151,7 @@ char (&ArraySizeHelper(T (&array)[N]))[N]; #define UNLIKELY(x) __builtin_expect((x), false) // Stringify the argument. -#define QUOTE(x) #x +#define QUOTE(x...) #x #define STRINGIFY(x) QUOTE(x) #ifndef NDEBUG diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 59117350c61..5fb30820fa5 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -72,6 +72,10 @@ #include "verifier/method_verifier.h" #include "well_known_classes.h" +#ifdef HAVE_ANDROID_OS +#include "cutils/properties.h" +#endif + namespace art { static void ThrowNoClassDefFoundError(const char* fmt, ...) @@ -620,8 +624,9 @@ bool ClassLinker::GenerateOatFile(const char* dex_filename, } boot_image_option += heap->GetImageSpace()->GetImageLocation(); + std::string odex_filename(DexFilenameToOdexFilename(dex_filename, kRuntimeISA)); std::string dex_file_option("--dex-file="); - dex_file_option += dex_filename; + dex_file_option += OS::FileExists(odex_filename.c_str()) ? 
odex_filename : dex_filename; std::string oat_fd_option("--oat-fd="); StringAppendF(&oat_fd_option, "%d", oat_fd); @@ -663,6 +668,17 @@ bool ClassLinker::GenerateOatFile(const char* dex_filename, argv.push_back(compiler_options[i].c_str()); } +#ifdef HAVE_ANDROID_OS + const char* propertyName = "ro.sys.fw.dex2oat_thread_count"; + char count[PROPERTY_VALUE_MAX]; + if (property_get(propertyName, count, "") > 0) { + std::string thread_count("-j"); + StringAppendF(&thread_count, "%s", count); + argv.push_back(thread_count); + LOG(INFO) << "Adjust thread count for runtime dex2oat"; + } +#endif + if (!Exec(argv, error_msg)) { // Manually delete the file. Ensures there is no garbage left over if the process unexpectedly // died. Ignore unlink failure, propagate the original error. @@ -778,8 +794,8 @@ static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file, break; // Not found, done. } - // Checksum test. Test must succeed when generated. - success = !generated; + // Checksum test. + success = true; if (next_location_checksum_pointer != nullptr) { success = next_location_checksum == oat_dex_file->GetDexFileLocationChecksum(); } @@ -926,8 +942,13 @@ bool ClassLinker::OpenDexFilesFromOat(const char* dex_location, const char* oat_ // Need a checksum, fail else. if (!have_checksum) { - error_msgs->push_back(checksum_error_msg); - return false; + std::string odex_filename(DexFilenameToOdexFilename(dex_location, kRuntimeISA)); + if (OS::FileExists(odex_filename.c_str())) { + error_msgs->clear(); + } else { + error_msgs->push_back(checksum_error_msg); + return false; + } } // Look in cache location if no oat_location is given. 
@@ -1018,6 +1039,11 @@ const OatFile* ClassLinker::FindOatFileInOatLocationForDexFile(const char* dex_l } } + if (!oat_file->GetOatHeader().IsXposedOatVersionValid()) { + *error_msg = StringPrintf("Failed to find oat file at '%s' with expected Xposed oat version", oat_location); + return nullptr; + } + const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, &dex_location_checksum); if (oat_dex_file == nullptr) { @@ -1238,6 +1264,10 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& const OatFile* oat_file = oat_files_[i]; DCHECK(oat_file != nullptr); if (oat_file->GetLocation() == oat_location) { + if (oat_file->IsCreatedInZygote() && !Runtime::Current()->IsZygote() && oat_location.find("/system@") == std::string::npos) { + LOG(INFO) << "Ignoring oat file opened by Zygote: " << oat_location; + continue; + } return oat_file; } } @@ -1556,6 +1586,11 @@ bool ClassLinker::CheckOatFile(const Runtime* runtime, const OatFile* oat_file, patch_delta_verified = true; } + if (!oat_header.IsXposedOatVersionValid()) { + compound_msg += " Oat Image Xposed oat version invalid"; + *checksum_verified = false; + } + bool ret = (*checksum_verified && offset_verified && patch_delta_verified); if (ret) { *error_msg = compound_msg; @@ -2395,7 +2430,7 @@ bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod // method for direct methods (or virtual methods made direct). mirror::Class* declaring_class = method->GetDeclaringClass(); size_t oat_method_index; - if (method->IsStatic() || method->IsDirect()) { + if (method->IsStatic() || method->IsDirect(true)) { // Simple case where the oat method index was stashed at load time. 
oat_method_index = method->GetMethodIndex(); } else { @@ -2586,6 +2621,9 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { portable_code = oat_method.GetPortableCode(); quick_code = oat_method.GetQuickCode(); } + if (UNLIKELY(method->IsXposedHookedMethod())) { + method = method->GetXposedOriginalMethod(); + } const bool enter_interpreter = NeedsInterpreter(method, quick_code, portable_code); bool have_portable_code = false; if (enter_interpreter) { diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 2e23eb8bdad..22af7eeb703 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -1617,10 +1617,32 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g size_t direct_method_count = c->NumDirectMethods(); size_t virtual_method_count = c->NumVirtualMethods(); - expandBufAdd4BE(pReply, direct_method_count + virtual_method_count); + size_t xposed_method_count = 0; + for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) { + mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); + if (UNLIKELY(m->IsXposedHookedMethod())) { + ++xposed_method_count; + } + } + + expandBufAdd4BE(pReply, direct_method_count + virtual_method_count + xposed_method_count); for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) { mirror::ArtMethod* m = (i < direct_method_count) ? 
c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); + + if (UNLIKELY(xposed_method_count > 0 && m->IsXposedHookedMethod())) { + expandBufAddMethodId(pReply, ToMethodId(m)); + expandBufAddUtf8String(pReply, StringPrintf("%s", m->GetName()).c_str()); + expandBufAddUtf8String(pReply, m->GetSignature().ToString()); + if (with_generic) { + static const char genericSignature[1] = ""; + expandBufAddUtf8String(pReply, genericSignature); + } + expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags())); + + m = m->GetXposedOriginalMethod(); + } + expandBufAddMethodId(pReply, ToMethodId(m)); expandBufAddUtf8String(pReply, m->GetName()); expandBufAddUtf8String(pReply, m->GetSignature().ToString()); diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 6ede6de6704..96b0b20a3ee 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -343,4 +343,76 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons return zero; } } + +JValue InvokeXposedHandleHookedMethod(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty, + jobject rcvr_jobj, jmethodID method, + std::vector& args) { + // Build argument array possibly triggering GC. + soa.Self()->AssertThreadSuspensionIsAllowable(); + jobjectArray args_jobj = NULL; + const JValue zero; + int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion(); + // Do not create empty arrays unless needed to maintain Dalvik bug compatibility. 
+ if (args.size() > 0 || (target_sdk_version > 0 && target_sdk_version <= 21)) { + args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL); + if (args_jobj == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + return zero; + } + for (size_t i = 0; i < args.size(); ++i) { + if (shorty[i + 1] == 'L') { + jobject val = args.at(i).l; + soa.Env()->SetObjectArrayElement(args_jobj, i, val); + } else { + JValue jv; + jv.SetJ(args.at(i).j); + mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); + if (val == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + return zero; + } + soa.Decode* >(args_jobj)->Set(i, val); + } + } + } + + const XposedHookInfo* hookInfo = soa.DecodeMethod(method)->GetXposedHookInfo(); + + // Call XposedBridge.handleHookedMethod(Member method, int originalMethodId, Object additionalInfoObj, + // Object thisObject, Object[] args) + jvalue invocation_args[5]; + invocation_args[0].l = hookInfo->reflectedMethod; + invocation_args[1].i = 0; + invocation_args[2].l = hookInfo->additionalInfo; + invocation_args[3].l = rcvr_jobj; + invocation_args[4].l = args_jobj; + jobject result = + soa.Env()->CallStaticObjectMethodA(mirror::ArtMethod::xposed_callback_class, + mirror::ArtMethod::xposed_callback_method, + invocation_args); + + + // Unbox the result if necessary and return it. + if (UNLIKELY(soa.Self()->IsExceptionPending())) { + return zero; + } else { + if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { + return zero; + } + StackHandleScope<1> hs(soa.Self()); + MethodHelper mh_method(hs.NewHandle(soa.DecodeMethod(method))); + // This can cause thread suspension. 
+ mirror::Object* rcvr = soa.Decode(rcvr_jobj); + ThrowLocation throw_location(rcvr, mh_method.GetMethod(), -1); + mirror::Object* result_ref = soa.Decode(result); + mirror::Class* result_type = mh_method.GetReturnType(); + JValue result_unboxed; + if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, &result_unboxed)) { + DCHECK(soa.Self()->IsExceptionPending()); + return zero; + } + return result_unboxed; + } +} + } // namespace art diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index bc95c234fec..50c5aacc3b1 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -181,6 +181,11 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons std::vector& args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +JValue InvokeXposedHandleHookedMethod(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty, + jobject rcvr_jobj, jmethodID method, + std::vector& args) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Entry point for deoptimization. 
extern "C" void art_quick_deoptimize(); static inline uintptr_t GetQuickDeoptimizationEntryPoint() { diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 653724989a4..dd83b682ad2 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -73,7 +73,6 @@ extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) { PopLocalReferences(saved_local_ref_cookie, self); } - extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self) { GoToRunnable(self); @@ -81,38 +80,34 @@ extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject lo PopLocalReferences(saved_local_ref_cookie, self); } -extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, - Thread* self) { - GoToRunnable(self); - mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. +// Common result handling for EndWithReference. +static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result, + uint32_t saved_local_ref_cookie, + Thread* self) + NO_THREAD_SAFETY_ANALYSIS { + // Must decode before pop. The 'result' may not be valid in case of an exception, though. + mirror::Object* o = self->IsExceptionPending() ? nullptr : self->DecodeJObject(result); PopLocalReferences(saved_local_ref_cookie, self); // Process result. 
if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } CheckReferenceResult(o, self); } VerifyObject(o); return o; } +extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, + Thread* self) { + GoToRunnable(self); + return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self); +} + extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, uint32_t saved_local_ref_cookie, jobject locked, Thread* self) { GoToRunnable(self); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - mirror::Object* o = self->DecodeJObject(result); - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. - if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - VerifyObject(o); - return o; + UnlockJniSynchronizedMethod(locked, self); + return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self); } } // namespace art diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 0651899736e..ce118b57669 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -610,7 +610,9 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, Thread* self, StackReference* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method); - DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); + const bool is_xposed = proxy_method->IsXposedHookedMethod(); + const bool is_static = proxy_method->IsStatic(); + DCHECK(is_xposed || receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 
const char* old_cause = self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); @@ -626,20 +628,30 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, ScopedObjectAccessUnchecked soa(env); ScopedJniEnvLocalRefState env_state(env); // Create local ref. copies of proxy method and the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); + jobject rcvr_jobj = is_static ? nullptr : soa.AddLocalReference(receiver); // Placing arguments into args vector and remove the receiver. mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(); - CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " " - << PrettyMethod(non_proxy_method); + CHECK(is_xposed || !non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " " + << PrettyMethod(non_proxy_method); std::vector args; uint32_t shorty_len = 0; const char* shorty = proxy_method->GetShorty(&shorty_len); - BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); + BuildQuickArgumentVisitor local_ref_visitor(sp, is_static, shorty, shorty_len, &soa, &args); local_ref_visitor.VisitArguments(); - DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method); - args.erase(args.begin()); + if (!is_static) { + DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method); + args.erase(args.begin()); + } + + if (is_xposed) { + jmethodID proxy_methodid = soa.EncodeMethod(proxy_method); + self->EndAssertNoThreadSuspension(old_cause); + JValue result = InvokeXposedHandleHookedMethod(soa, shorty, rcvr_jobj, proxy_methodid, args); + local_ref_visitor.FixupReferences(); + return result.GetJ(); + } // Convert proxy method into expected interface method. 
mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod(); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index bad8de31acd..db65a69280b 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -3240,7 +3240,7 @@ void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) { void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) { CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) || - (c->IsVariableSize() || c->GetObjectSize() == byte_count)); + (c->IsVariableSize() || c->GetObjectSize() <= byte_count)); CHECK_GE(byte_count, sizeof(mirror::Object)); } diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index f765f0e168e..15d899b1c97 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -117,30 +118,59 @@ static void RealPruneDalvikCache(const std::string& cache_dir_path) { CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(cache_dir))) << "Unable to close directory."; } -// We write out an empty file to the zygote's ISA specific cache dir at the start of -// every zygote boot and delete it when the boot completes. If we find a file already -// present, it usually means the boot didn't complete. We wipe the entire dalvik -// cache if that's the case. -static void MarkZygoteStart(const InstructionSet isa) { +// We write out a file with the number of failed boots to the zygote's ISA specific cache dir +// at the start of every zygote boot and delete it when the boot completes. If we find a file +// already present, we check if the number of failed boots is greater than the maximum failed +// boots allowed, and wipe the entire dalvik cache if that's the case. 
+static void MarkZygoteStart(const InstructionSet isa, const uint32_t max_failed_boots) { const std::string isa_subdir = GetDalvikCacheOrDie(GetInstructionSetString(isa), false); const std::string boot_marker = isa_subdir + "/.booting"; + const char* file_name = boot_marker.c_str(); - if (OS::FileExists(boot_marker.c_str())) { + uint32_t num_failed_boots = 0; + std::unique_ptr file(OS::OpenFileReadWrite(file_name)); + if (file.get() == nullptr) { + file.reset(OS::CreateEmptyFile(file_name)); + + if (file.get() == nullptr) { + PLOG(WARNING) << "Failed to create boot marker."; + return; + } + } else { + if (!file->ReadFully(&num_failed_boots, sizeof(num_failed_boots))) { + PLOG(WARNING) << "Failed to read boot marker."; + file->Erase(); + return; + } + } + + if (max_failed_boots != 0 && num_failed_boots >= max_failed_boots) { LOG(WARNING) << "Incomplete boot detected. Pruning dalvik cache"; RealPruneDalvikCache(isa_subdir); } - VLOG(startup) << "Creating boot start marker: " << boot_marker; - std::unique_ptr f(OS::CreateEmptyFile(boot_marker.c_str())); - if (f.get() != nullptr) { - if (f->FlushCloseOrErase() != 0) { - PLOG(WARNING) << "Failed to write boot marker."; - } + ++num_failed_boots; + VLOG(startup) << "Number of failed boots on : " << boot_marker << " = " << num_failed_boots; + + if (lseek(file->Fd(), 0, SEEK_SET) == -1) { + PLOG(WARNING) << "Failed to write boot marker."; + file->Erase(); + return; + } + + if (!file->WriteFully(&num_failed_boots, sizeof(num_failed_boots))) { + PLOG(WARNING) << "Failed to write boot marker."; + file->Erase(); + return; + } + + if (file->FlushCloseOrErase() != 0) { + PLOG(WARNING) << "Failed to flush boot marker."; } } static bool GenerateImage(const std::string& image_filename, InstructionSet image_isa, - std::string* error_msg) { + std::string* error_msg, std::string system_filename) { const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString()); std::vector boot_class_path; 
Split(boot_class_path_string, ':', boot_class_path); @@ -163,8 +193,14 @@ static bool GenerateImage(const std::string& image_filename, InstructionSet imag image_option_string += image_filename; arg_vector.push_back(image_option_string); - for (size_t i = 0; i < boot_class_path.size(); i++) { - arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]); + if (system_filename == nullptr) { + for (size_t i = 0; i < boot_class_path.size(); i++) { + arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]); + } + } else { + std::string dex_file_option_string("--dex-file="); + dex_file_option_string += ImageHeader::GetOatLocationFromImageLocation(system_filename); + arg_vector.push_back(dex_file_option_string); } std::string oat_file_option_string("--oat-file="); @@ -338,7 +374,14 @@ ImageHeader* ImageSpace::ReadImageHeader(const char* image_location, image_location, cache_filename.c_str()); return nullptr; } - if (sys_hdr->GetOatChecksum() != cache_hdr->GetOatChecksum()) { + std::string cache_oat_filename = ImageHeader::GetOatLocationFromImageLocation(cache_filename); + std::unique_ptr cache_oat_hdr(OatHeader::FromFile(cache_oat_filename, error_msg)); + if (cache_oat_hdr.get() == nullptr) { + *error_msg = StringPrintf("Unable to read oat file header for %s at %s: %s", + image_location, cache_oat_filename.c_str(), error_msg->c_str()); + return nullptr; + } + if (sys_hdr->GetOatChecksum() != cache_oat_hdr->GetOriginalChecksum(true)) { *error_msg = StringPrintf("Unable to find a relocated version of image file %s", image_location); return nullptr; @@ -360,8 +403,11 @@ ImageHeader* ImageSpace::ReadImageHeader(const char* image_location, error_msg)); std::unique_ptr cache(ReadSpecificImageHeader(cache_filename.c_str(), error_msg)); + std::string cache_oat_filename = ImageHeader::GetOatLocationFromImageLocation(cache_filename); + std::unique_ptr cache_oat_hdr(OatHeader::FromFile(cache_oat_filename, error_msg)); if (system.get() == nullptr || - 
(cache.get() != nullptr && cache->GetOatChecksum() == system->GetOatChecksum())) { + (cache.get() != nullptr && cache_oat_hdr.get() != nullptr && + cache_oat_hdr->GetOriginalChecksum(true) == system->GetOatChecksum())) { return cache.release(); } else { return system.release(); @@ -449,12 +495,42 @@ ImageSpace* ImageSpace::Create(const char* image_location, &has_cache, &is_global_cache); if (Runtime::Current()->IsZygote()) { - MarkZygoteStart(image_isa); + MarkZygoteStart(image_isa, 10); } ImageSpace* space; bool relocate = Runtime::Current()->ShouldRelocate(); bool can_compile = Runtime::Current()->IsImageDex2OatEnabled(); + + const std::string* image_filename = nullptr; + bool is_system = false; + bool relocated_version_used = false; + if (has_cache) { + std::string cache_oat_filename = ImageHeader::GetOatLocationFromImageLocation(cache_filename); + std::unique_ptr cache_oat_hdr(OatHeader::FromFile(cache_oat_filename, error_msg)); + if (cache_oat_hdr.get() == nullptr) { + LOG(INFO) << "Forcing image recompilation because " << cache_oat_filename + << " could not be opened: " << *error_msg; + } else if (!cache_oat_hdr->IsXposedOatVersionValid()) { + XLOG(INFO) << "Forcing image recompilation because " << cache_filename + << " is not compiled for the current Xposed version"; + } else { + std::unique_ptr system_hdr(new ImageHeader); + if (has_system && ReadSpecificImageHeader(system_filename.c_str(), system_hdr.get())) { + if (system_hdr->GetOatChecksum() == cache_oat_hdr->GetOriginalChecksum(true)) { + image_filename = &cache_filename; + is_system = true; + } + } else { + image_filename = &cache_filename; + is_system = has_system; + } + } + } + // TODO: Consider relocating in the rare case that the system image is already prepared for Xposed + + if (image_filename != nullptr) { +#if 0 if (found_image) { const std::string* image_filename; bool is_system = false; @@ -527,6 +603,7 @@ ImageSpace* ImageSpace::Create(const char* image_location, image_filename = 
&cache_filename; } } +#endif { // Note that we must not use the file descriptor associated with // ScopedFlock::GetFile to Init the image file. We want the file @@ -576,7 +653,7 @@ ImageSpace* ImageSpace::Create(const char* image_location, return nullptr; } else if (!ImageCreationAllowed(is_global_cache, error_msg)) { return nullptr; - } else if (!GenerateImage(cache_filename, image_isa, error_msg)) { + } else if (!GenerateImage(cache_filename, image_isa, error_msg, system_filename)) { *error_msg = StringPrintf("Failed to generate image '%s': %s", cache_filename.c_str(), error_msg->c_str()); // We failed to create files, remove any possibly garbage output. @@ -599,7 +676,7 @@ ImageSpace* ImageSpace::Create(const char* image_location, // we leave Create. ScopedFlock image_lock; image_lock.Init(cache_filename.c_str(), error_msg); - space = ImageSpace::Init(cache_filename.c_str(), image_location, true, error_msg); + space = ImageSpace::Init(cache_filename.c_str(), image_location, !has_system, error_msg); if (space == nullptr) { *error_msg = StringPrintf("Failed to load generated image '%s': %s", cache_filename.c_str(), error_msg->c_str()); @@ -806,7 +883,6 @@ void ImageSpace::Dump(std::ostream& os) const { << ",size=" << PrettySize(Size()) << ",name=\"" << GetName() << "\"]"; } - +} // namespace space } // namespace space } // namespace gc -} // namespace art diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h index da7d153c08d..257c3d38e19 100644 --- a/runtime/instruction_set.h +++ b/runtime/instruction_set.h @@ -180,6 +180,7 @@ size_t GetStackOverflowReservedBytes(InstructionSet isa); enum InstructionFeatures { kHwDiv = 0x1, // Supports hardware divide. kHwLpae = 0x2, // Supports Large Physical Address Extension. + kFix835769 = 0x4, // need fix CortexA53 errata 835769 }; // This is a bitmask of supported features per architecture. @@ -206,6 +207,14 @@ class PACKED(4) InstructionSetFeatures { mask_ = (mask_ & ~kHwLpae) | (v ? 
kHwLpae : 0); } + bool NeedFix835769() const { + return (mask_ & kFix835769) != 0; + } + + void SetFix835769(bool v) { + mask_ = (mask_ & ~kFix835769) | (v ? kFix835769 : 0); + } + std::string GetFeatureString() const; // Other features in here. diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index d06a67f9253..7728f5a9fcb 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -123,6 +123,9 @@ static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code, } void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) { + if (UNLIKELY(method->IsXposedHookedMethod())) { + method = method->GetXposedOriginalMethod(); + } if (method->IsAbstract() || method->IsProxyMethod()) { // Do not change stubs for these methods. return; diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index 938bf2c20e8..74b1ed89a80 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -2411,7 +2411,7 @@ class JNI { << c->GetDexCache()->GetLocation()->ToModifiedUtf8(); ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static"); return JNI_ERR; - } else if (!m->IsNative()) { + } else if (!m->IsNative() && !(m->IsXposedHookedMethod() && m->GetXposedOriginalMethod()->IsNative())) { LOG(return_errors ? ERROR : FATAL) << "Failed to register non-native method " << PrettyDescriptor(c) << "." 
<< name << sig << " as native"; @@ -2436,14 +2436,14 @@ class JNI { size_t unregistered_count = 0; for (size_t i = 0; i < c->NumDirectMethods(); ++i) { mirror::ArtMethod* m = c->GetDirectMethod(i); - if (m->IsNative()) { + if (m->IsNative() || (m->IsXposedHookedMethod() && m->GetXposedOriginalMethod()->IsNative())) { m->UnregisterNative(soa.Self()); unregistered_count++; } } for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { mirror::ArtMethod* m = c->GetVirtualMethod(i); - if (m->IsNative()) { + if (m->IsNative() || (m->IsXposedHookedMethod() && m->GetXposedOriginalMethod()->IsNative())) { m->UnregisterNative(soa.Self()); unregistered_count++; } @@ -3022,7 +3022,7 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { JavaVMOption* option = &args->options[i]; options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo)); } - bool ignore_unrecognized = args->ignoreUnrecognized; + bool ignore_unrecognized = true; if (!Runtime::Create(options, ignore_unrecognized)) { return JNI_ERR; } diff --git a/runtime/log_severity.h b/runtime/log_severity.h index 31682dfeee8..f552143e291 100644 --- a/runtime/log_severity.h +++ b/runtime/log_severity.h @@ -21,5 +21,6 @@ typedef int LogSeverity; const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5; const int INTERNAL_FATAL = 6; // For Runtime::Abort. +const int LOG_XPOSED = 1024; #endif // ART_RUNTIME_LOG_SEVERITY_H_ diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h index b28ea4d1cf2..aaf9ca7bab6 100644 --- a/runtime/mirror/art_method-inl.h +++ b/runtime/mirror/art_method-inl.h @@ -363,17 +363,32 @@ inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() { return QuickMethodFrameInfo(kStackAlignment, 0u, 0u); } Runtime* runtime = Runtime::Current(); - // For Proxy method we exclude direct method (there is only one direct method - constructor). 
- // Direct method is cloned from original java.lang.reflect.Proxy class together with code - // and as a result it is executed as usual quick compiled method without any stubs. - // So the frame info should be returned as it is a quick method not a stub. - if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod() && !IsDirect())) { + + if (UNLIKELY(IsAbstract() || IsXposedHookedMethod())) { return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); } + + // This goes before IsProxyMethod since runtime methods have a null declaring class. if (UNLIKELY(IsRuntimeMethod())) { return runtime->GetRuntimeMethodFrameInfo(this); } + // For Proxy method we add special handling for the direct method case (there is only one + // direct method - constructor). Direct method is cloned from original + // java.lang.reflect.Proxy class together with code and as a result it is executed as usual + // quick compiled method without any stubs. So the frame info should be returned as it is a + // quick method not a stub. However, if instrumentation stubs are installed, the + // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an + // oat code pointer, thus we have to add a special case here. 
+ if (UNLIKELY(IsProxyMethod(true))) { + if (IsDirect()) { + CHECK(IsConstructor()); + return GetQuickFrameInfo(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode())); + } else { + return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); + } + } + const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)); // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline) @@ -463,6 +478,9 @@ inline const char* ArtMethod::GetName() { } inline const DexFile::CodeItem* ArtMethod::GetCodeItem() { + if (UNLIKELY(IsXposedHookedMethod())) { + return nullptr; + } mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); return method->GetDexFile()->GetCodeItem(method->GetCodeItemOffset()); } @@ -495,6 +513,9 @@ inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() { } inline const char* ArtMethod::GetDeclaringClassSourceFile() { + if (UNLIKELY(IsXposedHookedMethod())) { + return ""; + } return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetSourceFile(); } @@ -530,12 +551,12 @@ inline mirror::DexCache* ArtMethod::GetDexCache() { return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexCache(); } -inline bool ArtMethod::IsProxyMethod() { - return GetDeclaringClass()->IsProxyClass(); +inline bool ArtMethod::IsProxyMethod(bool ignore_xposed) { + return GetDeclaringClass()->IsProxyClass() || (!ignore_xposed && IsXposedHookedMethod()); } inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy() { - if (LIKELY(!IsProxyMethod())) { + if (LIKELY(!IsProxyMethod(true))) { return this; } mirror::Class* klass = GetDeclaringClass(); diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc index afc10798789..62516a4582e 100644 --- a/runtime/mirror/art_method.cc +++ b/runtime/mirror/art_method.cc @@ -48,6 +48,8 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, 
uint32_t, Th // TODO: get global references for these GcRoot ArtMethod::java_lang_reflect_ArtMethod_; +jclass ArtMethod::xposed_callback_class = NULL; +jmethodID ArtMethod::xposed_callback_method = NULL; ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) { @@ -352,6 +354,10 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_fast) { DCHECK(Thread::Current() == self); + if (UNLIKELY(IsXposedHookedMethod())) { + GetXposedOriginalMethod()->RegisterNative(self, native_method, is_fast); + return; + } CHECK(IsNative()) << PrettyMethod(this); CHECK(!IsFastNative()) << PrettyMethod(this); CHECK(native_method != NULL) << PrettyMethod(this); @@ -362,10 +368,54 @@ void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_ } void ArtMethod::UnregisterNative(Thread* self) { + if (UNLIKELY(IsXposedHookedMethod())) { + GetXposedOriginalMethod()->UnregisterNative(self); + return; + } CHECK(IsNative() && !IsFastNative()) << PrettyMethod(this); // restore stub to lookup native pointer via dlsym RegisterNative(self, GetJniDlsymLookupStub(), false); } +void ArtMethod::EnableXposedHook(JNIEnv* env, jobject additional_info) { + if (UNLIKELY(IsXposedHookedMethod())) { + // Already hooked + return; + } else if (UNLIKELY(IsXposedOriginalMethod())) { + // This should never happen + ThrowIllegalArgumentException(nullptr, StringPrintf("Cannot hook the method backup: %s", PrettyMethod(this).c_str()).c_str()); + return; + } + + ScopedObjectAccess soa(env); + + // Create a backup of the ArtMethod object + ArtMethod* backup_method = down_cast(Clone(soa.Self())); + backup_method->SetAccessFlags(backup_method->GetAccessFlags() | kAccXposedOriginalMethod); + + // Create a Method/Constructor object for the backup ArtMethod object + jobject reflect_method; + if (IsConstructor()) { + reflect_method = 
env->AllocObject(WellKnownClasses::java_lang_reflect_Constructor); + } else { + reflect_method = env->AllocObject(WellKnownClasses::java_lang_reflect_Method); + } + env->SetObjectField(reflect_method, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod, + env->NewGlobalRef(soa.AddLocalReference(backup_method))); + + // Save extra information in a separate structure, stored instead of the native method + XposedHookInfo* hookInfo = reinterpret_cast(calloc(1, sizeof(XposedHookInfo))); + hookInfo->reflectedMethod = env->NewGlobalRef(reflect_method); + hookInfo->additionalInfo = env->NewGlobalRef(additional_info); + hookInfo->originalMethod = backup_method; + SetEntryPointFromJni(reinterpret_cast(hookInfo)); + + SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler()); + SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); + + // Adjust access flags + SetAccessFlags((GetAccessFlags() & ~kAccNative) | kAccXposedHookedMethod); +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h index 2e26d9c893d..131daf80245 100644 --- a/runtime/mirror/art_method.h +++ b/runtime/mirror/art_method.h @@ -37,6 +37,12 @@ class ScopedObjectAccessAlreadyRunnable; class StringPiece; class ShadowFrame; +struct XposedHookInfo { + jobject reflectedMethod; + jobject additionalInfo; + mirror::ArtMethod* originalMethod; +}; + namespace mirror { typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper& mh, @@ -98,12 +104,16 @@ class MANAGED ArtMethod FINAL : public Object { } // Returns true if the method is static, private, or a constructor. 
- bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return IsDirect(GetAccessFlags()); + bool IsDirect(bool ignore_xposed = false) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return IsDirect(GetAccessFlags(), ignore_xposed); } - static bool IsDirect(uint32_t access_flags) { - return (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0; + static bool IsDirect(uint32_t access_flags, bool ignore_xposed = false) { + uint32_t mask = kAccStatic | kAccPrivate | kAccConstructor; + if (LIKELY(!ignore_xposed)) { + mask |= kAccXposedOriginalMethod; + } + return (access_flags & mask) != 0; } // Returns true if the method is declared synchronized. @@ -137,7 +147,7 @@ class MANAGED ArtMethod FINAL : public Object { return (GetAccessFlags() & kAccSynthetic) != 0; } - bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsProxyMethod(bool ignore_xposed = false) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPreverified) != 0; @@ -315,6 +325,7 @@ class MANAGED ArtMethod FINAL : public Object { template void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(!IsXposedHookedMethod()); CheckObjectSizeEqualsMirrorSize(); SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code, sizeof(void*)); @@ -323,6 +334,7 @@ class MANAGED ArtMethod FINAL : public Object { ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize( const void* entry_point_from_quick_compiled_code, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(!IsXposedHookedMethod()); SetFieldPtrWithSize( EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code, pointer_size); @@ -542,6 +554,29 @@ class MANAGED ArtMethod FINAL : public Object { ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Xposed + bool IsXposedHookedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return (GetAccessFlags() & kAccXposedHookedMethod) != 0; + } + + bool IsXposedOriginalMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return (GetAccessFlags() & kAccXposedOriginalMethod) != 0; + } + + void EnableXposedHook(JNIEnv* env, jobject additional_info) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + const XposedHookInfo* GetXposedHookInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(IsXposedHookedMethod()); + return reinterpret_cast(GetEntryPointFromJni()); + } + + ArtMethod* GetXposedOriginalMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetXposedHookInfo()->originalMethod; + } + + static jclass xposed_callback_class; + static jmethodID xposed_callback_method; + static size_t SizeWithoutPointerFields(size_t pointer_size) { size_t total = sizeof(ArtMethod) - sizeof(PtrSizedFields); #ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc index d6c11e8b0c1..9a49fa9e372 100644 --- a/runtime/mirror/dex_cache.cc +++ b/runtime/mirror/dex_cache.cc @@ -44,10 +44,10 @@ void DexCache::Init(const DexFile* dex_file, CHECK(resolved_methods != nullptr); CHECK(resolved_fields != nullptr); - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location); + SetFieldPtr(DexFileOffset(), dex_file); + SetFieldObject(LocationOffset(), location); SetFieldObject(StringsOffset(), strings); - SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types); + SetFieldObject(ResolvedTypesOffset(), resolved_types); SetFieldObject(ResolvedMethodsOffset(), resolved_methods); SetFieldObject(ResolvedFieldsOffset(), resolved_fields); diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index 3c947ab37bc..ddc41d35cb9 100644 --- 
a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -22,6 +22,7 @@ #include "class.h" #include "object.h" #include "object_array.h" +#include "utils.h" namespace art { @@ -41,8 +42,8 @@ class MANAGED DexCache FINAL : public Object { static uint32_t ClassSize(); // Size of an instance of java.lang.DexCache not including referenced values. - static constexpr uint32_t InstanceSize() { - return sizeof(DexCache); + static uint32_t InstanceSize() { + return sizeof(DexCache) + (IsSamsungROM() ? 8 : 0); } void Init(const DexFile* dex_file, @@ -56,19 +57,31 @@ class MANAGED DexCache FINAL : public Object { void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_)); + return GetFieldObject(LocationOffset()); + } + + static MemberOffset LocationOffset() { + return MemberOffset(OFFSETOF_MEMBER(DexCache, location_) + (IsSamsungROM() ? 4 : 0)); } static MemberOffset StringsOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_); + return MemberOffset(OFFSETOF_MEMBER(DexCache, strings_) + (IsSamsungROM() ? 4 : 0)); } static MemberOffset ResolvedFieldsOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_); + return MemberOffset(OFFSETOF_MEMBER(DexCache, resolved_fields_) + (IsSamsungROM() ? 4 : 0)); } static MemberOffset ResolvedMethodsOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_); + return MemberOffset(OFFSETOF_MEMBER(DexCache, resolved_methods_) + (IsSamsungROM() ? 4 : 0)); + } + + static MemberOffset ResolvedTypesOffset() { + return MemberOffset(OFFSETOF_MEMBER(DexCache, resolved_types_) + (IsSamsungROM() ? 4 : 0)); + } + + static MemberOffset DexFileOffset() { + return MemberOffset(OFFSETOF_MEMBER(DexCache, dex_file_) + (IsSamsungROM() ? 
8 : 0)); } size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -133,7 +146,7 @@ class MANAGED DexCache FINAL : public Object { ObjectArray* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetFieldObject>( - OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_)); + ResolvedTypesOffset()); } ObjectArray* GetResolvedMethods() ALWAYS_INLINE @@ -147,12 +160,12 @@ class MANAGED DexCache FINAL : public Object { } const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_)); + return GetFieldPtr(DexFileOffset()); } void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { - return SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file); + return SetFieldPtr(DexFileOffset(), dex_file); } private: diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index d2cc367d4dc..4019a71eb5c 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -120,19 +120,28 @@ class CopyObjectVisitor { }; Object* Object::Clone(Thread* self) { + return Clone(self, 0); +} + +Object* Object::Clone(Thread* self, size_t num_target_bytes) { CHECK(!IsClass()) << "Can't clone classes."; // Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would // be wrong. 
gc::Heap* heap = Runtime::Current()->GetHeap(); size_t num_bytes = SizeOf(); + if (LIKELY(num_target_bytes == 0)) { + num_target_bytes = num_bytes; + } else { + CHECK(num_target_bytes >= num_bytes); + } StackHandleScope<1> hs(self); Handle this_object(hs.NewHandle(this)); Object* copy; CopyObjectVisitor visitor(self, &this_object, num_bytes); if (heap->IsMovableObject(this)) { - copy = heap->AllocObject(self, GetClass(), num_bytes, visitor); + copy = heap->AllocObject(self, GetClass(), num_target_bytes, visitor); } else { - copy = heap->AllocNonMovableObject(self, GetClass(), num_bytes, visitor); + copy = heap->AllocNonMovableObject(self, GetClass(), num_target_bytes, visitor); } return copy; } diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index bf76c860ec7..f894113dcec 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -103,6 +103,7 @@ class MANAGED LOCKABLE Object { size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* Clone(Thread* self, size_t num_target_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t IdentityHashCode() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index d9241417854..7bd029cd7a2 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -30,6 +30,11 @@ namespace mirror { inline uint32_t String::ClassSize() { uint32_t vtable_entries = Object::kVTableLength + 51; + if (IsSamsungROM()) { + // Samsung added a new method "string.clear()" + // Increase vtable length to support it + vtable_entries++; + } return Class::ComputeClassSize(true, vtable_entries, 1, 1, 2); } diff --git a/runtime/modifiers.h b/runtime/modifiers.h index 23c18f86ff4..4a901dce6f8 100644 --- a/runtime/modifiers.h +++ b/runtime/modifiers.h @@ -68,6 +68,9 @@ static constexpr uint32_t kAccReferenceFlagsMask = (kAccClassIsReference | 
kAccClassIsFinalizerReference | kAccClassIsPhantomReference); +static constexpr uint32_t kAccXposedHookedMethod = 0x10000000; // method has been hooked by Xposed +static constexpr uint32_t kAccXposedOriginalMethod = 0x20000000; // method is a backup created by Xposed + // Valid (meaningful) bits for a field. static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected | kAccStatic | kAccFinal | kAccVolatile | kAccTransient | kAccSynthetic | kAccEnum; diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index 3298b461a46..3572c852b6b 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -304,6 +304,13 @@ static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* error_msg.clear(); return kDexoptNeeded; } + if (!oat_file->GetOatHeader().IsXposedOatVersionValid()) { + if (kReasonLogging) { + LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename + << " needs to be recompiled with Xposed for " << filename; + } + return kDexoptNeeded; + } // Pass-up the information about if this is PIC. // TODO: Refactor this function to be less complicated. diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc index 5b41d5feeb2..9052eee5c12 100644 --- a/runtime/native/dalvik_system_ZygoteHooks.cc +++ b/runtime/native/dalvik_system_ZygoteHooks.cc @@ -26,7 +26,9 @@ #if defined(HAVE_PRCTL) #include #endif - +#ifdef HAVE_ANDROID_OS +#include +#endif #include namespace art { @@ -41,7 +43,18 @@ static void EnableDebugger() { #endif // We don't want core dumps, though, so set the core dump size to 0. 
rlimit rl; +#ifdef HAVE_ANDROID_OS + char prop_value[PROPERTY_VALUE_MAX]; + property_get("persist.debug.trace", prop_value, "0"); + if (prop_value[0] == '1') { + LOG(INFO) << "setting RLIM to infinity for process " << getpid(); + rl.rlim_cur = RLIM_INFINITY; + } else { + rl.rlim_cur = 0; + } +#else rl.rlim_cur = 0; +#endif rl.rlim_max = RLIM_INFINITY; if (setrlimit(RLIMIT_CORE, &rl) == -1) { PLOG(ERROR) << "setrlimit(RLIMIT_CORE) failed for pid " << getpid(); diff --git a/runtime/native/samsung.cc b/runtime/native/samsung.cc new file mode 100644 index 00000000000..ff7617182d2 --- /dev/null +++ b/runtime/native/samsung.cc @@ -0,0 +1,273 @@ +#include "field_helper.h" +#include "mirror/art_field-inl.h" +#include "mirror/art_method-inl.h" +#include "mirror/class-inl.h" +#include "scoped_fast_native_object_access.h" +#include "ScopedUtfChars.h" + +namespace art { + +//---------------------------------------------------- +// java.lang.Class + +static bool equalMethodParameters(mirror::ArtMethod* method, mirror::ObjectArray* parameterTypes) { + const DexFile::TypeList* params = method->GetParameterTypeList(); + if (params == nullptr) + return (parameterTypes->GetLength() == 0); + + int32_t numParams = params->Size(); + if (numParams != parameterTypes->GetLength()) + return false; + + for (int32_t i = 0; i < numParams; i++) { + uint16_t type_idx = params->GetTypeItem(i).type_idx_; + mirror::Class* param_type = method->GetDexCacheResolvedType(type_idx); + if (param_type == nullptr) { + param_type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); + CHECK(param_type != nullptr || Thread::Current()->IsExceptionPending()); + } + + if (param_type != parameterTypes->Get(i)) + return false; + } + + return true; +} + +static mirror::ArtMethod* getDeclaredMethodInternal(mirror::Class* c, const StringPiece& name, mirror::ObjectArray* parameterTypes) { + mirror::ArtMethod* potentialResult = nullptr; + + for (size_t i = 0; i < c->NumVirtualMethods(); i++) { + 
mirror::ArtMethod* method = c->GetVirtualMethod(i); + + if (name != method->GetName()) + continue; + + if (!equalMethodParameters(method, parameterTypes)) + continue; + + if (!method->IsMiranda()) { + if (!method->IsSynthetic()) + return method; + + // Remember as potential result if it's not a miranda method. + potentialResult = method; + } + } + + for (size_t i = 0; i < c->NumDirectMethods(); i++) { + mirror::ArtMethod* method = c->GetDirectMethod(i); + if (method->IsConstructor()) + continue; + + if (name != method->GetName()) + continue; + + if (!equalMethodParameters(method, parameterTypes)) + continue; + + if (!method->IsMiranda() && !method->IsSynthetic()) + return method; + + // Direct methods cannot be miranda methods, + // so this potential result must be synthetic. + potentialResult = method; + } + + return potentialResult; +} + +static mirror::ArtMethod* getPublicMethodRecursive(mirror::Class* c, const StringPiece& name, mirror::ObjectArray* parameterTypes) { + // search superclasses + for (mirror::Class* klass = c; klass != nullptr; klass = klass->GetSuperClass()) { + mirror::ArtMethod* result = getDeclaredMethodInternal(klass, name, parameterTypes); + if (result != nullptr && result->IsPublic()) + return result; + } + + // search iftable which has a flattened and uniqued list of interfaces + int32_t iftable_count = c->GetIfTableCount(); + mirror::IfTable* iftable = c->GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + mirror::ArtMethod* result = getPublicMethodRecursive(iftable->GetInterface(i), name, parameterTypes); + if (result != nullptr && result->IsPublic()) + return result; + } + + return nullptr; +} + +static jobject Class_getMethodNative(JNIEnv* env, jobject javaThis, jstring javaName, jobjectArray javaParameterTypes, jboolean recursivePublicMethods) { + ScopedObjectAccess soa(env); + if (UNLIKELY(javaName == nullptr)) { + ThrowNullPointerException(nullptr, "name == null"); + return nullptr; + } + + mirror::ObjectArray* 
parameterTypes = + soa.Decode*>(javaParameterTypes); + size_t numParameterTypes = parameterTypes->GetLength(); + for (size_t i = 0; i < numParameterTypes; i++) { + if (parameterTypes->Get(i) == nullptr) { + Thread* self = Thread::Current(); + ThrowLocation computed_throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(computed_throw_location, "Ljava/lang/NoSuchMethodException;", "parameter type is null"); + return nullptr; + } + } + + mirror::Class* const c = soa.Decode(javaThis); + ScopedUtfChars name(env, javaName); + + mirror::ArtMethod* method = (recursivePublicMethods == JNI_TRUE) ? + getPublicMethodRecursive(c, name.c_str(), parameterTypes) + : getDeclaredMethodInternal(c, name.c_str(), parameterTypes); + + if (method == nullptr) + return nullptr; + + jobject artMethod = soa.AddLocalReference(method); + jobject reflectMethod = env->AllocObject(WellKnownClasses::java_lang_reflect_Method); + if (env->ExceptionCheck()) + return nullptr; + + env->SetObjectField(reflectMethod, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod, artMethod); + return reflectMethod; +} + +static mirror::ArtField* getDeclaredFieldInternal(mirror::Class* c, const StringPiece& name) { + for (size_t i = 0; i < c->NumInstanceFields(); ++i) { + mirror::ArtField* f = c->GetInstanceField(i); + if (name == f->GetName()) { + return f; + } + } + + for (size_t i = 0; i < c->NumStaticFields(); ++i) { + mirror::ArtField* f = c->GetStaticField(i); + if (name == f->GetName()) { + return f; + } + } + + return nullptr; +} + +static mirror::ArtField* getPublicFieldRecursive(mirror::Class* c, const StringPiece& name) { + // search superclasses + for (mirror::Class* klass = c; klass != nullptr; klass = klass->GetSuperClass()) { + mirror::ArtField* result = getDeclaredFieldInternal(klass, name); + if (result != nullptr && result->IsPublic()) + return result; + } + + // search iftable which has a flattened and uniqued list of interfaces + int32_t iftable_count = 
c->GetIfTableCount(); + mirror::IfTable* iftable = c->GetIfTable(); + for (int32_t i = 0; i < iftable_count; i++) { + mirror::ArtField* result = getPublicFieldRecursive(iftable->GetInterface(i), name); + if (result != nullptr && result->IsPublic()) + return result; + } + + return nullptr; +} + +static jobject getDeclaredOrRecursiveField(JNIEnv* env, jobject javaThis, jstring javaName, bool recursiveFieldMethods) { + ScopedObjectAccess soa(env); + ScopedUtfChars name(env, javaName); + mirror::Class* const c = soa.Decode(javaThis); + + mirror::ArtField* field = recursiveFieldMethods ? + getPublicFieldRecursive(c, name.c_str()) + : getDeclaredFieldInternal(c, name.c_str()); + + if (field == nullptr) + return nullptr; + + jobject artField = soa.AddLocalReference(field); + jobject reflectField = env->AllocObject(WellKnownClasses::java_lang_reflect_Field); + if (env->ExceptionCheck()) + return nullptr; + + env->SetObjectField(reflectField, WellKnownClasses::java_lang_reflect_Field_artField, artField); + return reflectField; +} + +static jobject Class_getFieldNative(JNIEnv* env, jobject javaThis, jstring javaName) { + return getDeclaredOrRecursiveField(env, javaThis, javaName, true); +} + +static jobject Class_getDeclaredFieldInternalNative(JNIEnv* env, jobject javaThis, jstring javaName) { + return getDeclaredOrRecursiveField(env, javaThis, javaName, false); +} + + +//---------------------------------------------------- +// java.lang.reflect.ArtField + +static jobject ArtField_getNameNative(JNIEnv* env, jobject javaThis) { + ScopedFastNativeObjectAccess soa(env); + mirror::ArtField* const f = soa.Decode(javaThis)->AsArtField(); + return env->NewStringUTF(f->GetName()); +} + +static jclass ArtField_getTypeNative(JNIEnv* env, jobject javaThis) { + ScopedFastNativeObjectAccess soa(env); + StackHandleScope<1> hs(soa.Self()); + Handle f(hs.NewHandle(soa.Decode(javaThis)->AsArtField())); + return soa.AddLocalReference(FieldHelper(f).GetType()); +} + + 
+//---------------------------------------------------- +// java.lang.reflect.ArtMethod + +static jobject ArtMethod_getNameNative(JNIEnv* env, jobject javaThis) { + ScopedFastNativeObjectAccess soa(env); + mirror::ArtMethod* const f = soa.Decode(javaThis)->AsArtMethod(); + return env->NewStringUTF(f->GetName()); +} + +//---------------------------------------------------- +// dalvik.system.PathClassLoader + +static jobject PathClassLoader_openNative(JNIEnv* env, jobject javaThis) { + // Ignore Samsung native method and use the default PathClassLoader constructor + return nullptr; +} + + +//---------------------------------------------------- +static JNINativeMethod gMethodsClass[] = { + NATIVE_METHOD(Class, getMethodNative, "(Ljava/lang/String;[Ljava/lang/Class;Z)Ljava/lang/reflect/Method;"), + NATIVE_METHOD(Class, getFieldNative, "(Ljava/lang/String;)Ljava/lang/reflect/Field;"), + NATIVE_METHOD(Class, getDeclaredFieldInternalNative, "(Ljava/lang/String;)Ljava/lang/reflect/Field;"), +}; + +static JNINativeMethod gMethodsArtField[] = { + NATIVE_METHOD(ArtField, getNameNative, "!()Ljava/lang/String;"), + NATIVE_METHOD(ArtField, getTypeNative, "!()Ljava/lang/Class;"), +}; + +static JNINativeMethod gMethodsArtMethod[] = { + NATIVE_METHOD(ArtMethod, getNameNative, "!()Ljava/lang/String;"), +}; + +static JNINativeMethod gMethodsPathClassLoader[] = { + NATIVE_METHOD(PathClassLoader, openNative, "!(Ljava/lang/String;Ljava/lang/String;Ljava/lang/ClassLoader;)Ldalvik/system/PathClassLoader;"), +}; + + +//---------------------------------------------------- +void register_samsung_native_methods(JNIEnv* env) { + if (!IsSamsungROM()) + return; + + RegisterNativeMethods(env, "java/lang/Class", gMethodsClass, arraysize(gMethodsClass)); + RegisterNativeMethods(env, "java/lang/reflect/ArtField", gMethodsArtField, arraysize(gMethodsArtField)); + RegisterNativeMethods(env, "java/lang/reflect/ArtMethod", gMethodsArtMethod, arraysize(gMethodsArtMethod)); + RegisterNativeMethods(env, 
"dalvik/system/PathClassLoader", gMethodsPathClassLoader, arraysize(gMethodsPathClassLoader)); +} + +} // namespace art diff --git a/runtime/oat.cc b/runtime/oat.cc index 34c136249f1..e91077005ae 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -20,6 +20,12 @@ #include #include +#include "base/stringprintf.h" +#include "base/unix_file/fd_file.h" +#include "elf_file.h" +#include "oat_file.h" +#include "os.h" + namespace art { const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' }; @@ -59,6 +65,30 @@ OatHeader* OatHeader::Create(InstructionSet instruction_set, variable_data); } +OatHeader* OatHeader::FromFile(const std::string& filename, std::string* error_msg) { + std::unique_ptr file(OS::OpenFileForReading(filename.c_str())); + if (file.get() == nullptr) { + *error_msg = StringPrintf("Could not get oat header because file could not be opened: %s", filename.c_str()); + return nullptr; + } + std::unique_ptr elf_file(ElfFile::Open(file.get(), false, false, error_msg)); + if (elf_file.get() == nullptr) { + return nullptr; + } + std::unique_ptr oat_file(OatFile::OpenWithElfFile(elf_file.release(), filename, + error_msg)); + if (oat_file.get() == nullptr) { + return nullptr; + } + + const OatHeader& oat_header = oat_file->GetOatHeader(); + size_t header_size = oat_header.GetHeaderSize(); + void* memory = operator new (header_size); + std::unique_ptr hdr(new (memory) OatHeader()); + memcpy(hdr.get(), &oat_header, header_size); + return hdr.release(); +} + OatHeader::OatHeader(InstructionSet instruction_set, const InstructionSetFeatures& instruction_set_features, const std::vector* dex_files, @@ -128,6 +158,12 @@ bool OatHeader::IsValid() const { return true; } +bool OatHeader::IsXposedOatVersionValid() const { + CHECK(IsValid()); + const char* version = GetStoreValueByKey(OatHeader::kXposedOatVersionKey); + return version != nullptr && strcmp(version, kXposedOatCurrentVersion) == 0; +} + const char* OatHeader::GetMagic() const { CHECK(IsValid()); return 
reinterpret_cast(magic_); @@ -138,6 +174,18 @@ uint32_t OatHeader::GetChecksum() const { return adler32_checksum_; } +uint32_t OatHeader::GetOriginalChecksum(bool fallback) const { + CHECK(IsValid()); + const char* value = GetStoreValueByKey(OatHeader::kOriginalOatChecksumKey); + if (value != nullptr) { + uint32_t checksum = strtoul(value, nullptr, 0); + if (checksum != 0) { + return checksum; + } + } + return fallback ? adler32_checksum_ : 0; +} + void OatHeader::UpdateChecksum(const void* data, size_t length) { DCHECK(IsValid()); const uint8_t* bytes = reinterpret_cast(data); diff --git a/runtime/oat.h b/runtime/oat.h index 0534b1d2bbf..ee47ed4aceb 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -37,6 +37,10 @@ class PACKED(4) OatHeader { static constexpr const char* kDex2OatHostKey = "dex2oat-host"; static constexpr const char* kPicKey = "pic"; + static constexpr const char* kOriginalOatChecksumKey = "original-oat-checksum"; + static constexpr const char* kXposedOatVersionKey = "xposed-oat-version"; + static constexpr const char* kXposedOatCurrentVersion = "2"; + static OatHeader* Create(InstructionSet instruction_set, const InstructionSetFeatures& instruction_set_features, const std::vector* dex_files, @@ -44,9 +48,13 @@ class PACKED(4) OatHeader { uint32_t image_file_location_oat_data_begin, const SafeMap* variable_data); + static OatHeader* FromFile(const std::string& filename, std::string* error_msg); + bool IsValid() const; + bool IsXposedOatVersionValid() const; const char* GetMagic() const; uint32_t GetChecksum() const; + uint32_t GetOriginalChecksum(bool fallback) const; void UpdateChecksum(const void* data, size_t length); uint32_t GetDexFileCount() const { DCHECK(IsValid()); @@ -107,6 +115,8 @@ class PACKED(4) OatHeader { bool IsPic() const; private: + OatHeader() {} + OatHeader(InstructionSet instruction_set, const InstructionSetFeatures& instruction_set_features, const std::vector* dex_files, diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc 
index 8bf7ad4864d..4b76452c8b8 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -143,7 +143,7 @@ OatFile* OatFile::OpenElfFile(File* file, OatFile::OatFile(const std::string& location, bool is_executable) : location_(location), begin_(NULL), end_(NULL), is_executable_(is_executable), - dlopen_handle_(NULL), + is_created_in_zygote_(Runtime::Current() != nullptr && Runtime::Current()->IsZygote()), dlopen_handle_(NULL), secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) { CHECK(!location_.empty()); } @@ -321,6 +321,22 @@ bool OatFile::Setup(std::string* error_msg) { const DexFile::Header* header = reinterpret_cast(dex_file_pointer); const uint32_t* methods_offsets_pointer = reinterpret_cast(oat); + /* + * Samsung has introduced a TypeLookupTable for each dex file in their oat files. + * Its relative offset is stored between the dex_file_offset and the methods_offsets_pointer. + * The lookup tables themselves are stored between the OatDexFiles (which we are currently + * iterating over) and the DexFiles. The OatClasses are stored further down the file. + * + * This means that on Samsung ROMs (with files created by Samsung's adjusted libart-compiler.so), + * methods_offsets_pointer[0] will actually hold the offset of the TypeLookupTable, which + * is lower than the DexFile. If we detect this case, we skip (and ignore) the value and + * adjust methods_offsets_pointer to point to the correct address. 
+ */ + if (methods_offsets_pointer[0] < dex_file_offset) { + oat += sizeof(uint32_t); + methods_offsets_pointer = reinterpret_cast(oat); + } + oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_); if (UNLIKELY(oat > End())) { *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated " diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 5d92a05a6e5..8e118cbc4e3 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -75,6 +75,10 @@ class OatFile { bool IsPic() const; + bool IsCreatedInZygote() const { + return is_created_in_zygote_; + } + ElfFile* GetElfFile() const { CHECK_NE(reinterpret_cast(elf_file_.get()), reinterpret_cast(nullptr)) << "Cannot get an elf file from " << GetLocation(); @@ -327,6 +331,9 @@ class OatFile { // Was this oat_file loaded executable? const bool is_executable_; + // Was this file loaded in Zygote? + const bool is_created_in_zygote_; + // Backing memory map for oat file during when opened by ElfWriter during initial compilation. 
std::unique_ptr mem_map_; diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 6fd88147484..b40d9c3584f 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -628,45 +628,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize if (!ParseStringAfterChar(option, '=', &native_bridge_library_filename_)) { return false; } - } else if (StartsWith(option, "-ea") || - StartsWith(option, "-da") || - StartsWith(option, "-enableassertions") || - StartsWith(option, "-disableassertions") || - (option == "--runtime-arg") || - (option == "-esa") || - (option == "-dsa") || - (option == "-enablesystemassertions") || - (option == "-disablesystemassertions") || - (option == "-Xrs") || - StartsWith(option, "-Xint:") || - StartsWith(option, "-Xdexopt:") || - (option == "-Xnoquithandler") || - StartsWith(option, "-Xjniopts:") || - StartsWith(option, "-Xjnigreflimit:") || - (option == "-Xgenregmap") || - (option == "-Xnogenregmap") || - StartsWith(option, "-Xverifyopt:") || - (option == "-Xcheckdexsum") || - (option == "-Xincludeselectedop") || - StartsWith(option, "-Xjitop:") || - (option == "-Xincludeselectedmethod") || - StartsWith(option, "-Xjitthreshold:") || - StartsWith(option, "-Xjitcodecachesize:") || - (option == "-Xjitblocking") || - StartsWith(option, "-Xjitmethod:") || - StartsWith(option, "-Xjitclass:") || - StartsWith(option, "-Xjitoffset:") || - StartsWith(option, "-Xjitconfig:") || - (option == "-Xjitcheckcg") || - (option == "-Xjitverbose") || - (option == "-Xjitprofile") || - (option == "-Xjitdisableopt") || - (option == "-Xjitsuspendpoll") || - StartsWith(option, "-XX:mainThreadStackSize=")) { - // Ignored for backwards compatibility. 
- } else if (!ignore_unrecognized) { - Usage("Unrecognized option %s\n", option.c_str()); - return false; } } // If not set, background collector type defaults to homogeneous compaction diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 346d0a19336..e2790be7d14 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -100,6 +100,7 @@ Runtime* Runtime::instance_ = NULL; Runtime::Runtime() : instruction_set_(kNone), compiler_callbacks_(nullptr), + is_recompiling_(false), is_zygote_(false), must_relocate_(false), is_concurrent_gc_enabled_(true), @@ -998,6 +999,7 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) { REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmServer); REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmVmInternal); REGISTER(register_sun_misc_Unsafe); + REGISTER(register_samsung_native_methods); #undef REGISTER } diff --git a/runtime/runtime.h b/runtime/runtime.h index 6980e8d1ba8..b02630bff95 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -94,6 +94,14 @@ class Runtime { return compiler_callbacks_ != nullptr; } + bool IsRecompiling() const { + return is_recompiling_; + } + + void SetRecompiling(bool new_value) { + is_recompiling_ = new_value; + } + bool CanRelocate() const { return !IsCompiler() || compiler_callbacks_->IsRelocationPossible(); } @@ -532,6 +540,7 @@ class Runtime { QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType]; CompilerCallbacks* compiler_callbacks_; + bool is_recompiling_; bool is_zygote_; bool must_relocate_; bool is_concurrent_gc_enabled_; diff --git a/runtime/thread.cc b/runtime/thread.cc index 2f474f7ae13..49e5c2665c7 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1595,6 +1595,9 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray( line_number = -1; class_name_object.Assign(method->GetDeclaringClass()->GetName()); // source_name_object intentionally left null for proxy methods + if (method->IsXposedHookedMethod()) { + 
source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")); + } } else { mirror::IntArray* pc_trace = down_cast(method_trace->Get(depth)); uint32_t dex_pc = pc_trace->Get(i); diff --git a/runtime/utils.cc b/runtime/utils.cc index 376575918fa..adc701f96f8 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -385,6 +385,11 @@ std::string PrettyMethod(mirror::ArtMethod* m, bool with_signature) { result = PrettyReturnType(sig_as_string.c_str()) + " " + result + PrettyArguments(sig_as_string.c_str()); } + if (UNLIKELY(m->IsXposedHookedMethod())) { + result += " [XposedHooked]"; + } else if (UNLIKELY(m->IsXposedOriginalMethod())) { + result += " [XposedOriginal]"; + } return result; } @@ -1301,9 +1306,12 @@ std::string DexFilenameToOdexFilename(const std::string& location, const Instruc std::string odex_location(location); InsertIsaDirectory(isa, &odex_location); size_t dot_index = odex_location.size() - 3 - 1; // 3=dex or zip or apk - CHECK_EQ('.', odex_location[dot_index]) << location; - odex_location.resize(dot_index + 1); - CHECK_EQ('.', odex_location[odex_location.size()-1]) << location << " " << odex_location; + if (odex_location[dot_index] == '.') { + odex_location.resize(dot_index + 1); + CHECK_EQ('.', odex_location[odex_location.size()-1]) << location << " " << odex_location; + } else { + odex_location += "."; + } odex_location += "odex"; return odex_location; } @@ -1380,4 +1388,14 @@ std::string PrettyDescriptor(Primitive::Type type) { return PrettyDescriptor(Primitive::Descriptor(type)); } +bool IsSamsungROM() { + static bool checked = false; + static bool result = false; + if (!checked) { + result = OS::FileExists("/system/framework/twframework.jar"); + checked = true; + } + return result; +} + } // namespace art diff --git a/runtime/utils.h b/runtime/utils.h index e6235ad5a3e..c2bf395c463 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -557,6 +557,8 @@ struct FreeDelete { template using UniqueCPtr = std::unique_ptr; +bool 
IsSamsungROM(); + } // namespace art #endif // ART_RUNTIME_UTILS_H_ diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index ee7bd144bfc..7f332a7662e 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -352,7 +352,7 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, Handle need_precise_constants_(need_precise_constants), has_check_casts_(false), has_virtual_or_interface_invokes_(false), - verify_to_dump_(verify_to_dump) { + verify_to_dump_(verify_to_dump || Runtime::Current()->IsRecompiling()) { Runtime::Current()->AddMethodVerifier(this); DCHECK(class_def != nullptr); } @@ -3383,7 +3383,14 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range); if (res_method == nullptr) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name(); + if (((method_access_flags_ & kAccConstructor) != 0) && ((method_access_flags_ & kAccStatic) != 0)) { + // Class initializers are never compiled, but always interpreted. + // The interpreter might throw a NPE, so this error can be ignored. 
+ LOG(WARNING) << "Cannot infer method from " << inst->Name() + << " ignored in " << PrettyMethod(dex_method_idx_, *dex_file_, false); + } else { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name(); + } return nullptr; } if (FailOrAbort(this, !res_method->IsDirect(), "Quick-invoked method is direct at ", diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk index b7ff3606fdb..e8793d3c74f 100644 --- a/sigchainlib/Android.mk +++ b/sigchainlib/Android.mk @@ -53,6 +53,9 @@ LOCAL_SRC_FILES := sigchain_dummy.cc LOCAL_MODULE:= libsigchain LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk LOCAL_LDLIBS = -ldl +ifeq ($(strip $(HOST_OS)),darwin) +LOCAL_LDFLAGS += -Wl,-lstdc++ +endif LOCAL_MULTILIB := both include $(BUILD_HOST_SHARED_LIBRARY) diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java index fab153b6247..8b4a9a4a31b 100644 --- a/test/MyClassNatives/MyClassNatives.java +++ b/test/MyClassNatives/MyClassNatives.java @@ -80,6 +80,7 @@ native void maxParamNumber(Object o0, Object o1, Object o2, Object o3, Object o4 Object o248, Object o249, Object o250, Object o251, Object o252, Object o253); native void withoutImplementation(); + native Object withoutImplementationRefReturn(); native static void stackArgsIntsFirst(int i1, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, int i10, float f1, float f2, float f3, float f4, float f5, float f6,