Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 44 additions & 29 deletions src/hotspot/share/opto/escape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/subtypenode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
Expand Down Expand Up @@ -506,7 +507,7 @@ bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
// I require the 'other' input to be a constant so that I can move the Cmp
// around safely.
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN || cmp->Opcode() == Op_SubTypeCheck, "not expected node: %s", cmp->Name());
Node* left = cmp->in(1);
Node* right = cmp->in(2);

Expand Down Expand Up @@ -537,10 +538,10 @@ bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
// Check if we are able to untangle the merge. The following patterns are
// supported:
// - Phi -> SafePoints
// - Phi -> CmpP/N
// - Phi -> CmpP/CmpN/SubTypeCheck
// - Phi -> AddP -> Load
// - Phi -> CastPP -> SafePoints
// - Phi -> CastPP -> AddP -> Load
// - Phi -> CastPP/CheckCastPP -> SafePoints
// - Phi -> CastPP/CheckCastPP -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i);
Expand All @@ -549,7 +550,7 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
return false;
} else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
} else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->in(1)->as_Phi(), use->as_SafePoint())) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
return false;
}
Expand All @@ -570,12 +571,12 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
} else if (nesting > 0) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
return false;
} else if (use->is_CastPP()) {
} else if (use->is_CastPP() || use->is_CheckCastPP()) {
const Type* cast_t = _igvn->type(use);
if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
if (TraceReduceAllocationMerges) {
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP/CheckCastPP is not to an instance.", n->_idx, _invocation);
use->dump();
}
#endif
Expand All @@ -585,7 +586,7 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
if (!is_trivial_control) {
// If it's not a trivial control then we check if we can reduce the
// CmpP/N used by the If controlling the cast.
// CmpP/N/SubTypeCheck used by the If controlling the cast.
if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
Node* iff = use->in(0)->in(0);
// We may have an OpaqueConstantBool node between If and Bool nodes. But we could also have a sub class of IfNode,
Expand All @@ -594,12 +595,12 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
if (can_reduce) {
Node* iff_cmp = iff->in(1)->in(1);
int opc = iff_cmp->Opcode();
can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
can_reduce = (opc == Op_CmpP || opc == Op_CmpN || opc == Op_SubTypeCheck) && can_reduce_cmp(n, iff_cmp);
}
if (!can_reduce) {
#ifndef PRODUCT
if (TraceReduceAllocationMerges) {
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP/CheckCastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
n->dump(5);
}
#endif
Expand All @@ -611,9 +612,9 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
if (!can_reduce_check_users(use, nesting+1)) {
return false;
}
} else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
} else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN || use->Opcode() == Op_SubTypeCheck) {
if (!can_reduce_cmp(n, use)) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N/SubTypeCheck %d isn't reducible.", n->_idx, _invocation, use->_idx);)
return false;
}
} else {
Expand Down Expand Up @@ -653,15 +654,16 @@ bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable therefore we always need a controlling if for the splitted CastPP.
// CastPP/CheckCastPP after it was split. This method is only called on bases
// that are nullable therefore we always need a controlling if for the splitted
// CastPP/CheckCastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the null constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the later
// case because we have constraints on it and because the CastPP has a control
// input.
// 'curr_ctrl' is the control of the CastPP/CheckCastPP that we want to split
// through phi. If the cast currently doesn't have a control then the CmpP/N
// will be against the null constant, otherwise it will be against the constant
// input of the existing CmpP/N/SubTypeCheck. It's guaranteed that there will be
// a CmpP/N/SubTypeCheck in the later case because we have constraints on it and
// because the cast has a control input.
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
const Type* t = base->bottom_type();
Node* con = nullptr;
Expand All @@ -674,6 +676,16 @@ Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
Node* bol = curr_ctrl->in(0)->in(1);
assert(bol->is_Bool(), "unexpected node %s", bol->Name());
Node* curr_cmp = bol->in(1);
if (curr_cmp->Opcode() == Op_SubTypeCheck) {
// For SubTypeCheck(Phi, SuperKlass), create a new SubTypeCheck that is
// specialized to this particular base input of the Phi but using the
// existing superklass, method, and bci metadata.
int bci = curr_cmp->as_SubTypeCheck()->bci();
ciMethod* method = curr_cmp->as_SubTypeCheck()->method();
Node* superklass = curr_cmp->in(SubTypeCheckNode::SuperKlass);
return new SubTypeCheckNode(_compile, base, superklass, method, bci);
}

assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
}
Expand Down Expand Up @@ -1006,7 +1018,10 @@ void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
Node* ophi_input = ophi->in(i);
Node* res_phi_input = nullptr;

const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
// Only CmpP and CmpN nodes are valid for `optimize_ptr_compare()`. If we
// come across a `SubTypeCheck` node, then treat the comparison as UNKNOWN.
const TypeInt* tcmp = cmp->Opcode() == Op_SubTypeCheck ?
TypeInt::CC : optimize_ptr_compare(ophi_input, other);
if (tcmp->singleton()) {
if ((mask == BoolTest::mask::eq && tcmp == TypeInt::CC_EQ) ||
(mask == BoolTest::mask::ne && tcmp == TypeInt::CC_GT)) {
Expand Down Expand Up @@ -1221,10 +1236,10 @@ bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
Node* use = ophi->raw_out(i);
if (use->is_SafePoint()) {
safepoints.push(use);
} else if (use->is_CastPP()) {
} else if (use->is_CastPP() || use->is_CheckCastPP()) {
casts.push(use);
} else {
assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
assert(use->outcnt() == 0, "Only CastPP/CheckCastPP & SafePoint users should be left.");
}
}

Expand Down Expand Up @@ -1334,13 +1349,13 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_work

// Copying all users first because some will be removed and others won't.
// Ophi also may acquire some new users as part of Cast reduction.
// CastPPs also need to be processed before CmpPs.
// CastPP/CheckCastPP nodes also need to be processed before CmpP/SubTypeCheck.
Unique_Node_List castpps;
Unique_Node_List others;
for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
Node* use = ophi->fast_out(i);

if (use->is_CastPP()) {
if (use->is_CastPP() || use->is_CheckCastPP()) {
castpps.push(use);
} else if (use->is_AddP() || use->is_Cmp()) {
others.push(use);
Expand All @@ -1352,9 +1367,9 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_work

_compile->print_method(PHASE_EA_BEFORE_PHI_REDUCTION, 5, ophi);

// CastPPs need to be processed before Cmps because during the process of
// splitting CastPPs we make reference to the inputs of the Cmp that is used
// by the If controlling the CastPP.
// CastPP/CheckCastPP nodes need to be processed before Cmps because during
// the process of splitting casts we make reference to the inputs of the Cmp
// that is used by the If controlling the CastPP/CheckCastPP.
for (uint i = 0; i < castpps.size(); i++) {
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
_compile->print_method(PHASE_EA_AFTER_PHI_CASTPP_REDUCTION, 6, castpps.at(i));
Expand Down Expand Up @@ -4737,7 +4752,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
Node* use = phi->fast_out(j);
if (!use->is_SafePoint() && !use->is_CastPP()) {
if (!use->is_SafePoint() && !use->is_CastPP() && !use->is_CheckCastPP()) {
phi->dump(2);
phi->dump(-2);
assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
Expand Down
4 changes: 2 additions & 2 deletions src/hotspot/share/opto/memnode.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1655,14 +1655,14 @@ static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
// by 'split_through_phi'. The first use of this method was in EA code as part
// of simplification of allocation merges.
// Some differences from original method (split_through_phi):
// - If base->is_CastPP(): base = base->in(1)
// - If base->is_CastPP() or base->is_CheckCastPP(): base = base->in(1)
bool LoadNode::can_split_through_phi_base(PhaseGVN* phase) {
Node* mem = in(Memory);
Node* address = in(Address);
intptr_t ignore = 0;
Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);

if (base->is_CastPP()) {
if (base->is_CastPP() || base->is_CheckCastPP()) {
base = base->in(1);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,10 @@ public static void main(String[] args) {
"testString_two_C2",
"testLoadKlassFromCast_C2",
"testLoadKlassFromPhi_C2",
"testReReduce_C2"
"testReReduce_C2",
"testInstanceOfNullMerge_C2",
"testInstanceOfWithBinding_C2",
"testInstanceOfWithoutBinding_C2",
})
public void runner(RunInfo info) {
invocations++;
Expand Down Expand Up @@ -197,6 +200,9 @@ public void runner(RunInfo info) {
Asserts.assertEQ(testLoadKlassFromCast_Interp(cond1), testLoadKlassFromCast_C2(cond1));
Asserts.assertEQ(testLoadKlassFromPhi_Interp(cond1), testLoadKlassFromPhi_C2(cond1));
Asserts.assertEQ(testReReduce_Interp(cond1, x, y), testReReduce_C2(cond1, x, y));
Asserts.assertEQ(testInstanceOfNullMerge_Interp(cond1, x), testInstanceOfNullMerge_C2(cond1, x));
Asserts.assertEQ(testInstanceOfWithBinding_Interp(cond1, x, y), testInstanceOfWithBinding_C2(cond1, x, y));
Asserts.assertEQ(testInstanceOfWithoutBinding_Interp(cond1, x, y), testInstanceOfWithoutBinding_C2(cond1, x, y));

Asserts.assertEQ(testSRAndNSR_Trap_Interp(false, cond1, cond2, x, y),
testSRAndNSR_Trap_C2(info.isTestC2Compiled("testSRAndNSR_Trap_C2"), cond1, cond2, x, y));
Expand Down Expand Up @@ -1380,6 +1386,63 @@ int testReReduce(boolean cond, int x, int y) {
@DontCompile
int testReReduce_Interp(boolean cond1, int x, int y) { return testReReduce(cond1, x, y); }

// -------------------------------------------------------------------------

@ForceInline
int testInstanceOfNullMerge(boolean cond, int x) {
Shape s = null;
if (cond) {
s = new Square(x);
}
if (s instanceof Square sq) {
return sq.l;
}
return 0;
}

@Test
@IR(failOn = { IRNode.ALLOC })
int testInstanceOfNullMerge_C2(boolean cond, int x) { return testInstanceOfNullMerge(cond, x); }

@DontCompile
int testInstanceOfNullMerge_Interp(boolean cond, int x) { return testInstanceOfNullMerge(cond, x); }

// -------------------------------------------------------------------------

@ForceInline
int testInstanceOfWithBinding(boolean cond, int x, int y) {
Shape s = cond ? new Square(x) : new Circle(y);
if (s instanceof Square sq) {
return sq.l;
}
return s.l;
}

@Test
@IR(failOn = { IRNode.ALLOC })
int testInstanceOfWithBinding_C2(boolean cond, int x, int y) { return testInstanceOfWithBinding(cond, x, y); }

@DontCompile
int testInstanceOfWithBinding_Interp(boolean cond, int x, int y) { return testInstanceOfWithBinding(cond, x, y); }

// -------------------------------------------------------------------------

@ForceInline
int testInstanceOfWithoutBinding(boolean cond, int x, int y) {
Shape s = cond ? new Square(x) : new Circle(y);
if (s instanceof Square) {
return s.x;
}
return s.l;
}

@Test
@IR(failOn = { IRNode.ALLOC })
int testInstanceOfWithoutBinding_C2(boolean cond, int x, int y) { return testInstanceOfWithoutBinding(cond, x, y); }

@DontCompile
int testInstanceOfWithoutBinding_Interp(boolean cond, int x, int y) { return testInstanceOfWithoutBinding(cond, x, y); }

// ------------------ Utility for Testing ------------------- //

@DontCompile
Expand Down