diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp
index 3527e46c24b7c..d7a57d072816f 100644
--- a/src/hotspot/share/opto/callnode.cpp
+++ b/src/hotspot/share/opto/callnode.cpp
@@ -918,7 +918,7 @@ Node *CallNode::result_cast() {
 }
 
-void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
+void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) const {
   projs->fallthrough_proj = nullptr;
   projs->fallthrough_catchproj = nullptr;
   projs->fallthrough_ioproj = nullptr;
@@ -1303,6 +1303,57 @@ void CallLeafVectorNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_
 
 //=============================================================================
 
+bool CallLeafPureNode::is_unused() const {
+  return proj_out_or_null(TypeFunc::Parms) == nullptr;
+}
+
+bool CallLeafPureNode::is_dead() const {
+  return proj_out_or_null(TypeFunc::Control) == nullptr;
+}
+
+/* We make a tuple of the global input state + TOP for the output values.
+ * We use this to delete a pure function that is not used: by replacing the call with
+ * such a tuple, we let the idealization of the output Projs pick the corresponding
+ * input of the pure call, jumping over it and effectively removing the call from
+ * the graph. This avoids doing the graph surgery manually and leaves it to IGVN,
+ * which is specialized in doing that right. We also need tuple components for the
+ * output values of the function, to respect the return arity and in case there is a
+ * projection that would pick an output (which shouldn't happen at the moment).
+ */
+TupleNode* CallLeafPureNode::make_tuple_of_input_state_and_top_return_values(const Compile* C) const {
+  // Transparently propagate the input state, except for the parameters
+  TupleNode* tuple = TupleNode::make(
+      tf()->range(),
+      in(TypeFunc::Control),
+      in(TypeFunc::I_O),
+      in(TypeFunc::Memory),
+      in(TypeFunc::FramePtr),
+      in(TypeFunc::ReturnAdr));
+
+  // And add TOPs for the return values
+  for (uint i = TypeFunc::Parms; i < tf()->range()->cnt(); i++) {
+    tuple->set_req(i, C->top());
+  }
+
+  return tuple;
+}
+
+Node* CallLeafPureNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  if (is_dead()) {
+    return nullptr;
+  }
+
+  // We need to wait until IGVN because, during parsing, uses might still be missing
+  // and we would remove the call prematurely.
+  if (can_reshape && is_unused()) {
+    // The result is not used. We remove the call by replacing it with a tuple that
+    // is later disintegrated by the projections.
+    return make_tuple_of_input_state_and_top_return_values(phase->C);
+  }
+
+  return CallRuntimeNode::Ideal(phase, can_reshape);
+}
+
 #ifndef PRODUCT
 void CallLeafNode::dump_spec(outputStream *st) const {
   st->print("# ");
diff --git a/src/hotspot/share/opto/callnode.hpp b/src/hotspot/share/opto/callnode.hpp
index db857d4c6d1a6..f918ce0590289 100644
--- a/src/hotspot/share/opto/callnode.hpp
+++ b/src/hotspot/share/opto/callnode.hpp
@@ -737,7 +737,7 @@ class CallNode : public SafePointNode {
   // Collect all the interesting edges from a call for use in
   // replacing the call by something else.  Used by macro expansion
   // and the late inlining support.
-  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
+  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true) const;
 
   virtual uint match_edge(uint idx) const;
 
@@ -913,6 +913,33 @@ class CallLeafNode : public CallRuntimeNode {
 #endif
 };
 
+/* A pure function call: it is assumed not to be a safepoint, not to read or write memory,
+ * and to have no exceptions... It just takes parameters and returns a value, without side
+ * effects. It is always correct to create such calls, or to remove them if the result is
+ * not used.
+ *
+ * They still have a control input to allow easy lowering into other kinds of calls that
+ * require a control, but this is more a technical than a conceptual constraint.
+ *
+ * Pure calls must have only control and data inputs and outputs: I/O, Memory and so on
+ * must be top. Nevertheless, pure calls can typically be expensive math operations, so
+ * care must be taken when letting the node float.
+ */
+class CallLeafPureNode : public CallLeafNode {
+protected:
+  bool is_unused() const;
+  bool is_dead() const;
+  TupleNode* make_tuple_of_input_state_and_top_return_values(const Compile* C) const;
+
+public:
+  CallLeafPureNode(const TypeFunc* tf, address addr, const char* name,
+                   const TypePtr* adr_type)
+      : CallLeafNode(tf, addr, name, adr_type) {
+    init_class_id(Class_CallLeafPure);
+  }
+  int Opcode() const override;
+  Node* Ideal(PhaseGVN* phase, bool can_reshape) override;
+};
+
 //------------------------------CallLeafNoFPNode-------------------------------
 // CallLeafNode, not using floating point or using it in the same manner as
 // the generated code
diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp
index bc259eed2d101..587d5fad8f29e 100644
--- a/src/hotspot/share/opto/classes.hpp
+++ b/src/hotspot/share/opto/classes.hpp
@@ -61,6 +61,7 @@ macro(CallDynamicJava)
 macro(CallJava)
 macro(CallLeaf)
 macro(CallLeafNoFP)
+macro(CallLeafPure)
 macro(CallLeafVector)
 macro(CallRuntime)
 macro(CallStaticJava)
@@ -372,6 +373,7 @@ macro(SubI)
 macro(SubL)
 macro(TailCall)
 macro(TailJump)
+macro(Tuple)
 macro(MacroLogicV)
 macro(ThreadLocal)
 macro(Unlock)
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index a5023408cdfe3..bef9d17e58a06 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -3298,6 +3298,25 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
   case Op_Opaque1:              // Remove Opaque Nodes before matching
     n->subsume_by(n->in(1), this);
     break;
+  case Op_CallLeafPure: {
+    // If the pure call is not supported, then lower it to a CallLeaf.
+    if (!Matcher::match_rule_supported(Op_CallLeafPure)) {
+      CallNode* call = n->as_Call();
+      CallNode* new_call = new CallLeafNode(call->tf(), call->entry_point(),
+                                            call->_name, TypeRawPtr::BOTTOM);
+      new_call->init_req(TypeFunc::Control, call->in(TypeFunc::Control));
+      new_call->init_req(TypeFunc::I_O, C->top());
+      new_call->init_req(TypeFunc::Memory, C->top());
+      new_call->init_req(TypeFunc::ReturnAdr, C->top());
+      new_call->init_req(TypeFunc::FramePtr, C->top());
+      for (unsigned int i = TypeFunc::Parms; i < call->tf()->domain()->cnt(); i++) {
+        new_call->init_req(i, call->in(i));
+      }
+      n->subsume_by(new_call, this);
+    }
+    frc.inc_call_count();
+    break;
+  }
   case Op_CallStaticJava:
   case Op_CallJava:
   case Op_CallDynamicJava:
diff --git a/src/hotspot/share/opto/divnode.cpp b/src/hotspot/share/opto/divnode.cpp
index a70194274a793..e23754f01dc49 100644
--- a/src/hotspot/share/opto/divnode.cpp
+++ b/src/hotspot/share/opto/divnode.cpp
@@ -42,19 +42,19 @@
 #include
 
-ModFloatingNode::ModFloatingNode(Compile* C, const TypeFunc* tf, const char* name) : CallLeafNode(tf, nullptr, name, TypeRawPtr::BOTTOM) {
+ModFloatingNode::ModFloatingNode(Compile* C, const TypeFunc* tf, address addr, const char* name) : CallLeafPureNode(tf, addr, name, TypeRawPtr::BOTTOM) {
   add_flag(Flag_is_macro);
   C->add_macro_node(this);
 }
 
-ModDNode::ModDNode(Compile* C, Node* a, Node* b) : ModFloatingNode(C, OptoRuntime::Math_DD_D_Type(), "drem") {
+ModDNode::ModDNode(Compile* C, Node* a, Node* b) : ModFloatingNode(C, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::drem), "drem") {
   init_req(TypeFunc::Parms + 0, a);
   init_req(TypeFunc::Parms + 1, C->top());
   init_req(TypeFunc::Parms + 2, b);
   init_req(TypeFunc::Parms + 3, C->top());
 }
 
-ModFNode::ModFNode(Compile* C, Node* a, Node* b) : ModFloatingNode(C, OptoRuntime::modf_Type(), "frem") {
+ModFNode::ModFNode(Compile* C, Node* a, Node* b) : ModFloatingNode(C, OptoRuntime::modf_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::frem), "frem") {
   init_req(TypeFunc::Parms + 0, a);
   init_req(TypeFunc::Parms + 1, b);
 }
@@ -1517,16 +1517,16 @@ const Type* UModLNode::Value(PhaseGVN* phase) const {
 }
 
 Node* ModFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* super = CallLeafPureNode::Ideal(phase, can_reshape);
+  if (super != nullptr) {
+    return super;
+  }
+
   if (!can_reshape) {
     return nullptr;
   }
-  PhaseIterGVN* igvn = phase->is_IterGVN();
-  bool result_is_unused = proj_out_or_null(TypeFunc::Parms) == nullptr;
-  bool not_dead = proj_out_or_null(TypeFunc::Control) != nullptr;
-  if (result_is_unused && not_dead) {
-    return replace_with_con(igvn, TypeF::make(0.));
-  }
+  PhaseIterGVN* igvn = phase->is_IterGVN();
 
   // Either input is TOP ==> the result is TOP
   const Type* t1 = phase->type(dividend());
@@ -1569,16 +1569,16 @@ Node* ModFNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 }
 
 Node* ModDNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* super = CallLeafPureNode::Ideal(phase, can_reshape);
+  if (super != nullptr) {
+    return super;
+  }
+
   if (!can_reshape) {
     return nullptr;
   }
-  PhaseIterGVN* igvn = phase->is_IterGVN();
-  bool result_is_unused = proj_out_or_null(TypeFunc::Parms) == nullptr;
-  bool not_dead = proj_out_or_null(TypeFunc::Control) != nullptr;
-  if (result_is_unused && not_dead) {
-    return replace_with_con(igvn, TypeD::make(0.));
-  }
+  PhaseIterGVN* igvn = phase->is_IterGVN();
 
   // Either input is TOP ==> the result is TOP
   const Type* t1 = phase->type(dividend());
@@ -1626,20 +1626,6 @@ Node* ModFloatingNode::replace_with_con(PhaseIterGVN* phase, const Type* con) {
   CallProjections projs;
   extract_projections(&projs, false, false);
   phase->replace_node(projs.fallthrough_proj, in(TypeFunc::Control));
-  if (projs.fallthrough_catchproj != nullptr) {
-    phase->replace_node(projs.fallthrough_catchproj, in(TypeFunc::Control));
-  }
-  if (projs.fallthrough_memproj != nullptr) {
-    phase->replace_node(projs.fallthrough_memproj, in(TypeFunc::Memory));
-  }
-  if (projs.catchall_memproj != nullptr) {
-    phase->replace_node(projs.catchall_memproj, C->top());
-  }
-  if (projs.fallthrough_ioproj != nullptr) {
-    phase->replace_node(projs.fallthrough_ioproj, in(TypeFunc::I_O));
-  }
-  assert(projs.catchall_ioproj == nullptr, "no exceptions from floating mod");
-  assert(projs.catchall_catchproj == nullptr, "no exceptions from floating mod");
   if (projs.resproj != nullptr) {
     phase->replace_node(projs.resproj, con_node);
   }
diff --git a/src/hotspot/share/opto/divnode.hpp b/src/hotspot/share/opto/divnode.hpp
index 127e2431b0b3b..c217cd94079e8 100644
--- a/src/hotspot/share/opto/divnode.hpp
+++ b/src/hotspot/share/opto/divnode.hpp
@@ -156,12 +156,12 @@ class ModLNode : public Node {
 };
 
 // Base class for float and double modulus
-class ModFloatingNode : public CallLeafNode {
+class ModFloatingNode : public CallLeafPureNode {
 protected:
   Node* replace_with_con(PhaseIterGVN* phase, const Type* con);
 
 public:
-  ModFloatingNode(Compile* C, const TypeFunc* tf, const char *name);
+  ModFloatingNode(Compile* C, const TypeFunc* tf, address addr, const char* name);
 };
 
 // Float Modulus
diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
index 20feca26ede55..9f99ad1d0e0df 100644
--- a/src/hotspot/share/opto/graphKit.cpp
+++ b/src/hotspot/share/opto/graphKit.cpp
@@ -1880,14 +1880,20 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_p
 // after the call, if this call has restricted memory effects.
 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
   // Set fixed predefined input arguments
-  Node* memory = reset_memory();
-  Node* m = narrow_mem == nullptr ? memory : narrow_mem;
-  call->init_req( TypeFunc::Control, control() );
-  call->init_req( TypeFunc::I_O, top() ); // does no i/o
-  call->init_req( TypeFunc::Memory, m ); // may gc ptrs
-  call->init_req( TypeFunc::FramePtr, frameptr() );
-  call->init_req( TypeFunc::ReturnAdr, top() );
-  return memory;
+  call->init_req(TypeFunc::Control, control());
+  call->init_req(TypeFunc::I_O, top()); // does no i/o
+  call->init_req(TypeFunc::ReturnAdr, top());
+  if (call->is_CallLeafPure()) {
+    call->init_req(TypeFunc::Memory, top());
+    call->init_req(TypeFunc::FramePtr, top());
+    return nullptr;
+  } else {
+    Node* memory = reset_memory();
+    Node* m = narrow_mem == nullptr ? memory : narrow_mem;
+    call->init_req(TypeFunc::Memory, m); // may gc ptrs
+    call->init_req(TypeFunc::FramePtr, frameptr());
+    return memory;
+  }
 }
 
 //-------------------set_predefined_output_for_runtime_call--------------------
@@ -1905,6 +1911,11 @@ void GraphKit::set_predefined_output_for_runtime_call(Node* call,
                                                       const TypePtr* hook_mem) {
   // no i/o
   set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) ));
+  if (call->is_CallLeafPure()) {
+    // Pure functions have only control (for now) and data outputs; in particular
+    // they don't touch memory, so we don't want a memory Proj to be set afterwards.
+    return;
+  }
   if (keep_mem) {
     // First clone the existing memory state
     set_all_memory(keep_mem);
@@ -2491,6 +2502,8 @@ Node* GraphKit::make_runtime_call(int flags,
   } else if (flags & RC_VECTOR){
     uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
     call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
+  } else if (flags & RC_PURE) {
+    call = new CallLeafPureNode(call_type, call_addr, call_name, adr_type);
   } else {
     call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
   }
diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp
index 28773d75333ec..806a211d7e254 100644
--- a/src/hotspot/share/opto/graphKit.hpp
+++ b/src/hotspot/share/opto/graphKit.hpp
@@ -784,6 +784,7 @@ class GraphKit : public Phase {
     RC_NARROW_MEM = 16,         // input memory is same as output
     RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
     RC_VECTOR = 64,             // CallLeafVectorNode
+    RC_PURE = 128,              // CallLeaf is pure
     RC_LEAF = 0                 // null value: no flags set
   };
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index 29f737bce0824..04b5dbd700a6a 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -1797,7 +1797,7 @@ bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, c
   Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? argument(2) : nullptr;
 
   const TypePtr* no_memory_effects = nullptr;
-  Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
+  Node* trig = make_runtime_call(RC_LEAF | RC_PURE, call_type, funcAddr, funcName,
                                  no_memory_effects,
                                  a, top(), b, b ? top() : nullptr);
   Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index 8f21ee13e7913..26e1c30b8ad40 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -2597,17 +2597,14 @@ bool PhaseMacroExpand::expand_macro_nodes() {
       switch (n->Opcode()) {
       case Op_ModD:
       case Op_ModF: {
-        bool is_drem = n->Opcode() == Op_ModD;
         CallNode* mod_macro = n->as_Call();
-        CallNode* call = new CallLeafNode(mod_macro->tf(),
-                                          is_drem ? CAST_FROM_FN_PTR(address, SharedRuntime::drem)
-                                                  : CAST_FROM_FN_PTR(address, SharedRuntime::frem),
-                                          is_drem ? "drem" : "frem", TypeRawPtr::BOTTOM);
+        CallNode* call = new CallLeafPureNode(mod_macro->tf(), mod_macro->entry_point(),
+                                              mod_macro->_name, TypeRawPtr::BOTTOM);
         call->init_req(TypeFunc::Control, mod_macro->in(TypeFunc::Control));
-        call->init_req(TypeFunc::I_O, mod_macro->in(TypeFunc::I_O));
-        call->init_req(TypeFunc::Memory, mod_macro->in(TypeFunc::Memory));
-        call->init_req(TypeFunc::ReturnAdr, mod_macro->in(TypeFunc::ReturnAdr));
-        call->init_req(TypeFunc::FramePtr, mod_macro->in(TypeFunc::FramePtr));
+        call->init_req(TypeFunc::I_O, C->top());
+        call->init_req(TypeFunc::Memory, C->top());
+        call->init_req(TypeFunc::ReturnAdr, C->top());
+        call->init_req(TypeFunc::FramePtr, C->top());
         for (unsigned int i = 0; i < mod_macro->tf()->domain()->cnt() - TypeFunc::Parms; i++) {
           call->init_req(TypeFunc::Parms + i, mod_macro->in(TypeFunc::Parms + i));
         }
diff --git a/src/hotspot/share/opto/multnode.cpp b/src/hotspot/share/opto/multnode.cpp
index 736e84315eee1..f429d5daac076 100644
--- a/src/hotspot/share/opto/multnode.cpp
+++ b/src/hotspot/share/opto/multnode.cpp
@@ -120,6 +120,10 @@ const TypePtr *ProjNode::adr_type() const {
   if (bottom_type() == Type::MEMORY) {
     // in(0) might be a narrow MemBar; otherwise we will report TypePtr::BOTTOM
     Node* ctrl = in(0);
+    if (ctrl->Opcode() == Op_Tuple) {
+      // Jumping over Tuples: the i-th projection of a Tuple is the i-th input of the Tuple.
+      ctrl = ctrl->in(_con);
+    }
     if (ctrl == nullptr)  return nullptr; // node is dead
     const TypePtr* adr_type = ctrl->adr_type();
 #ifdef ASSERT
@@ -163,6 +167,15 @@ void ProjNode::check_con() const {
   assert(_con < t->is_tuple()->cnt(), "ProjNode::_con must be in range");
 }
 
+//------------------------------Identity---------------------------------------
+Node* ProjNode::Identity(PhaseGVN* phase) {
+  if (in(0) != nullptr && in(0)->Opcode() == Op_Tuple) {
+    // Jumping over Tuples: the i-th projection of a Tuple is the i-th input of the Tuple.
+    return in(0)->in(_con);
+  }
+  return this;
+}
+
 //------------------------------Value------------------------------------------
 const Type* ProjNode::Value(PhaseGVN* phase) const {
   if (in(0) == nullptr) return Type::TOP;
diff --git a/src/hotspot/share/opto/multnode.hpp b/src/hotspot/share/opto/multnode.hpp
index dff2caed38d16..834dcfdca6dee 100644
--- a/src/hotspot/share/opto/multnode.hpp
+++ b/src/hotspot/share/opto/multnode.hpp
@@ -82,6 +82,7 @@ class ProjNode : public Node {
   virtual const Type *bottom_type() const;
   virtual const TypePtr *adr_type() const;
   virtual bool pinned() const;
+  virtual Node* Identity(PhaseGVN* phase);
   virtual const Type* Value(PhaseGVN* phase) const;
   virtual uint ideal_reg() const;
   virtual const RegMask &out_RegMask() const;
@@ -105,4 +106,49 @@ class ProjNode : public Node {
   ProjNode* other_if_proj() const;
 };
 
+/* Tuples are used to avoid manual graph surgery. When a node with Proj outputs (such as a call)
+ * must be removed and its outputs replaced by its inputs, or by some other values, we can make
+ * its ::Ideal return a tuple of what we want for each output: the ::Identity of each output Proj
+ * will take care of jumping over the Tuple and directly picking up the right input of the Tuple.
+ *
+ * For instance, if a function call is proven to have no side effect and to return the constant 0,
+ * we can replace it with the 6-tuple:
+ *   (control input, IO input, memory input, frame ptr input, return addr input, Con:0)
+ * All the output projections will then pick up the corresponding input of the now-gone call,
+ * except for the result projection, which is replaced by 0.
+ *
+ * Using TupleNode avoids manual graph surgery and leaves that to our expert surgeon: IGVN.
+ * Since the users of a Tuple are expected to be Projs, when creating a tuple during idealization,
+ * the output Projs should be enqueued for IGVN immediately after, and the tuple should not survive
+ * beyond the current round of IGVN.
+ */
+class TupleNode : public MultiNode {
+  const TypeTuple* _tf;
+
+  template <typename... NN>
+  static void make_helper(TupleNode* tn, uint i, Node* node, NN... nn) {
+    tn->set_req(i, node);
+    make_helper(tn, i + 1, nn...);
+  }
+
+  static void make_helper(TupleNode*, uint) {}
+
+public:
+  TupleNode(const TypeTuple* tf) : MultiNode(tf->cnt()), _tf(tf) {}
+
+  int Opcode() const override;
+  const Type* bottom_type() const override { return _tf; }
+
+  /* Give as many `Node*` as you want in the `nn` pack:
+   * TupleNode::make(tf, input1)
+   * TupleNode::make(tf, input1, input2, input3, input4)
+   */
+  template <typename... NN>
+  static TupleNode* make(const TypeTuple* tf, NN... nn) {
+    TupleNode* tn = new TupleNode(tf);
+    make_helper(tn, 0, nn...);
+    return tn;
+  }
+};
+
 #endif // SHARE_OPTO_MULTNODE_HPP
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 7633f68ea1ed3..e4eb7c76ec922 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -2947,23 +2947,13 @@ bool Node::is_dead_loop_safe() const {
 bool Node::is_div_or_mod(BasicType bt) const { return Opcode() == Op_Div(bt) || Opcode() == Op_Mod(bt) || Opcode() == Op_UDiv(bt) || Opcode() == Op_UMod(bt); }
 
-bool Node::is_pure_function() const {
-  switch (Opcode()) {
-    case Op_ModD:
-    case Op_ModF:
-      return true;
-    default:
-      return false;
-  }
-}
-
 // `maybe_pure_function` is assumed to be the input of `this`. This is a bit redundant,
 // but we already have and need maybe_pure_function in all the call sites, so
 // it makes it obvious that the `maybe_pure_function` is the same node as in the caller,
 // while it takes more thinking to realize that a locally computed in(0) must be equal to
 // the local in the caller.
 bool Node::is_data_proj_of_pure_function(const Node* maybe_pure_function) const {
-  return Opcode() == Op_Proj && as_Proj()->_con == TypeFunc::Parms && maybe_pure_function->is_pure_function();
+  return Opcode() == Op_Proj && as_Proj()->_con == TypeFunc::Parms && maybe_pure_function->is_CallLeafPure();
 }
 
 //=============================================================================
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 843baf48cf8ae..6f44950434736 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -54,6 +54,7 @@ class CallDynamicJavaNode;
 class CallJavaNode;
 class CallLeafNode;
 class CallLeafNoFPNode;
+class CallLeafPureNode;
 class CallNode;
 class CallRuntimeNode;
 class CallStaticJavaNode;
@@ -673,6 +674,7 @@ class Node {
       DEFINE_CLASS_ID(CallRuntime, Call, 1)
         DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0)
           DEFINE_CLASS_ID(CallLeafNoFP, CallLeaf, 0)
+          DEFINE_CLASS_ID(CallLeafPure, CallLeaf, 1)
      DEFINE_CLASS_ID(Allocate, Call, 2)
        DEFINE_CLASS_ID(AllocateArray, Allocate, 0)
      DEFINE_CLASS_ID(AbstractLock, Call, 3)
@@ -907,6 +909,7 @@ class Node {
   DEFINE_CLASS_QUERY(CallJava)
   DEFINE_CLASS_QUERY(CallLeaf)
   DEFINE_CLASS_QUERY(CallLeafNoFP)
+  DEFINE_CLASS_QUERY(CallLeafPure)
   DEFINE_CLASS_QUERY(CallRuntime)
   DEFINE_CLASS_QUERY(CallStaticJava)
   DEFINE_CLASS_QUERY(Catch)
@@ -1289,8 +1292,6 @@ class Node {
 
   bool is_div_or_mod(BasicType bt) const;
 
-  bool is_pure_function() const;
-
   bool is_data_proj_of_pure_function(const Node* maybe_pure_function) const;
 
   //----------------- Printing, etc
diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp
index 1a4c3c91c4f08..04b6e49b620c6 100644
--- a/src/hotspot/share/opto/parse2.cpp
+++ b/src/hotspot/share/opto/parse2.cpp
@@ -1097,11 +1097,11 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi,
 Node* Parse::floating_point_mod(Node* a, Node* b, BasicType type) {
   assert(type == BasicType::T_FLOAT || type == BasicType::T_DOUBLE, "only float and double are floating points");
-  CallNode* mod = type == BasicType::T_DOUBLE ? static_cast<CallNode*>(new ModDNode(C, a, b)) : new ModFNode(C, a, b);
+  CallLeafPureNode* mod = type == BasicType::T_DOUBLE ? static_cast<CallLeafPureNode*>(new ModDNode(C, a, b)) : new ModFNode(C, a, b);
 
-  Node* prev_mem = set_predefined_input_for_runtime_call(mod);
-  mod = _gvn.transform(mod)->as_Call();
-  set_predefined_output_for_runtime_call(mod, prev_mem, TypeRawPtr::BOTTOM);
+  set_predefined_input_for_runtime_call(mod);
+  mod = _gvn.transform(mod)->as_CallLeafPure();
+  set_predefined_output_for_runtime_call(mod);
   Node* result = _gvn.transform(new ProjNode(mod, TypeFunc::Parms + 0));
   record_for_igvn(mod);
   return result;
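
Reviewer note (not part of the patch): the sketch below shows how a GraphKit client is expected to request a pure leaf call after this change, modeled on the LibraryCallKit::runtime_math hunk above. The helper name make_pure_dsin_call and its GraphKit& parameter are assumptions made purely for illustration; make_runtime_call, the RC_LEAF | RC_PURE flags, OptoRuntime::Math_D_D_Type(), SharedRuntime::dsin and the result projection at TypeFunc::Parms all come from the diff or from existing HotSpot code.

#include "opto/graphKit.hpp"
#include "opto/multnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"

// Sketch only: emit dsin as a pure, memory-less leaf call and return its value projection.
static Node* make_pure_dsin_call(GraphKit& kit, Node* arg) {
  const TypePtr* no_memory_effects = nullptr;  // pure calls carry no memory state
  Node* call = kit.make_runtime_call(GraphKit::RC_LEAF | GraphKit::RC_PURE,
                                     OptoRuntime::Math_D_D_Type(),
                                     CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
                                     "dsin", no_memory_effects,
                                     arg, kit.top());  // a double argument takes two slots
  // A CallLeafPureNode has only control and data projections; I/O and Memory stay top.
  // If this result projection later becomes unused, CallLeafPureNode::Ideal replaces the
  // call by a TupleNode and IGVN folds the projections away.
  return kit.gvn().transform(new ProjNode(call, TypeFunc::Parms + 0));
}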