1 /*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "oops/objArrayKlass.hpp"
30 #include "opto/addnode.hpp"
31 #include "opto/arraycopynode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/compile.hpp"
34 #include "opto/connode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/loopnode.hpp"
37 #include "opto/machnode.hpp"
38 #include "opto/matcher.hpp"
39 #include "opto/memnode.hpp"
40 #include "opto/mulnode.hpp"
41 #include "opto/narrowptrnode.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/regmask.hpp"
44 #include "utilities/copy.hpp"
45
46 // Portions of code courtesy of Clifford Click
47
48 // Optimization - Graph Style
49
50 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
51
52 //=============================================================================
53 uint MemNode::size_of() const { return sizeof(*this); }
54
55 const TypePtr *MemNode::adr_type() const {
56 Node* adr = in(Address);
57 if (adr == NULL) return NULL; // node is dead
58 const TypePtr* cross_check = NULL;
59 DEBUG_ONLY(cross_check = _adr_type);
60 return calculate_adr_type(adr->bottom_type(), cross_check);
61 }
62
63 #ifndef PRODUCT
64 void MemNode::dump_spec(outputStream *st) const {
65 if (in(Address) == NULL) return; // node is dead
66 #ifndef ASSERT
67 // fake the missing field
68 const TypePtr* _adr_type = NULL;
69 if (in(Address) != NULL)
70 _adr_type = in(Address)->bottom_type()->isa_ptr();
71 #endif
72 dump_adr_type(this, _adr_type, st);
73
74 Compile* C = Compile::current();
75 if( C->alias_type(_adr_type)->is_volatile() )
76 st->print(" Volatile!");
77 }
78
79 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
80 st->print(" @");
81 if (adr_type == NULL) {
82 st->print("NULL");
83 } else {
84 adr_type->dump_on(st);
85 Compile* C = Compile::current();
86 Compile::AliasType* atp = NULL;
87 if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type);
88 if (atp == NULL)
89 st->print(", idx=?\?;");
90 else if (atp->index() == Compile::AliasIdxBot)
91 st->print(", idx=Bot;");
92 else if (atp->index() == Compile::AliasIdxTop)
93 st->print(", idx=Top;");
94 else if (atp->index() == Compile::AliasIdxRaw)
95 st->print(", idx=Raw;");
96 else {
97 ciField* field = atp->field();
98 if (field) {
99 st->print(", name=");
100 field->print_name_on(st);
101 }
102 st->print(", idx=%d;", atp->index());
103 }
104 }
105 }
106
107 extern void print_alias_types();
108
109 #endif
110
111 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
112 assert((t_oop != NULL), "sanity");
113 bool is_instance = t_oop->is_known_instance_field();
114 bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
115 (load != NULL) && load->is_Load() &&
116 (phase->is_IterGVN() != NULL);
117 if (!(is_instance || is_boxed_value_load))
118 return mchain; // don't try to optimize non-instance types
119 uint instance_id = t_oop->instance_id();
120 Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
121 Node *prev = NULL;
122 Node *result = mchain;
123 while (prev != result) {
124 prev = result;
125 if (result == start_mem)
126 break; // hit one of our sentinels
127 // skip over a call which does not affect this memory slice
128 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
129 Node *proj_in = result->in(0);
130 if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
131 break; // hit one of our sentinels
132 } else if (proj_in->is_Call()) {
133 // ArrayCopyNodes processed here as well
134 CallNode *call = proj_in->as_Call();
135 if (!call->may_modify(t_oop, phase)) { // returns false for instances
136 result = call->in(TypeFunc::Memory);
137 }
138 } else if (proj_in->is_Initialize()) {
139 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
140 // Stop if this is the initialization for the object instance which
141 // contains this memory slice, otherwise skip over it.
142 if ((alloc == NULL) || (alloc->_idx == instance_id)) {
143 break;
144 }
145 if (is_instance) {
146 result = proj_in->in(TypeFunc::Memory);
147 } else if (is_boxed_value_load) {
148 Node* klass = alloc->in(AllocateNode::KlassNode);
149 const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
150 if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
151 result = proj_in->in(TypeFunc::Memory); // not related allocation
152 }
153 }
154 } else if (proj_in->is_MemBar()) {
155 if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase)) {
156 break;
157 }
158 result = proj_in->in(TypeFunc::Memory);
159 } else {
160 assert(false, "unexpected projection");
161 }
162 } else if (result->is_ClearArray()) {
163 if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
164 // Can not bypass initialization of the instance
165 // we are looking for.
166 break;
167 }
168 // Otherwise skip it (the call updated 'result' value).
169 } else if (result->is_MergeMem()) {
170 result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
171 }
172 }
173 return result;
174 }
175
176 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
177 const TypeOopPtr* t_oop = t_adr->isa_oopptr();
178 if (t_oop == NULL)
179 return mchain; // don't try to optimize non-oop types
180 Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
181 bool is_instance = t_oop->is_known_instance_field();
182 PhaseIterGVN *igvn = phase->is_IterGVN();
183 if (is_instance && igvn != NULL && result->is_Phi()) {
184 PhiNode *mphi = result->as_Phi();
185 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
186 const TypePtr *t = mphi->adr_type();
187 if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
188 t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
189 t->is_oopptr()->cast_to_exactness(true)
190 ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
191 ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
192 // clone the Phi with our address type
193 result = mphi->split_out_instance(t_adr, igvn);
194 } else {
195 assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
196 }
197 }
198 return result;
199 }
200
201 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
202 uint alias_idx = phase->C->get_alias_index(tp);
203 Node *mem = mmem;
204 #ifdef ASSERT
205 {
206 // Check that current type is consistent with the alias index used during graph construction
207 assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
208 bool consistent = adr_check == NULL || adr_check->empty() ||
209 phase->C->must_alias(adr_check, alias_idx );
210 // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
211 if( !consistent && adr_check != NULL && !adr_check->empty() &&
212 tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
213 adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
214 ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
215 adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
216 adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
217 // don't assert if it is dead code.
218 consistent = true;
219 }
220 if( !consistent ) {
221 st->print("alias_idx==%d, adr_check==", alias_idx);
222 if( adr_check == NULL ) {
223 st->print("NULL");
224 } else {
225 adr_check->dump();
226 }
227 st->cr();
228 print_alias_types();
229 assert(consistent, "adr_check must match alias idx");
230 }
231 }
232 #endif
233 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
234 // means an array I have not precisely typed yet. Do not do any
235 // alias stuff with it any time soon.
236 const TypeOopPtr *toop = tp->isa_oopptr();
237 if( tp->base() != Type::AnyPtr &&
238 !(toop &&
239 toop->klass() != NULL &&
240 toop->klass()->is_java_lang_Object() &&
241 toop->offset() == Type::OffsetBot) ) {
242 // compress paths and change unreachable cycles to TOP
243 // If not, we can update the input infinitely along a MergeMem cycle
244 // Equivalent code in PhiNode::Ideal
245 Node* m = phase->transform(mmem);
246 // If transformed to a MergeMem, get the desired slice
247 // Otherwise the returned node represents memory for every slice
248 mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
249 // Update input if it is progress over what we have now
250 }
251 return mem;
252 }
253
254 //--------------------------Ideal_common---------------------------------------
255 // Look for degenerate control and memory inputs. Bypass MergeMem inputs.
256 // Unhook non-raw memories from complete (macro-expanded) initializations.
257 Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
258 // If our control input is a dead region, kill all below the region
259 Node *ctl = in(MemNode::Control);
260 if (ctl && remove_dead_region(phase, can_reshape))
261 return this;
262 ctl = in(MemNode::Control);
263 // Don't bother trying to transform a dead node
264 if (ctl && ctl->is_top()) return NodeSentinel;
265
266 PhaseIterGVN *igvn = phase->is_IterGVN();
267 // Wait if control on the worklist.
268 if (ctl && can_reshape && igvn != NULL) {
269 Node* bol = NULL;
270 Node* cmp = NULL;
271 if (ctl->in(0)->is_If()) {
272 assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity");
273 bol = ctl->in(0)->in(1);
274 if (bol->is_Bool())
275 cmp = ctl->in(0)->in(1)->in(1);
276 }
277 if (igvn->_worklist.member(ctl) ||
278 (bol != NULL && igvn->_worklist.member(bol)) ||
279 (cmp != NULL && igvn->_worklist.member(cmp)) ) {
280 // This control path may be dead.
281 // Delay this memory node transformation until the control is processed.
282 phase->is_IterGVN()->_worklist.push(this);
283 return NodeSentinel; // caller will return NULL
284 }
285 }
286 // Ignore if memory is dead, or self-loop
287 Node *mem = in(MemNode::Memory);
288 if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
289 assert(mem != this, "dead loop in MemNode::Ideal");
290
291 if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
292 // This memory slice may be dead.
293 // Delay this mem node transformation until the memory is processed.
294 phase->is_IterGVN()->_worklist.push(this);
295 return NodeSentinel; // caller will return NULL
296 }
297
298 Node *address = in(MemNode::Address);
299 const Type *t_adr = phase->type(address);
300 if (t_adr == Type::TOP) return NodeSentinel; // caller will return NULL
301
302 if (can_reshape && igvn != NULL &&
303 (igvn->_worklist.member(address) ||
304 igvn->_worklist.size() > 0 && (t_adr != adr_type())) ) {
305 // The address's base and type may change when the address is processed.
306 // Delay this mem node transformation until the address is processed.
307 phase->is_IterGVN()->_worklist.push(this);
308 return NodeSentinel; // caller will return NULL
309 }
310
311 // Do NOT remove or optimize the next lines: ensure a new alias index
312 // is allocated for an oop pointer type before Escape Analysis.
313 // Note: C++ will not remove it since the call has side effect.
314 if (t_adr->isa_oopptr()) {
315 int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
316 }
317
318 Node* base = NULL;
319 if (address->is_AddP()) {
320 base = address->in(AddPNode::Base);
321 }
322 if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
323 !t_adr->isa_rawptr()) {
324 // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
325 // Skip this node optimization if its address has TOP base.
326 return NodeSentinel; // caller will return NULL
327 }
328
329 // Avoid independent memory operations
330 Node* old_mem = mem;
331
332 // The code which unhooks non-raw memories from complete (macro-expanded)
333 // initializations was removed. After macro-expansion all stores catched
334 // by Initialize node became raw stores and there is no information
335 // which memory slices they modify. So it is unsafe to move any memory
336 // operation above these stores. Also in most cases hooked non-raw memories
337 // were already unhooked by using information from detect_ptr_independence()
338 // and find_previous_store().
339
340 if (mem->is_MergeMem()) {
341 MergeMemNode* mmem = mem->as_MergeMem();
342 const TypePtr *tp = t_adr->is_ptr();
343
344 mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
345 }
346
347 if (mem != old_mem) {
348 set_req(MemNode::Memory, mem);
349 if (can_reshape && old_mem->outcnt() == 0) {
350 igvn->_worklist.push(old_mem);
351 }
352 if (phase->type( mem ) == Type::TOP) return NodeSentinel;
353 return this;
354 }
355
356 // let the subclass continue analyzing...
357 return NULL;
358 }
359
360 // Helper function for proving some simple control dominations.
361 // Attempt to prove that all control inputs of 'dom' dominate 'sub'.
362 // Already assumes that 'dom' is available at 'sub', and that 'sub'
363 // is not a constant (dominated by the method's StartNode).
364 // Used by MemNode::find_previous_store to prove that the
365 // control input of a memory operation predates (dominates)
366 // an allocation it wants to look past.
367 bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
368 if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top())
369 return false; // Conservative answer for dead code
370
371 // Check 'dom'. Skip Proj and CatchProj nodes.
372 dom = dom->find_exact_control(dom);
373 if (dom == NULL || dom->is_top())
374 return false; // Conservative answer for dead code
375
376 if (dom == sub) {
377 // For the case when, for example, 'sub' is Initialize and the original
378 // 'dom' is Proj node of the 'sub'.
379 return false;
380 }
381
382 if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub)
383 return true;
384
385 // 'dom' dominates 'sub' if its control edge and control edges
386 // of all its inputs dominate or equal to sub's control edge.
387
388 // Currently 'sub' is either Allocate, Initialize or Start nodes.
389 // Or Region for the check in LoadNode::Ideal();
390 // 'sub' should have sub->in(0) != NULL.
391 assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
392 sub->is_Region() || sub->is_Call(), "expecting only these nodes");
393
394 // Get control edge of 'sub'.
395 Node* orig_sub = sub;
396 sub = sub->find_exact_control(sub->in(0));
397 if (sub == NULL || sub->is_top())
398 return false; // Conservative answer for dead code
399
400 assert(sub->is_CFG(), "expecting control");
401
402 if (sub == dom)
403 return true;
404
405 if (sub->is_Start() || sub->is_Root())
406 return false;
407
408 {
409 // Check all control edges of 'dom'.
410
411 ResourceMark rm;
412 Arena* arena = Thread::current()->resource_area();
413 Node_List nlist(arena);
414 Unique_Node_List dom_list(arena);
415
416 dom_list.push(dom);
417 bool only_dominating_controls = false;
418
419 for (uint next = 0; next < dom_list.size(); next++) {
420 Node* n = dom_list.at(next);
421 if (n == orig_sub)
422 return false; // One of dom's inputs dominated by sub.
423 if (!n->is_CFG() && n->pinned()) {
424 // Check only own control edge for pinned non-control nodes.
425 n = n->find_exact_control(n->in(0));
426 if (n == NULL || n->is_top())
427 return false; // Conservative answer for dead code
428 assert(n->is_CFG(), "expecting control");
429 dom_list.push(n);
430 } else if (n->is_Con() || n->is_Start() || n->is_Root()) {
431 only_dominating_controls = true;
432 } else if (n->is_CFG()) {
433 if (n->dominates(sub, nlist))
434 only_dominating_controls = true;
435 else
436 return false;
437 } else {
438 // First, own control edge.
439 Node* m = n->find_exact_control(n->in(0));
440 if (m != NULL) {
441 if (m->is_top())
442 return false; // Conservative answer for dead code
443 dom_list.push(m);
444 }
445 // Now, the rest of edges.
446 uint cnt = n->req();
447 for (uint i = 1; i < cnt; i++) {
448 m = n->find_exact_control(n->in(i));
449 if (m == NULL || m->is_top())
450 continue;
451 dom_list.push(m);
452 }
453 }
454 }
455 return only_dominating_controls;
456 }
457 }
458
459 //---------------------detect_ptr_independence---------------------------------
460 // Used by MemNode::find_previous_store to prove that two base
461 // pointers are never equal.
462 // The pointers are accompanied by their associated allocations,
463 // if any, which have been previously discovered by the caller.
464 bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
465 Node* p2, AllocateNode* a2,
466 PhaseTransform* phase) {
467 // Attempt to prove that these two pointers cannot be aliased.
468 // They may both manifestly be allocations, and they should differ.
469 // Or, if they are not both allocations, they can be distinct constants.
470 // Otherwise, one is an allocation and the other a pre-existing value.
471 if (a1 == NULL && a2 == NULL) { // neither an allocation
472 return (p1 != p2) && p1->is_Con() && p2->is_Con();
473 } else if (a1 != NULL && a2 != NULL) { // both allocations
474 return (a1 != a2);
475 } else if (a1 != NULL) { // one allocation a1
476 // (Note: p2->is_Con implies p2->in(0)->is_Root, which dominates.)
477 return all_controls_dominate(p2, a1);
478 } else { //(a2 != NULL) // one allocation a2
479 return all_controls_dominate(p1, a2);
480 }
481 return false;
482 }
483
484
485 // Find an arraycopy that must have set (can_see_stored_value=true) or
486 // could have set (can_see_stored_value=false) the value for this load
487 Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const {
488 if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore ||
489 mem->in(0)->Opcode() == Op_MemBarCPUOrder)) {
490 Node* mb = mem->in(0);
491 if (mb->in(0) != NULL && mb->in(0)->is_Proj() &&
492 mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) {
493 ArrayCopyNode* ac = mb->in(0)->in(0)->as_ArrayCopy();
494 if (ac->is_clonebasic()) {
495 intptr_t offset;
496 AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase, offset);
497 assert(alloc != NULL && alloc->initialization()->is_complete_with_arraycopy(), "broken allocation");
498 if (alloc == ld_alloc) {
499 return ac;
500 }
501 }
502 }
503 } else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) {
504 ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy();
505
506 if (ac->is_arraycopy_validated() ||
507 ac->is_copyof_validated() ||
508 ac->is_copyofrange_validated()) {
509 Node* ld_addp = in(MemNode::Address);
510 if (ld_addp->is_AddP()) {
511 Node* ld_base = ld_addp->in(AddPNode::Address);
512 Node* ld_offs = ld_addp->in(AddPNode::Offset);
513
514 Node* dest = ac->in(ArrayCopyNode::Dest);
515
516 if (dest == ld_base) {
517 const TypeX *ld_offs_t = phase->type(ld_offs)->isa_intptr_t();
518 if (ac->modifies(ld_offs_t->_lo, ld_offs_t->_hi, phase, can_see_stored_value)) {
519 return ac;
520 }
521 if (!can_see_stored_value) {
522 mem = ac->in(TypeFunc::Memory);
523 }
524 }
525 }
526 }
527 }
528 return NULL;
529 }
530
531 // The logic for reordering loads and stores uses four steps:
532 // (a) Walk carefully past stores and initializations which we
533 // can prove are independent of this load.
534 // (b) Observe that the next memory state makes an exact match
535 // with self (load or store), and locate the relevant store.
536 // (c) Ensure that, if we were to wire self directly to the store,
537 // the optimizer would fold it up somehow.
538 // (d) Do the rewiring, and return, depending on some other part of
539 // the optimizer to fold up the load.
540 // This routine handles steps (a) and (b). Steps (c) and (d) are
541 // specific to loads and stores, so they are handled by the callers.
542 // (Currently, only LoadNode::Ideal has steps (c), (d). More later.)
543 //
544 Node* MemNode::find_previous_store(PhaseTransform* phase) {
545 Node* ctrl = in(MemNode::Control);
546 Node* adr = in(MemNode::Address);
547 intptr_t offset = 0;
548 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
549 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
550
551 if (offset == Type::OffsetBot)
552 return NULL; // cannot unalias unless there are precise offsets
553
554 const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr();
555
556 intptr_t size_in_bytes = memory_size();
557
558 Node* mem = in(MemNode::Memory); // start searching here...
559
560 int cnt = 50; // Cycle limiter
561 for (;;) { // While we can dance past unrelated stores...
562 if (--cnt < 0) break; // Caught in cycle or a complicated dance?
563
564 Node* prev = mem;
565 if (mem->is_Store()) {
566 Node* st_adr = mem->in(MemNode::Address);
567 intptr_t st_offset = 0;
568 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
569 if (st_base == NULL)
570 break; // inscrutable pointer
571 if (st_offset != offset && st_offset != Type::OffsetBot) {
572 const int MAX_STORE = BytesPerLong;
573 if (st_offset >= offset + size_in_bytes ||
574 st_offset <= offset - MAX_STORE ||
575 st_offset <= offset - mem->as_Store()->memory_size()) {
576 // Success: The offsets are provably independent.
577 // (You may ask, why not just test st_offset != offset and be done?
578 // The answer is that stores of different sizes can co-exist
579 // in the same sequence of RawMem effects. We sometimes initialize
580 // a whole 'tile' of array elements with a single jint or jlong.)
581 mem = mem->in(MemNode::Memory);
582 continue; // (a) advance through independent store memory
583 }
584 }
585 if (st_base != base &&
586 detect_ptr_independence(base, alloc,
587 st_base,
588 AllocateNode::Ideal_allocation(st_base, phase),
589 phase)) {
590 // Success: The bases are provably independent.
591 mem = mem->in(MemNode::Memory);
592 continue; // (a) advance through independent store memory
593 }
594
595 // (b) At this point, if the bases or offsets do not agree, we lose,
596 // since we have not managed to prove 'this' and 'mem' independent.
597 if (st_base == base && st_offset == offset) {
598 return mem; // let caller handle steps (c), (d)
599 }
600
601 } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
602 InitializeNode* st_init = mem->in(0)->as_Initialize();
603 AllocateNode* st_alloc = st_init->allocation();
604 if (st_alloc == NULL)
605 break; // something degenerated
606 bool known_identical = false;
607 bool known_independent = false;
608 if (alloc == st_alloc)
609 known_identical = true;
610 else if (alloc != NULL)
611 known_independent = true;
612 else if (all_controls_dominate(this, st_alloc))
613 known_independent = true;
614
615 if (known_independent) {
616 // The bases are provably independent: Either they are
617 // manifestly distinct allocations, or else the control
618 // of this load dominates the store's allocation.
619 int alias_idx = phase->C->get_alias_index(adr_type());
620 if (alias_idx == Compile::AliasIdxRaw) {
621 mem = st_alloc->in(TypeFunc::Memory);
622 } else {
623 mem = st_init->memory(alias_idx);
624 }
625 continue; // (a) advance through independent store memory
626 }
627
628 // (b) at this point, if we are not looking at a store initializing
629 // the same allocation we are loading from, we lose.
630 if (known_identical) {
631 // From caller, can_see_stored_value will consult find_captured_store.
632 return mem; // let caller handle steps (c), (d)
633 }
634
635 } else if (find_previous_arraycopy(phase, alloc, mem, false) != NULL) {
636 if (prev != mem) {
637 // Found an arraycopy but it doesn't affect that load
638 continue;
639 }
640 // Found an arraycopy that may affect that load
641 return mem;
642 } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
643 // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
644 if (mem->is_Proj() && mem->in(0)->is_Call()) {
645 // ArrayCopyNodes processed here as well.
646 CallNode *call = mem->in(0)->as_Call();
647 if (!call->may_modify(addr_t, phase)) {
648 mem = call->in(TypeFunc::Memory);
649 continue; // (a) advance through independent call memory
650 }
651 } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
652 if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase)) {
653 break;
654 }
655 mem = mem->in(0)->in(TypeFunc::Memory);
656 continue; // (a) advance through independent MemBar memory
657 } else if (mem->is_ClearArray()) {
658 if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
659 // (the call updated 'mem' value)
660 continue; // (a) advance through independent allocation memory
661 } else {
662 // Can not bypass initialization of the instance
663 // we are looking for.
664 return mem;
665 }
666 } else if (mem->is_MergeMem()) {
667 int alias_idx = phase->C->get_alias_index(adr_type());
668 mem = mem->as_MergeMem()->memory_at(alias_idx);
669 continue; // (a) advance through independent MergeMem memory
670 }
671 }
672
673 // Unless there is an explicit 'continue', we must bail out here,
674 // because 'mem' is an inscrutable memory state (e.g., a call).
675 break;
676 }
677
678 return NULL; // bail out
679 }
680
681 //----------------------calculate_adr_type-------------------------------------
682 // Helper function. Notices when the given type of address hits top or bottom.
683 // Also, asserts a cross-check of the type against the expected address type.
684 const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
685 if (t == Type::TOP) return NULL; // does not touch memory any more?
686 #ifdef PRODUCT
687 cross_check = NULL;
688 #else
689 if (!VerifyAliases || is_error_reported() || Node::in_dump()) cross_check = NULL;
690 #endif
691 const TypePtr* tp = t->isa_ptr();
692 if (tp == NULL) {
693 assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
694 return TypePtr::BOTTOM; // touches lots of memory
695 } else {
696 #ifdef ASSERT
697 // %%%% [phh] We don't check the alias index if cross_check is
698 // TypeRawPtr::BOTTOM. Needs to be investigated.
699 if (cross_check != NULL &&
700 cross_check != TypePtr::BOTTOM &&
701 cross_check != TypeRawPtr::BOTTOM) {
702 // Recheck the alias index, to see if it has changed (due to a bug).
703 Compile* C = Compile::current();
704 assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
705 "must stay in the original alias category");
706 // The type of the address must be contained in the adr_type,
707 // disregarding "null"-ness.
708 // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
709 const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
710 assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
711 "real address must not escape from expected memory type");
712 }
713 #endif
714 return tp;
715 }
716 }
717
718 //=============================================================================
719 // Should LoadNode::Ideal() attempt to remove control edges?
720 bool LoadNode::can_remove_control() const {
721 return true;
722 }
723 uint LoadNode::size_of() const { return sizeof(*this); }
724 uint LoadNode::cmp( const Node &n ) const
725 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
726 const Type *LoadNode::bottom_type() const { return _type; }
727 uint LoadNode::ideal_reg() const {
728 return _type->ideal_reg();
729 }
730
731 #ifndef PRODUCT
732 void LoadNode::dump_spec(outputStream *st) const {
733 MemNode::dump_spec(st);
734 if( !Verbose && !WizardMode ) {
735 // standard dump does this in Verbose and WizardMode
736 st->print(" #"); _type->dump_on(st);
737 }
738 if (!_depends_only_on_test) {
739 st->print(" (does not depend only on test)");
740 }
741 }
742 #endif
743
744 #ifdef ASSERT
745 //----------------------------is_immutable_value-------------------------------
746 // Helper function to allow a raw load without control edge for some cases
747 bool LoadNode::is_immutable_value(Node* adr) {
748 return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
749 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
750 (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
751 in_bytes(JavaThread::osthread_offset())));
752 }
753 #endif
754
755 //----------------------------LoadNode::make-----------------------------------
756 // Polymorphic factory method:
757 Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo, ControlDependency control_dependency) {
758 Compile* C = gvn.C;
759
760 // sanity check the alias category against the created node type
761 assert(!(adr_type->isa_oopptr() &&
762 adr_type->offset() == oopDesc::klass_offset_in_bytes()),
763 "use LoadKlassNode instead");
764 assert(!(adr_type->isa_aryptr() &&
765 adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
766 "use LoadRangeNode instead");
767 // Check control edge of raw loads
768 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
769 // oop will be recorded in oop map if load crosses safepoint
770 rt->isa_oopptr() || is_immutable_value(adr),
771 "raw memory operations should have control edge");
772 switch (bt) {
773 case T_BOOLEAN: return new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
774 case T_BYTE: return new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
775 case T_INT: return new LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
776 case T_CHAR: return new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
777 case T_SHORT: return new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency);
778 case T_LONG: return new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency);
779 case T_FLOAT: return new LoadFNode (ctl, mem, adr, adr_type, rt, mo, control_dependency);
780 case T_DOUBLE: return new LoadDNode (ctl, mem, adr, adr_type, rt, mo, control_dependency);
781 case T_ADDRESS: return new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
782 case T_OBJECT:
783 #ifdef _LP64
784 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
785 Node* load = gvn.transform(new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency));
786 return new DecodeNNode(load, load->bottom_type()->make_ptr());
787 } else
788 #endif
789 {
790 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
791 return new LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo, control_dependency);
792 }
793 }
794 ShouldNotReachHere();
795 return (LoadNode*)NULL;
796 }
797
798 LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
799 bool require_atomic = true;
800 return new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
801 }
802
803 LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
804 bool require_atomic = true;
805 return new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
806 }
807
808
809
810 //------------------------------hash-------------------------------------------
811 uint LoadNode::hash() const {
812 // unroll addition of interesting fields
813 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
814 }
815
816 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
817 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
818 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
819 bool is_stable_ary = FoldStableValues &&
820 (tp != NULL) && (tp->isa_aryptr() != NULL) &&
821 tp->isa_aryptr()->is_stable();
822
823 return (eliminate_boxing && non_volatile) || is_stable_ary;
824 }
825
826 return false;
827 }
828
829 // Is the value loaded previously stored by an arraycopy? If so return
830 // a load node that reads from the source array so we may be able to
831 // optimize out the ArrayCopy node later.
832 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseTransform* phase) const {
833 Node* ld_adr = in(MemNode::Address);
834 intptr_t ld_off = 0;
835 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
836 Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
837 if (ac != NULL) {
838 assert(ac->is_ArrayCopy(), "what kind of node can this be?");
839
840 Node* ld = clone();
841 if (ac->as_ArrayCopy()->is_clonebasic()) {
842 assert(ld_alloc != NULL, "need an alloc");
843 Node* addp = in(MemNode::Address)->clone();
844 assert(addp->is_AddP(), "address must be addp");
845 assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base), "strange pattern");
846 assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address), "strange pattern");
847 addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src)->in(AddPNode::Base));
848 addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src)->in(AddPNode::Address));
849 ld->set_req(MemNode::Address, phase->transform(addp));
850 if (in(0) != NULL) {
851 assert(ld_alloc->in(0) != NULL, "alloc must have control");
852 ld->set_req(0, ld_alloc->in(0));
853 }
854 } else {
855 Node* addp = in(MemNode::Address)->clone();
856 assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
857 addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src));
858 addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src));
859
860 const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
861 BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
862 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
863 uint shift = exact_log2(type2aelembytes(ary_elem));
864
865 Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
866 #ifdef _LP64
867 diff = phase->transform(new ConvI2LNode(diff));
868 #endif
869 diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
870
871 Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
872 addp->set_req(AddPNode::Offset, offset);
873 ld->set_req(MemNode::Address, phase->transform(addp));
874
875 if (in(0) != NULL) {
876 assert(ac->in(0) != NULL, "alloc must have control");
877 ld->set_req(0, ac->in(0));
878 }
879 }
880 // load depends on the tests that validate the arraycopy
881 ld->as_Load()->_depends_only_on_test = Pinned;
882 return ld;
883 }
884 return NULL;
885 }
886
887
888 //---------------------------can_see_stored_value------------------------------
889 // This routine exists to make sure this set of tests is done the same
890 // everywhere. We need to make a coordinated change: first LoadNode::Ideal
891 // will change the graph shape in a way which makes memory alive twice at the
892 // same time (uses the Oracle model of aliasing), then some
893 // LoadXNode::Identity will fold things back to the equivalence-class model
894 // of aliasing.
895 Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
896 Node* ld_adr = in(MemNode::Address);
897 intptr_t ld_off = 0;
898 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
899 const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
900 Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
901 // This is more general than load from boxing objects.
902 if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
903 uint alias_idx = atp->index();
904 bool final = !atp->is_rewritable();
905 Node* result = NULL;
906 Node* current = st;
907 // Skip through chains of MemBarNodes checking the MergeMems for
908 // new states for the slice of this load. Stop once any other
909 // kind of node is encountered. Loads from final memory can skip
910 // through any kind of MemBar but normal loads shouldn't skip
911 // through MemBarAcquire since the could allow them to move out of
912 // a synchronized region.
913 while (current->is_Proj()) {
914 int opc = current->in(0)->Opcode();
915 if ((final && (opc == Op_MemBarAcquire ||
916 opc == Op_MemBarAcquireLock ||
917 opc == Op_LoadFence)) ||
918 opc == Op_MemBarRelease ||
919 opc == Op_StoreFence ||
920 opc == Op_MemBarReleaseLock ||
921 opc == Op_MemBarStoreStore ||
922 opc == Op_MemBarCPUOrder) {
923 Node* mem = current->in(0)->in(TypeFunc::Memory);
924 if (mem->is_MergeMem()) {
925 MergeMemNode* merge = mem->as_MergeMem();
926 Node* new_st = merge->memory_at(alias_idx);
927 if (new_st == merge->base_memory()) {
928 // Keep searching
929 current = new_st;
930 continue;
931 }
932 // Save the new memory state for the slice and fall through
933 // to exit.
934 result = new_st;
935 }
936 }
937 break;
938 }
939 if (result != NULL) {
940 st = result;
941 }
942 }
943
944 // Loop around twice in the case Load -> Initialize -> Store.
945 // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
946 for (int trip = 0; trip <= 1; trip++) {
947
948 if (st->is_Store()) {
949 Node* st_adr = st->in(MemNode::Address);
950 if (!phase->eqv(st_adr, ld_adr)) {
951 // Try harder before giving up... Match raw and non-raw pointers.
952 intptr_t st_off = 0;
953 AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
954 if (alloc == NULL) return NULL;
955 if (alloc != ld_alloc) return NULL;
956 if (ld_off != st_off) return NULL;
957 // At this point we have proven something like this setup:
958 // A = Allocate(...)
959 // L = LoadQ(, AddP(CastPP(, A.Parm),, #Off))
960 // S = StoreQ(, AddP(, A.Parm , #Off), V)
961 // (Actually, we haven't yet proven the Q's are the same.)
962 // In other words, we are loading from a casted version of
963 // the same pointer-and-offset that we stored to.
964 // Thus, we are able to replace L by V.
965 }
966 // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
967 if (store_Opcode() != st->Opcode())
968 return NULL;
969 return st->in(MemNode::ValueIn);
970 }
971
972 // A load from a freshly-created object always returns zero.
973 // (This can happen after LoadNode::Ideal resets the load's memory input
974 // to find_captured_store, which returned InitializeNode::zero_memory.)
975 if (st->is_Proj() && st->in(0)->is_Allocate() &&
976 (st->in(0) == ld_alloc) &&
977 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
978 // return a zero value for the load's basic type
979 // (This is one of the few places where a generic PhaseTransform
980 // can create new nodes. Think of it as lazily manifesting
981 // virtually pre-existing constants.)
982 return phase->zerocon(memory_type());
983 }
984
985 // A load from an initialization barrier can match a captured store.
986 if (st->is_Proj() && st->in(0)->is_Initialize()) {
987 InitializeNode* init = st->in(0)->as_Initialize();
988 AllocateNode* alloc = init->allocation();
989 if ((alloc != NULL) && (alloc == ld_alloc)) {
990 // examine a captured store value
991 st = init->find_captured_store(ld_off, memory_size(), phase);
992 if (st != NULL) {
993 continue; // take one more trip around
994 }
995 }
996 }
997
998 // Load boxed value from result of valueOf() call is input parameter.
999 if (this->is_Load() && ld_adr->is_AddP() &&
1000 (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1001 intptr_t ignore = 0;
1002 Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
1003 if (base != NULL && base->is_Proj() &&
1004 base->as_Proj()->_con == TypeFunc::Parms &&
1005 base->in(0)->is_CallStaticJava() &&
1006 base->in(0)->as_CallStaticJava()->is_boxing_method()) {
1007 return base->in(0)->in(TypeFunc::Parms);
1008 }
1009 }
1010
1011 break;
1012 }
1013
1014 return NULL;
1015 }
1016
1017 //----------------------is_instance_field_load_with_local_phi------------------
1018 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1019 if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1020 in(Address)->is_AddP() ) {
1021 const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1022 // Only instances and boxed values.
1023 if( t_oop != NULL &&
1024 (t_oop->is_ptr_to_boxed_value() ||
1025 t_oop->is_known_instance_field()) &&
1026 t_oop->offset() != Type::OffsetBot &&
1027 t_oop->offset() != Type::OffsetTop) {
1028 return true;
1029 }
1030 }
1031 return false;
1032 }
1033
1034 //------------------------------Identity---------------------------------------
1035 // Loads are identity if previous store is to same address
1036 Node *LoadNode::Identity( PhaseTransform *phase ) {
1037 // If the previous store-maker is the right kind of Store, and the store is
1038 // to the same address, then we are equal to the value stored.
1039 Node* mem = in(Memory);
1040 Node* value = can_see_stored_value(mem, phase);
1041 if( value ) {
1042 // byte, short & char stores truncate naturally.
1043 // A load has to load the truncated value which requires
1044 // some sort of masking operation and that requires an
1045 // Ideal call instead of an Identity call.
1046 if (memory_size() < BytesPerInt) {
1047 // If the input to the store does not fit with the load's result type,
1048 // it must be truncated via an Ideal call.
1049 if (!phase->type(value)->higher_equal(phase->type(this)))
1050 return this;
1051 }
1052 // (This works even when value is a Con, but LoadNode::Value
1053 // usually runs first, producing the singleton type of the Con.)
1054 return value;
1055 }
1056
1057 // Search for an existing data phi which was generated before for the same
1058 // instance's field to avoid infinite generation of phis in a loop.
1059 Node *region = mem->in(0);
1060 if (is_instance_field_load_with_local_phi(region)) {
1061 const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
1062 int this_index = phase->C->get_alias_index(addr_t);
1063 int this_offset = addr_t->offset();
1064 int this_iid = addr_t->instance_id();
1065 if (!addr_t->is_known_instance() &&
1066 addr_t->is_ptr_to_boxed_value()) {
1067 // Use _idx of address base (could be Phi node) for boxed values.
1068 intptr_t ignore = 0;
1069 Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1070 this_iid = base->_idx;
1071 }
1072 const Type* this_type = bottom_type();
1073 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1074 Node* phi = region->fast_out(i);
1075 if (phi->is_Phi() && phi != mem &&
1076 phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) {
1077 return phi;
1078 }
1079 }
1080 }
1081
1082 return this;
1083 }
1084
1085 // We're loading from an object which has autobox behaviour.
1086 // If this object is result of a valueOf call we'll have a phi
1087 // merging a newly allocated object and a load from the cache.
1088 // We want to replace this load with the original incoming
1089 // argument to the valueOf call.
1090 Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
1091 assert(phase->C->eliminate_boxing(), "sanity");
1092 intptr_t ignore = 0;
1093 Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
1094 if ((base == NULL) || base->is_Phi()) {
1095 // Push the loads from the phi that comes from valueOf up
1096 // through it to allow elimination of the loads and the recovery
1097 // of the original value. It is done in split_through_phi().
1098 return NULL;
1099 } else if (base->is_Load() ||
1100 base->is_DecodeN() && base->in(1)->is_Load()) {
1101 // Eliminate the load of boxed value for integer types from the cache
1102 // array by deriving the value from the index into the array.
1103 // Capture the offset of the load and then reverse the computation.
1104
1105 // Get LoadN node which loads a boxing object from 'cache' array.
1106 if (base->is_DecodeN()) {
1107 base = base->in(1);
1108 }
1109 if (!base->in(Address)->is_AddP()) {
1110 return NULL; // Complex address
1111 }
1112 AddPNode* address = base->in(Address)->as_AddP();
1113 Node* cache_base = address->in(AddPNode::Base);
1114 if ((cache_base != NULL) && cache_base->is_DecodeN()) {
1115 // Get ConP node which is static 'cache' field.
1116 cache_base = cache_base->in(1);
1117 }
1118 if ((cache_base != NULL) && cache_base->is_Con()) {
1119 const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
1120 if ((base_type != NULL) && base_type->is_autobox_cache()) {
1121 Node* elements[4];
1122 int shift = exact_log2(type2aelembytes(T_OBJECT));
1123 int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
1124 if ((count > 0) && elements[0]->is_Con() &&
1125 ((count == 1) ||
1126 (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
1127 elements[1]->in(2) == phase->intcon(shift))) {
1128 ciObjArray* array = base_type->const_oop()->as_obj_array();
1129 // Fetch the box object cache[0] at the base of the array and get its value
1130 ciInstance* box = array->obj_at(0)->as_instance();
1131 ciInstanceKlass* ik = box->klass()->as_instance_klass();
1132 assert(ik->is_box_klass(), "sanity");
1133 assert(ik->nof_nonstatic_fields() == 1, "change following code");
1134 if (ik->nof_nonstatic_fields() == 1) {
1135 // This should be true nonstatic_field_at requires calling
1136 // nof_nonstatic_fields so check it anyway
1137 ciConstant c = box->field_value(ik->nonstatic_field_at(0));
1138 BasicType bt = c.basic_type();
1139 // Only integer types have boxing cache.
1140 assert(bt == T_BOOLEAN || bt == T_CHAR ||
1141 bt == T_BYTE || bt == T_SHORT ||
1142 bt == T_INT || bt == T_LONG, "wrong type = %s", type2name(bt));
1143 jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
1144 if (cache_low != (int)cache_low) {
1145 return NULL; // should not happen since cache is array indexed by value
1146 }
1147 jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
1148 if (offset != (int)offset) {
1149 return NULL; // should not happen since cache is array indexed by value
1150 }
1151 // Add up all the offsets making of the address of the load
1152 Node* result = elements[0];
1153 for (int i = 1; i < count; i++) {
1154 result = phase->transform(new AddXNode(result, elements[i]));
1155 }
1156 // Remove the constant offset from the address and then
1157 result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
1158 // remove the scaling of the offset to recover the original index.
1159 if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
1160 // Peel the shift off directly but wrap it in a dummy node
1161 // since Ideal can't return existing nodes
1162 result = new RShiftXNode(result->in(1), phase->intcon(0));
1163 } else if (result->is_Add() && result->in(2)->is_Con() &&
1164 result->in(1)->Opcode() == Op_LShiftX &&
1165 result->in(1)->in(2) == phase->intcon(shift)) {
1166 // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
1167 // but for boxing cache access we know that X<<Z will not overflow
1168 // (there is range check) so we do this optimizatrion by hand here.
1169 Node* add_con = new RShiftXNode(result->in(2), phase->intcon(shift));
1170 result = new AddXNode(result->in(1)->in(1), phase->transform(add_con));
1171 } else {
1172 result = new RShiftXNode(result, phase->intcon(shift));
1173 }
1174 #ifdef _LP64
1175 if (bt != T_LONG) {
1176 result = new ConvL2INode(phase->transform(result));
1177 }
1178 #else
1179 if (bt == T_LONG) {
1180 result = new ConvI2LNode(phase->transform(result));
1181 }
1182 #endif
1183 // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
1184 // Need to preserve unboxing load type if it is unsigned.
1185 switch(this->Opcode()) {
1186 case Op_LoadUB:
1187 result = new AndINode(phase->transform(result), phase->intcon(0xFF));
1188 break;
1189 case Op_LoadUS:
1190 result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
1191 break;
1192 }
1193 return result;
1194 }
1195 }
1196 }
1197 }
1198 }
1199 return NULL;
1200 }
1201
1202 static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
1203 Node* region = phi->in(0);
1204 if (region == NULL) {
1205 return false; // Wait stable graph
1206 }
1207 uint cnt = phi->req();
1208 for (uint i = 1; i < cnt; i++) {
1209 Node* rc = region->in(i);
1210 if (rc == NULL || phase->type(rc) == Type::TOP)
1211 return false; // Wait stable graph
1212 Node* in = phi->in(i);
1213 if (in == NULL || phase->type(in) == Type::TOP)
1214 return false; // Wait stable graph
1215 }
1216 return true;
1217 }
1218 //------------------------------split_through_phi------------------------------
1219 // Split instance or boxed field load through Phi.
1220 Node *LoadNode::split_through_phi(PhaseGVN *phase) {
1221 Node* mem = in(Memory);
1222 Node* address = in(Address);
1223 const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr();
1224
1225 assert((t_oop != NULL) &&
1226 (t_oop->is_known_instance_field() ||
1227 t_oop->is_ptr_to_boxed_value()), "invalide conditions");
1228
1229 Compile* C = phase->C;
1230 intptr_t ignore = 0;
1231 Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1232 bool base_is_phi = (base != NULL) && base->is_Phi();
1233 bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() &&
1234 (base != NULL) && (base == address->in(AddPNode::Base)) &&
1235 phase->type(base)->higher_equal(TypePtr::NOTNULL);
1236
1237 if (!((mem->is_Phi() || base_is_phi) &&
1238 (load_boxed_values || t_oop->is_known_instance_field()))) {
1239 return NULL; // memory is not Phi
1240 }
1241
1242 if (mem->is_Phi()) {
1243 if (!stable_phi(mem->as_Phi(), phase)) {
1244 return NULL; // Wait stable graph
1245 }
1246 uint cnt = mem->req();
1247 // Check for loop invariant memory.
1248 if (cnt == 3) {
1249 for (uint i = 1; i < cnt; i++) {
1250 Node* in = mem->in(i);
1251 Node* m = optimize_memory_chain(in, t_oop, this, phase);
1252 if (m == mem) {
1253 set_req(Memory, mem->in(cnt - i));
1254 return this; // made change
1255 }
1256 }
1257 }
1258 }
1259 if (base_is_phi) {
1260 if (!stable_phi(base->as_Phi(), phase)) {
1261 return NULL; // Wait stable graph
1262 }
1263 uint cnt = base->req();
1264 // Check for loop invariant memory.
1265 if (cnt == 3) {
1266 for (uint i = 1; i < cnt; i++) {
1267 if (base->in(i) == base) {
1268 return NULL; // Wait stable graph
1269 }
1270 }
1271 }
1272 }
1273
1274 bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0));
1275
1276 // Split through Phi (see original code in loopopts.cpp).
1277 assert(C->have_alias_type(t_oop), "instance should have alias type");
1278
1279 // Do nothing here if Identity will find a value
1280 // (to avoid infinite chain of value phis generation).
1281 if (!phase->eqv(this, this->Identity(phase)))
1282 return NULL;
1283
1284 // Select Region to split through.
1285 Node* region;
1286 if (!base_is_phi) {
1287 assert(mem->is_Phi(), "sanity");
1288 region = mem->in(0);
1289 // Skip if the region dominates some control edge of the address.
1290 if (!MemNode::all_controls_dominate(address, region))
1291 return NULL;
1292 } else if (!mem->is_Phi()) {
1293 assert(base_is_phi, "sanity");
1294 region = base->in(0);
1295 // Skip if the region dominates some control edge of the memory.
1296 if (!MemNode::all_controls_dominate(mem, region))
1297 return NULL;
1298 } else if (base->in(0) != mem->in(0)) {
1299 assert(base_is_phi && mem->is_Phi(), "sanity");
1300 if (MemNode::all_controls_dominate(mem, base->in(0))) {
1301 region = base->in(0);
1302 } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
1303 region = mem->in(0);
1304 } else {
1305 return NULL; // complex graph
1306 }
1307 } else {
1308 assert(base->in(0) == mem->in(0), "sanity");
1309 region = mem->in(0);
1310 }
1311
1312 const Type* this_type = this->bottom_type();
1313 int this_index = C->get_alias_index(t_oop);
1314 int this_offset = t_oop->offset();
1315 int this_iid = t_oop->instance_id();
1316 if (!t_oop->is_known_instance() && load_boxed_values) {
1317 // Use _idx of address base for boxed values.
1318 this_iid = base->_idx;
1319 }
1320 PhaseIterGVN* igvn = phase->is_IterGVN();
1321 Node* phi = new PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
1322 for (uint i = 1; i < region->req(); i++) {
1323 Node* x;
1324 Node* the_clone = NULL;
1325 if (region->in(i) == C->top()) {
1326 x = C->top(); // Dead path? Use a dead data op
1327 } else {
1328 x = this->clone(); // Else clone up the data op
1329 the_clone = x; // Remember for possible deletion.
1330 // Alter data node to use pre-phi inputs
1331 if (this->in(0) == region) {
1332 x->set_req(0, region->in(i));
1333 } else {
1334 x->set_req(0, NULL);
1335 }
1336 if (mem->is_Phi() && (mem->in(0) == region)) {
1337 x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
1338 }
1339 if (address->is_Phi() && address->in(0) == region) {
1340 x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
1341 }
1342 if (base_is_phi && (base->in(0) == region)) {
1343 Node* base_x = base->in(i); // Clone address for loads from boxed objects.
1344 Node* adr_x = phase->transform(new AddPNode(base_x,base_x,address->in(AddPNode::Offset)));
1345 x->set_req(Address, adr_x);
1346 }
1347 }
1348 // Check for a 'win' on some paths
1349 const Type *t = x->Value(igvn);
1350
1351 bool singleton = t->singleton();
1352
1353 // See comments in PhaseIdealLoop::split_thru_phi().
1354 if (singleton && t == Type::TOP) {
1355 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
1356 }
1357
1358 if (singleton) {
1359 x = igvn->makecon(t);
1360 } else {
1361 // We now call Identity to try to simplify the cloned node.
1362 // Note that some Identity methods call phase->type(this).
1363 // Make sure that the type array is big enough for
1364 // our new node, even though we may throw the node away.
1365 // (This tweaking with igvn only works because x is a new node.)
1366 igvn->set_type(x, t);
1367 // If x is a TypeNode, capture any more-precise type permanently into Node
1368 // otherwise it will be not updated during igvn->transform since
1369 // igvn->type(x) is set to x->Value() already.
1370 x->raise_bottom_type(t);
1371 Node *y = x->Identity(igvn);
1372 if (y != x) {
1373 x = y;
1374 } else {
1375 y = igvn->hash_find_insert(x);
1376 if (y) {
1377 x = y;
1378 } else {
1379 // Else x is a new node we are keeping
1380 // We do not need register_new_node_with_optimizer
1381 // because set_type has already been called.
1382 igvn->_worklist.push(x);
1383 }
1384 }
1385 }
1386 if (x != the_clone && the_clone != NULL) {
1387 igvn->remove_dead_node(the_clone);
1388 }
1389 phi->set_req(i, x);
1390 }
1391 // Record Phi
1392 igvn->register_new_node_with_optimizer(phi);
1393 return phi;
1394 }
1395
1396 //------------------------------Ideal------------------------------------------
1397 // If the load is from Field memory and the pointer is non-null, it might be possible to
1398 // zero out the control input.
1399 // If the offset is constant and the base is an object allocation,
1400 // try to hook me up to the exact initializing store.
1401 Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1402 Node* p = MemNode::Ideal_common(phase, can_reshape);
1403 if (p) return (p == NodeSentinel) ? NULL : p;
1404
1405 Node* ctrl = in(MemNode::Control);
1406 Node* address = in(MemNode::Address);
1407 bool progress = false;
1408
1409 // Skip up past a SafePoint control. Cannot do this for Stores because
1410 // pointer stores & cardmarks must stay on the same side of a SafePoint.
1411 if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
1412 phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
1413 ctrl = ctrl->in(0);
1414 set_req(MemNode::Control,ctrl);
1415 progress = true;
1416 }
1417
1418 intptr_t ignore = 0;
1419 Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
1420 if (base != NULL
1421 && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
1422 // Check for useless control edge in some common special cases
1423 if (in(MemNode::Control) != NULL
1424 && can_remove_control()
1425 && phase->type(base)->higher_equal(TypePtr::NOTNULL)
1426 && all_controls_dominate(base, phase->C->start())) {
1427 // A method-invariant, non-null address (constant or 'this' argument).
1428 set_req(MemNode::Control, NULL);
1429 progress = true;
1430 }
1431 }
1432
1433 Node* mem = in(MemNode::Memory);
1434 const TypePtr *addr_t = phase->type(address)->isa_ptr();
1435
1436 if (can_reshape && (addr_t != NULL)) {
1437 // try to optimize our memory input
1438 Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
1439 if (opt_mem != mem) {
1440 set_req(MemNode::Memory, opt_mem);
1441 if (phase->type( opt_mem ) == Type::TOP) return NULL;
1442 return this;
1443 }
1444 const TypeOopPtr *t_oop = addr_t->isa_oopptr();
1445 if ((t_oop != NULL) &&
1446 (t_oop->is_known_instance_field() ||
1447 t_oop->is_ptr_to_boxed_value())) {
1448 PhaseIterGVN *igvn = phase->is_IterGVN();
1449 if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
1450 // Delay this transformation until memory Phi is processed.
1451 phase->is_IterGVN()->_worklist.push(this);
1452 return NULL;
1453 }
1454 // Split instance field load through Phi.
1455 Node* result = split_through_phi(phase);
1456 if (result != NULL) return result;
1457
1458 if (t_oop->is_ptr_to_boxed_value()) {
1459 Node* result = eliminate_autobox(phase);
1460 if (result != NULL) return result;
1461 }
1462 }
1463 }
1464
1465 // Is there a dominating load that loads the same value? Leave
1466 // anything that is not a load of a field/array element (like
1467 // barriers etc.) alone
1468 if (in(0) != NULL && adr_type() != TypeRawPtr::BOTTOM && can_reshape) {
1469 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1470 Node *use = mem->fast_out(i);
1471 if (use != this &&
1472 use->Opcode() == Opcode() &&
1473 use->in(0) != NULL &&
1474 use->in(0) != in(0) &&
1475 use->in(Address) == in(Address)) {
1476 Node* ctl = in(0);
1477 for (int i = 0; i < 10 && ctl != NULL; i++) {
1478 ctl = IfNode::up_one_dom(ctl);
1479 if (ctl == use->in(0)) {
1480 set_req(0, use->in(0));
1481 return this;
1482 }
1483 }
1484 }
1485 }
1486 }
1487
1488 // Check for prior store with a different base or offset; make Load
1489 // independent. Skip through any number of them. Bail out if the stores
1490 // are in an endless dead cycle and report no progress. This is a key
1491 // transform for Reflection. However, if after skipping through the Stores
1492 // we can't then fold up against a prior store do NOT do the transform as
1493 // this amounts to using the 'Oracle' model of aliasing. It leaves the same
1494 // array memory alive twice: once for the hoisted Load and again after the
1495 // bypassed Store. This situation only works if EVERYBODY who does
1496 // anti-dependence work knows how to bypass. I.e. we need all
1497 // anti-dependence checks to ask the same Oracle. Right now, that Oracle is
1498 // the alias index stuff. So instead, peek through Stores and IFF we can
1499 // fold up, do so.
1500 Node* prev_mem = find_previous_store(phase);
1501 if (prev_mem != NULL) {
1502 Node* value = can_see_arraycopy_value(prev_mem, phase);
1503 if (value != NULL) {
1504 return value;
1505 }
1506 }
1507 // Steps (a), (b): Walk past independent stores to find an exact match.
1508 if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
1509 // (c) See if we can fold up on the spot, but don't fold up here.
1510 // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
1511 // just return a prior value, which is done by Identity calls.
1512 if (can_see_stored_value(prev_mem, phase)) {
1513 // Make ready for step (d):
1514 set_req(MemNode::Memory, prev_mem);
1515 return this;
1516 }
1517 }
1518
1519 return progress ? this : NULL;
1520 }
1521
1522 // Helper to recognize certain Klass fields which are invariant across
1523 // some group of array types (e.g., int[] or all T[] where T < Object).
1524 const Type*
1525 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1526 ciKlass* klass) const {
1527 if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
1528 // The field is Klass::_modifier_flags. Return its (constant) value.
1529 // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
1530 assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
1531 return TypeInt::make(klass->modifier_flags());
1532 }
1533 if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1534 // The field is Klass::_access_flags. Return its (constant) value.
1535 // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1536 assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
1537 return TypeInt::make(klass->access_flags());
1538 }
1539 if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
1540 // The field is Klass::_layout_helper. Return its constant value if known.
1541 assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
1542 return TypeInt::make(klass->layout_helper());
1543 }
1544
1545 // No match.
1546 return NULL;
1547 }
1548
1549 // Try to constant-fold a stable array element.
1550 static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
1551 assert(ary->const_oop(), "array should be constant");
1552 assert(ary->is_stable(), "array should be stable");
1553
1554 // Decode the results of GraphKit::array_element_address.
1555 ciArray* aobj = ary->const_oop()->as_array();
1556 ciConstant con = aobj->element_value_by_offset(off);
1557
1558 if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
1559 const Type* con_type = Type::make_from_constant(con);
1560 if (con_type != NULL) {
1561 if (con_type->isa_aryptr()) {
1562 // Join with the array element type, in case it is also stable.
1563 int dim = ary->stable_dimension();
1564 con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
1565 }
1566 if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
1567 con_type = con_type->make_narrowoop();
1568 }
1569 #ifndef PRODUCT
1570 if (TraceIterativeGVN) {
1571 tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
1572 con_type->dump(); tty->cr();
1573 }
1574 #endif //PRODUCT
1575 return con_type;
1576 }
1577 }
1578 return NULL;
1579 }
1580
1581 //------------------------------Value-----------------------------------------
1582 const Type *LoadNode::Value( PhaseTransform *phase ) const {
1583 // Either input is TOP ==> the result is TOP
1584 Node* mem = in(MemNode::Memory);
1585 const Type *t1 = phase->type(mem);
1586 if (t1 == Type::TOP) return Type::TOP;
1587 Node* adr = in(MemNode::Address);
1588 const TypePtr* tp = phase->type(adr)->isa_ptr();
1589 if (tp == NULL || tp->empty()) return Type::TOP;
1590 int off = tp->offset();
1591 assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1592 Compile* C = phase->C;
1593
1594 // Try to guess loaded type from pointer type
1595 if (tp->isa_aryptr()) {
1596 const TypeAryPtr* ary = tp->is_aryptr();
1597 const Type* t = ary->elem();
1598
1599 // Determine whether the reference is beyond the header or not, by comparing
1600 // the offset against the offset of the start of the array's data.
1601 // Different array types begin at slightly different offsets (12 vs. 16).
1602 // We choose T_BYTE as an example base type that is least restrictive
1603 // as to alignment, which will therefore produce the smallest
1604 // possible base offset.
1605 const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1606 const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
1607
1608 // Try to constant-fold a stable array element.
1609 if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
1610 // Make sure the reference is not into the header and the offset is constant
1611 if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
1612 const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
1613 if (con_type != NULL) {
1614 return con_type;
1615 }
1616 }
1617 }
1618
1619 // Don't do this for integer types. There is only potential profit if
1620 // the element type t is lower than _type; that is, for int types, if _type is
1621 // more restrictive than t. This only happens here if one is short and the other
1622 // char (both 16 bits), and in those cases we've made an intentional decision
1623 // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1624 // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1625 //
1626 // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1627 // where the _gvn.type of the AddP is wider than 8. This occurs when an earlier
1628 // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1629 // subsumed by p1. If p1 is on the worklist but has not yet been re-transformed,
1630 // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1631 // In fact, that could have been the original type of p1, and p1 could have
1632 // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1633 // expression (LShiftL quux 3) independently optimized to the constant 8.
1634 if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1635 && (_type->isa_vect() == NULL)
1636 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1637 // t might actually be lower than _type, if _type is a unique
1638 // concrete subclass of abstract class t.
1639 if (off_beyond_header) { // is the offset beyond the header?
1640 const Type* jt = t->join_speculative(_type);
1641 // In any case, do not allow the join, per se, to empty out the type.
1642 if (jt->empty() && !t->empty()) {
1643 // This can happen if a interface-typed array narrows to a class type.
1644 jt = _type;
1645 }
1646 #ifdef ASSERT
1647 if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1648 // The pointers in the autobox arrays are always non-null
1649 Node* base = adr->in(AddPNode::Base);
1650 if ((base != NULL) && base->is_DecodeN()) {
1651 // Get LoadN node which loads IntegerCache.cache field
1652 base = base->in(1);
1653 }
1654 if ((base != NULL) && base->is_Con()) {
1655 const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1656 if ((base_type != NULL) && base_type->is_autobox_cache()) {
1657 // It could be narrow oop
1658 assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1659 }
1660 }
1661 }
1662 #endif
1663 return jt;
1664 }
1665 }
1666 } else if (tp->base() == Type::InstPtr) {
1667 ciEnv* env = C->env();
1668 const TypeInstPtr* tinst = tp->is_instptr();
1669 ciKlass* klass = tinst->klass();
1670 assert( off != Type::OffsetBot ||
1671 // arrays can be cast to Objects
1672 tp->is_oopptr()->klass()->is_java_lang_Object() ||
1673 // unsafe field access may not have a constant offset
1674 C->has_unsafe_access(),
1675 "Field accesses must be precise" );
1676 // For oop loads, we expect the _type to be precise
1677 if (klass == env->String_klass() &&
1678 adr->is_AddP() && off != Type::OffsetBot) {
1679 // For constant Strings treat the final fields as compile time constants.
1680 // While we can list what field types java.lang.String has, it is more
1681 // future-proof to handle all possible field types, anticipating future
1682 // changes and experiments in String code.
1683 Node* base = adr->in(AddPNode::Base);
1684 const TypeOopPtr* t = phase->type(base)->isa_oopptr();
1685 if (t != NULL && t->singleton()) {
1686 ciField* field = env->String_klass()->get_field_by_offset(off, false);
1687 if (field != NULL && field->is_final()) {
1688 ciObject* string = t->const_oop();
1689 ciConstant constant = string->as_instance()->field_value(field);
1690 // Type::make_from_constant does not handle narrow oops, so handle it here.
1691 // Everything else can use the factory method.
1692 if ((constant.basic_type() == T_ARRAY || constant.basic_type() == T_OBJECT)
1693 && adr->bottom_type()->is_ptr_to_narrowoop()) {
1694 return TypeNarrowOop::make_from_constant(constant.as_object(), true);
1695 } else {
1696 return Type::make_from_constant(constant, true);
1697 }
1698 }
1699 }
1700 }
1701 // Optimizations for constant objects
1702 ciObject* const_oop = tinst->const_oop();
1703 if (const_oop != NULL) {
1704 // For constant Boxed value treat the target field as a compile time constant.
1705 if (tinst->is_ptr_to_boxed_value()) {
1706 return tinst->get_const_boxed_value();
1707 } else
1708 // For constant CallSites treat the target field as a compile time constant.
1709 if (const_oop->is_call_site()) {
1710 ciCallSite* call_site = const_oop->as_call_site();
1711 ciField* field = call_site->klass()->as_instance_klass()->get_field_by_offset(off, /*is_static=*/ false);
1712 if (field != NULL && field->is_call_site_target()) {
1713 ciMethodHandle* target = call_site->get_target();
1714 if (target != NULL) { // just in case
1715 ciConstant constant(T_OBJECT, target);
1716 const Type* t;
1717 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
1718 t = TypeNarrowOop::make_from_constant(constant.as_object(), true);
1719 } else {
1720 t = TypeOopPtr::make_from_constant(constant.as_object(), true);
1721 }
1722 // Add a dependence for invalidation of the optimization.
1723 if (!call_site->is_constant_call_site()) {
1724 C->dependencies()->assert_call_site_target_value(call_site, target);
1725 }
1726 return t;
1727 }
1728 }
1729 }
1730 }
1731 } else if (tp->base() == Type::KlassPtr) {
1732 assert( off != Type::OffsetBot ||
1733 // arrays can be cast to Objects
1734 tp->is_klassptr()->klass()->is_java_lang_Object() ||
1735 // also allow array-loading from the primary supertype
1736 // array during subtype checks
1737 Opcode() == Op_LoadKlass,
1738 "Field accesses must be precise" );
1739 // For klass/static loads, we expect the _type to be precise
1740 }
1741
1742 const TypeKlassPtr *tkls = tp->isa_klassptr();
1743 if (tkls != NULL && !StressReflectiveCode) {
1744 ciKlass* klass = tkls->klass();
1745 if (klass->is_loaded() && tkls->klass_is_exact()) {
1746 // We are loading a field from a Klass metaobject whose identity
1747 // is known at compile time (the type is "exact" or "precise").
1748 // Check for fields we know are maintained as constants by the VM.
1749 if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
1750 // The field is Klass::_super_check_offset. Return its (constant) value.
1751 // (Folds up type checking code.)
1752 assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
1753 return TypeInt::make(klass->super_check_offset());
1754 }
1755 // Compute index into primary_supers array
1756 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
1757 // Check for overflowing; use unsigned compare to handle the negative case.
1758 if( depth < ciKlass::primary_super_limit() ) {
1759 // The field is an element of Klass::_primary_supers. Return its (constant) value.
1760 // (Folds up type checking code.)
1761 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
1762 ciKlass *ss = klass->super_of_depth(depth);
1763 return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
1764 }
1765 const Type* aift = load_array_final_field(tkls, klass);
1766 if (aift != NULL) return aift;
1767 if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
1768 // The field is Klass::_java_mirror. Return its (constant) value.
1769 // (Folds up the 2nd indirection in anObjConstant.getClass().)
1770 assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1771 return TypeInstPtr::make(klass->java_mirror());
1772 }
1773 }
1774
1775 // We can still check if we are loading from the primary_supers array at a
1776 // shallow enough depth. Even though the klass is not exact, entries less
1777 // than or equal to its super depth are correct.
1778 if (klass->is_loaded() ) {
1779 ciType *inner = klass;
1780 while( inner->is_obj_array_klass() )
1781 inner = inner->as_obj_array_klass()->base_element_type();
1782 if( inner->is_instance_klass() &&
1783 !inner->as_instance_klass()->flags().is_interface() ) {
1784 // Compute index into primary_supers array
1785 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
1786 // Check for overflowing; use unsigned compare to handle the negative case.
1787 if( depth < ciKlass::primary_super_limit() &&
1788 depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
1789 // The field is an element of Klass::_primary_supers. Return its (constant) value.
1790 // (Folds up type checking code.)
1791 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
1792 ciKlass *ss = klass->super_of_depth(depth);
1793 return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
1794 }
1795 }
1796 }
1797
1798 // If the type is enough to determine that the thing is not an array,
1799 // we can give the layout_helper a positive interval type.
1800 // This will help short-circuit some reflective code.
1801 if (tkls->offset() == in_bytes(Klass::layout_helper_offset())
1802 && !klass->is_array_klass() // not directly typed as an array
1803 && !klass->is_interface() // specifically not Serializable & Cloneable
1804 && !klass->is_java_lang_Object() // not the supertype of all T[]
1805 ) {
1806 // Note: When interfaces are reliable, we can narrow the interface
1807 // test to (klass != Serializable && klass != Cloneable).
1808 assert(Opcode() == Op_LoadI, "must load an int from _layout_helper");
1809 jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false);
1810 // The key property of this type is that it folds up tests
1811 // for array-ness, since it proves that the layout_helper is positive.
1812 // Thus, a generic value like the basic object layout helper works fine.
1813 return TypeInt::make(min_size, max_jint, Type::WidenMin);
1814 }
1815 }
1816
1817 // If we are loading from a freshly-allocated object, produce a zero,
1818 // if the load is provably beyond the header of the object.
1819 // (Also allow a variable load from a fresh array to produce zero.)
1820 const TypeOopPtr *tinst = tp->isa_oopptr();
1821 bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
1822 bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value();
1823 if (ReduceFieldZeroing || is_instance || is_boxed_value) {
1824 Node* value = can_see_stored_value(mem,phase);
1825 if (value != NULL && value->is_Con()) {
1826 assert(value->bottom_type()->higher_equal(_type),"sanity");
1827 return value->bottom_type();
1828 }
1829 }
1830
1831 if (is_instance) {
1832 // If we have an instance type and our memory input is the
1833 // programs's initial memory state, there is no matching store,
1834 // so just return a zero of the appropriate type
1835 Node *mem = in(MemNode::Memory);
1836 if (mem->is_Parm() && mem->in(0)->is_Start()) {
1837 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
1838 return Type::get_zero_type(_type->basic_type());
1839 }
1840 }
1841 return _type;
1842 }
1843
1844 //------------------------------match_edge-------------------------------------
1845 // Do we Match on this edge index or not? Match only the address.
1846 uint LoadNode::match_edge(uint idx) const {
1847 return idx == MemNode::Address;
1848 }
1849
1850 //--------------------------LoadBNode::Ideal--------------------------------------
1851 //
1852 // If the previous store is to the same address as this load,
1853 // and the value stored was larger than a byte, replace this load
1854 // with the value stored truncated to a byte. If no truncation is
1855 // needed, the replacement is done in LoadNode::Identity().
1856 //
1857 Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1858 Node* mem = in(MemNode::Memory);
1859 Node* value = can_see_stored_value(mem,phase);
1860 if( value && !phase->type(value)->higher_equal( _type ) ) {
1861 Node *result = phase->transform( new LShiftINode(value, phase->intcon(24)) );
1862 return new RShiftINode(result, phase->intcon(24));
1863 }
1864 // Identity call will handle the case where truncation is not needed.
1865 return LoadNode::Ideal(phase, can_reshape);
1866 }
1867
1868 const Type* LoadBNode::Value(PhaseTransform *phase) const {
1869 Node* mem = in(MemNode::Memory);
1870 Node* value = can_see_stored_value(mem,phase);
1871 if (value != NULL && value->is_Con() &&
1872 !value->bottom_type()->higher_equal(_type)) {
1873 // If the input to the store does not fit with the load's result type,
1874 // it must be truncated. We can't delay until Ideal call since
1875 // a singleton Value is needed for split_thru_phi optimization.
1876 int con = value->get_int();
1877 return TypeInt::make((con << 24) >> 24);
1878 }
1879 return LoadNode::Value(phase);
1880 }
1881
1882 //--------------------------LoadUBNode::Ideal-------------------------------------
1883 //
1884 // If the previous store is to the same address as this load,
1885 // and the value stored was larger than a byte, replace this load
1886 // with the value stored truncated to a byte. If no truncation is
1887 // needed, the replacement is done in LoadNode::Identity().
1888 //
1889 Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1890 Node* mem = in(MemNode::Memory);
1891 Node* value = can_see_stored_value(mem, phase);
1892 if (value && !phase->type(value)->higher_equal(_type))
1893 return new AndINode(value, phase->intcon(0xFF));
1894 // Identity call will handle the case where truncation is not needed.
1895 return LoadNode::Ideal(phase, can_reshape);
1896 }
1897
1898 const Type* LoadUBNode::Value(PhaseTransform *phase) const {
1899 Node* mem = in(MemNode::Memory);
1900 Node* value = can_see_stored_value(mem,phase);
1901 if (value != NULL && value->is_Con() &&
1902 !value->bottom_type()->higher_equal(_type)) {
1903 // If the input to the store does not fit with the load's result type,
1904 // it must be truncated. We can't delay until Ideal call since
1905 // a singleton Value is needed for split_thru_phi optimization.
1906 int con = value->get_int();
1907 return TypeInt::make(con & 0xFF);
1908 }
1909 return LoadNode::Value(phase);
1910 }
1911
1912 //--------------------------LoadUSNode::Ideal-------------------------------------
1913 //
1914 // If the previous store is to the same address as this load,
1915 // and the value stored was larger than a char, replace this load
1916 // with the value stored truncated to a char. If no truncation is
1917 // needed, the replacement is done in LoadNode::Identity().
1918 //
1919 Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1920 Node* mem = in(MemNode::Memory);
1921 Node* value = can_see_stored_value(mem,phase);
1922 if( value && !phase->type(value)->higher_equal( _type ) )
1923 return new AndINode(value,phase->intcon(0xFFFF));
1924 // Identity call will handle the case where truncation is not needed.
1925 return LoadNode::Ideal(phase, can_reshape);
1926 }
1927
1928 const Type* LoadUSNode::Value(PhaseTransform *phase) const {
1929 Node* mem = in(MemNode::Memory);
1930 Node* value = can_see_stored_value(mem,phase);
1931 if (value != NULL && value->is_Con() &&
1932 !value->bottom_type()->higher_equal(_type)) {
1933 // If the input to the store does not fit with the load's result type,
1934 // it must be truncated. We can't delay until Ideal call since
1935 // a singleton Value is needed for split_thru_phi optimization.
1936 int con = value->get_int();
1937 return TypeInt::make(con & 0xFFFF);
1938 }
1939 return LoadNode::Value(phase);
1940 }
1941
1942 //--------------------------LoadSNode::Ideal--------------------------------------
1943 //
1944 // If the previous store is to the same address as this load,
1945 // and the value stored was larger than a short, replace this load
1946 // with the value stored truncated to a short. If no truncation is
1947 // needed, the replacement is done in LoadNode::Identity().
1948 //
1949 Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1950 Node* mem = in(MemNode::Memory);
1951 Node* value = can_see_stored_value(mem,phase);
1952 if( value && !phase->type(value)->higher_equal( _type ) ) {
1953 Node *result = phase->transform( new LShiftINode(value, phase->intcon(16)) );
1954 return new RShiftINode(result, phase->intcon(16));
1955 }
1956 // Identity call will handle the case where truncation is not needed.
1957 return LoadNode::Ideal(phase, can_reshape);
1958 }
1959
1960 const Type* LoadSNode::Value(PhaseTransform *phase) const {
1961 Node* mem = in(MemNode::Memory);
1962 Node* value = can_see_stored_value(mem,phase);
1963 if (value != NULL && value->is_Con() &&
1964 !value->bottom_type()->higher_equal(_type)) {
1965 // If the input to the store does not fit with the load's result type,
1966 // it must be truncated. We can't delay until Ideal call since
1967 // a singleton Value is needed for split_thru_phi optimization.
1968 int con = value->get_int();
1969 return TypeInt::make((con << 16) >> 16);
1970 }
1971 return LoadNode::Value(phase);
1972 }
1973
1974 //=============================================================================
1975 //----------------------------LoadKlassNode::make------------------------------
1976 // Polymorphic factory method:
1977 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
1978 // sanity check the alias category against the created node type
1979 const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
1980 assert(adr_type != NULL, "expecting TypeKlassPtr");
1981 #ifdef _LP64
1982 if (adr_type->is_ptr_to_narrowklass()) {
1983 assert(UseCompressedClassPointers, "no compressed klasses");
1984 Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
1985 return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
1986 }
1987 #endif
1988 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
1989 return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
1990 }
1991
1992 //------------------------------Value------------------------------------------
1993 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
1994 return klass_value_common(phase);
1995 }
1996
1997 // In most cases, LoadKlassNode does not have the control input set. If the control
1998 // input is set, it must not be removed (by LoadNode::Ideal()).
1999 bool LoadKlassNode::can_remove_control() const {
2000 return false;
2001 }
2002
2003 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
2004 // Either input is TOP ==> the result is TOP
2005 const Type *t1 = phase->type( in(MemNode::Memory) );
2006 if (t1 == Type::TOP) return Type::TOP;
2007 Node *adr = in(MemNode::Address);
2008 const Type *t2 = phase->type( adr );
2009 if (t2 == Type::TOP) return Type::TOP;
2010 const TypePtr *tp = t2->is_ptr();
2011 if (TypePtr::above_centerline(tp->ptr()) ||
2012 tp->ptr() == TypePtr::Null) return Type::TOP;
2013
2014 // Return a more precise klass, if possible
2015 const TypeInstPtr *tinst = tp->isa_instptr();
2016 if (tinst != NULL) {
2017 ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2018 int offset = tinst->offset();
2019 if (ik == phase->C->env()->Class_klass()
2020 && (offset == java_lang_Class::klass_offset_in_bytes() ||
2021 offset == java_lang_Class::array_klass_offset_in_bytes())) {
2022 // We are loading a special hidden field from a Class mirror object,
2023 // the field which points to the VM's Klass metaobject.
2024 ciType* t = tinst->java_mirror_type();
2025 // java_mirror_type returns non-null for compile-time Class constants.
2026 if (t != NULL) {
2027 // constant oop => constant klass
2028 if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
2029 if (t->is_void()) {
2030 // We cannot create a void array. Since void is a primitive type return null
2031 // klass. Users of this result need to do a null check on the returned klass.
2032 return TypePtr::NULL_PTR;
2033 }
2034 return TypeKlassPtr::make(ciArrayKlass::make(t));
2035 }
2036 if (!t->is_klass()) {
2037 // a primitive Class (e.g., int.class) has NULL for a klass field
2038 return TypePtr::NULL_PTR;
2039 }
2040 // (Folds up the 1st indirection in aClassConstant.getModifiers().)
2041 return TypeKlassPtr::make(t->as_klass());
2042 }
2043 // non-constant mirror, so we can't tell what's going on
2044 }
2045 if( !ik->is_loaded() )
2046 return _type; // Bail out if not loaded
2047 if (offset == oopDesc::klass_offset_in_bytes()) {
2048 if (tinst->klass_is_exact()) {
2049 return TypeKlassPtr::make(ik);
2050 }
2051 // See if we can become precise: no subklasses and no interface
2052 // (Note: We need to support verified interfaces.)
2053 if (!ik->is_interface() && !ik->has_subklass()) {
2054 //assert(!UseExactTypes, "this code should be useless with exact types");
2055 // Add a dependence; if any subclass added we need to recompile
2056 if (!ik->is_final()) {
2057 // %%% should use stronger assert_unique_concrete_subtype instead
2058 phase->C->dependencies()->assert_leaf_type(ik);
2059 }
2060 // Return precise klass
2061 return TypeKlassPtr::make(ik);
2062 }
2063
2064 // Return root of possible klass
2065 return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
2066 }
2067 }
2068
2069 // Check for loading klass from an array
2070 const TypeAryPtr *tary = tp->isa_aryptr();
2071 if( tary != NULL ) {
2072 ciKlass *tary_klass = tary->klass();
2073 if (tary_klass != NULL // can be NULL when at BOTTOM or TOP
2074 && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2075 if (tary->klass_is_exact()) {
2076 return TypeKlassPtr::make(tary_klass);
2077 }
2078 ciArrayKlass *ak = tary->klass()->as_array_klass();
2079 // If the klass is an object array, we defer the question to the
2080 // array component klass.
2081 if( ak->is_obj_array_klass() ) {
2082 assert( ak->is_loaded(), "" );
2083 ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2084 if( base_k->is_loaded() && base_k->is_instance_klass() ) {
2085 ciInstanceKlass* ik = base_k->as_instance_klass();
2086 // See if we can become precise: no subklasses and no interface
2087 if (!ik->is_interface() && !ik->has_subklass()) {
2088 //assert(!UseExactTypes, "this code should be useless with exact types");
2089 // Add a dependence; if any subclass added we need to recompile
2090 if (!ik->is_final()) {
2091 phase->C->dependencies()->assert_leaf_type(ik);
2092 }
2093 // Return precise array klass
2094 return TypeKlassPtr::make(ak);
2095 }
2096 }
2097 return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
2098 } else { // Found a type-array?
2099 //assert(!UseExactTypes, "this code should be useless with exact types");
2100 assert( ak->is_type_array_klass(), "" );
2101 return TypeKlassPtr::make(ak); // These are always precise
2102 }
2103 }
2104 }
2105
2106 // Check for loading klass from an array klass
2107 const TypeKlassPtr *tkls = tp->isa_klassptr();
2108 if (tkls != NULL && !StressReflectiveCode) {
2109 ciKlass* klass = tkls->klass();
2110 if( !klass->is_loaded() )
2111 return _type; // Bail out if not loaded
2112 if( klass->is_obj_array_klass() &&
2113 tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2114 ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2115 // // Always returning precise element type is incorrect,
2116 // // e.g., element type could be object and array may contain strings
2117 // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2118
2119 // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2120 // according to the element type's subclassing.
2121 return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
2122 }
2123 if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2124 tkls->offset() == in_bytes(Klass::super_offset())) {
2125 ciKlass* sup = klass->as_instance_klass()->super();
2126 // The field is Klass::_super. Return its (constant) value.
2127 // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2128 return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2129 }
2130 }
2131
2132 // Bailout case
2133 return LoadNode::Value(phase);
2134 }
2135
2136 //------------------------------Identity---------------------------------------
2137 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2138 // Also feed through the klass in Allocate(...klass...)._klass.
2139 Node* LoadKlassNode::Identity( PhaseTransform *phase ) {
2140 return klass_identity_common(phase);
2141 }
2142
2143 Node* LoadNode::klass_identity_common(PhaseTransform *phase ) {
2144 Node* x = LoadNode::Identity(phase);
2145 if (x != this) return x;
2146
2147 // Take apart the address into an oop and and offset.
2148 // Return 'this' if we cannot.
2149 Node* adr = in(MemNode::Address);
2150 intptr_t offset = 0;
2151 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2152 if (base == NULL) return this;
2153 const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2154 if (toop == NULL) return this;
2155
2156 // We can fetch the klass directly through an AllocateNode.
2157 // This works even if the klass is not constant (clone or newArray).
2158 if (offset == oopDesc::klass_offset_in_bytes()) {
2159 Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2160 if (allocated_klass != NULL) {
2161 return allocated_klass;
2162 }
2163 }
2164
2165 // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*.
2166 // See inline_native_Class_query for occurrences of these patterns.
2167 // Java Example: x.getClass().isAssignableFrom(y)
2168 //
2169 // This improves reflective code, often making the Class
2170 // mirror go completely dead. (Current exception: Class
2171 // mirrors may appear in debug info, but we could clean them out by
2172 // introducing a new debug info operator for Klass*.java_mirror).
2173 if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
2174 && offset == java_lang_Class::klass_offset_in_bytes()) {
2175 // We are loading a special hidden field from a Class mirror,
2176 // the field which points to its Klass or ArrayKlass metaobject.
2177 if (base->is_Load()) {
2178 Node* adr2 = base->in(MemNode::Address);
2179 const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2180 if (tkls != NULL && !tkls->empty()
2181 && (tkls->klass()->is_instance_klass() ||
2182 tkls->klass()->is_array_klass())
2183 && adr2->is_AddP()
2184 ) {
2185 int mirror_field = in_bytes(Klass::java_mirror_offset());
2186 if (tkls->offset() == mirror_field) {
2187 return adr2->in(AddPNode::Base);
2188 }
2189 }
2190 }
2191 }
2192
2193 return this;
2194 }
2195
2196
2197 //------------------------------Value------------------------------------------
2198 const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
2199 const Type *t = klass_value_common(phase);
2200 if (t == Type::TOP)
2201 return t;
2202
2203 return t->make_narrowklass();
2204 }
2205
2206 //------------------------------Identity---------------------------------------
2207 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
2208 // Also feed through the klass in Allocate(...klass...)._klass.
2209 Node* LoadNKlassNode::Identity( PhaseTransform *phase ) {
2210 Node *x = klass_identity_common(phase);
2211
2212 const Type *t = phase->type( x );
2213 if( t == Type::TOP ) return x;
2214 if( t->isa_narrowklass()) return x;
2215 assert (!t->isa_narrowoop(), "no narrow oop here");
2216
2217 return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
2218 }
2219
2220 //------------------------------Value-----------------------------------------
2221 const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
2222 // Either input is TOP ==> the result is TOP
2223 const Type *t1 = phase->type( in(MemNode::Memory) );
2224 if( t1 == Type::TOP ) return Type::TOP;
2225 Node *adr = in(MemNode::Address);
2226 const Type *t2 = phase->type( adr );
2227 if( t2 == Type::TOP ) return Type::TOP;
2228 const TypePtr *tp = t2->is_ptr();
2229 if (TypePtr::above_centerline(tp->ptr())) return Type::TOP;
2230 const TypeAryPtr *tap = tp->isa_aryptr();
2231 if( !tap ) return _type;
2232 return tap->size();
2233 }
2234
2235 //-------------------------------Ideal---------------------------------------
2236 // Feed through the length in AllocateArray(...length...)._length.
2237 Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2238 Node* p = MemNode::Ideal_common(phase, can_reshape);
2239 if (p) return (p == NodeSentinel) ? NULL : p;
2240
2241 // Take apart the address into an oop and and offset.
2242 // Return 'this' if we cannot.
2243 Node* adr = in(MemNode::Address);
2244 intptr_t offset = 0;
2245 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2246 if (base == NULL) return NULL;
2247 const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
2248 if (tary == NULL) return NULL;
2249
2250 // We can fetch the length directly through an AllocateArrayNode.
2251 // This works even if the length is not constant (clone or newArray).
2252 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2253 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2254 if (alloc != NULL) {
2255 Node* allocated_length = alloc->Ideal_length();
2256 Node* len = alloc->make_ideal_length(tary, phase);
2257 if (allocated_length != len) {
2258 // New CastII improves on this.
2259 return len;
2260 }
2261 }
2262 }
2263
2264 return NULL;
2265 }
2266
2267 //------------------------------Identity---------------------------------------
2268 // Feed through the length in AllocateArray(...length...)._length.
2269 Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
2270 Node* x = LoadINode::Identity(phase);
2271 if (x != this) return x;
2272
2273 // Take apart the address into an oop and and offset.
2274 // Return 'this' if we cannot.
2275 Node* adr = in(MemNode::Address);
2276 intptr_t offset = 0;
2277 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2278 if (base == NULL) return this;
2279 const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
2280 if (tary == NULL) return this;
2281
2282 // We can fetch the length directly through an AllocateArrayNode.
2283 // This works even if the length is not constant (clone or newArray).
2284 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2285 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2286 if (alloc != NULL) {
2287 Node* allocated_length = alloc->Ideal_length();
2288 // Do not allow make_ideal_length to allocate a CastII node.
2289 Node* len = alloc->make_ideal_length(tary, phase, false);
2290 if (allocated_length == len) {
2291 // Return allocated_length only if it would not be improved by a CastII.
2292 return allocated_length;
2293 }
2294 }
2295 }
2296
2297 return this;
2298
2299 }
2300
2301 //=============================================================================
2302 //---------------------------StoreNode::make-----------------------------------
2303 // Polymorphic factory method:
2304 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2305 assert((mo == unordered || mo == release), "unexpected");
2306 Compile* C = gvn.C;
2307 assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2308 ctl != NULL, "raw memory operations should have control edge");
2309
2310 switch (bt) {
2311 case T_BOOLEAN:
2312 case T_BYTE: return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2313 case T_INT: return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2314 case T_CHAR:
2315 case T_SHORT: return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2316 case T_LONG: return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2317 case T_FLOAT: return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2318 case T_DOUBLE: return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2319 case T_METADATA:
2320 case T_ADDRESS:
2321 case T_OBJECT:
2322 #ifdef _LP64
2323 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2324 val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2325 return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2326 } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2327 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2328 adr->bottom_type()->isa_rawptr())) {
2329 val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2330 return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2331 }
2332 #endif
2333 {
2334 return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2335 }
2336 }
2337 ShouldNotReachHere();
2338 return (StoreNode*)NULL;
2339 }
2340
2341 StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
2342 bool require_atomic = true;
2343 return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
2344 }
2345
2346 StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
2347 bool require_atomic = true;
2348 return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
2349 }
2350
2351
2352 //--------------------------bottom_type----------------------------------------
2353 const Type *StoreNode::bottom_type() const {
2354 return Type::MEMORY;
2355 }
2356
2357 //------------------------------hash-------------------------------------------
2358 uint StoreNode::hash() const {
2359 // unroll addition of interesting fields
2360 //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2361
2362 // Since they are not commoned, do not hash them:
2363 return NO_HASH;
2364 }
2365
2366 //------------------------------Ideal------------------------------------------
2367 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
2368 // When a store immediately follows a relevant allocation/initialization,
2369 // try to capture it into the initialization, or hoist it above.
2370 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2371 Node* p = MemNode::Ideal_common(phase, can_reshape);
2372 if (p) return (p == NodeSentinel) ? NULL : p;
2373
2374 Node* mem = in(MemNode::Memory);
2375 Node* address = in(MemNode::Address);
2376 // Back-to-back stores to same address? Fold em up. Generally
2377 // unsafe if I have intervening uses... Also disallowed for StoreCM
2378 // since they must follow each StoreP operation. Redundant StoreCMs
2379 // are eliminated just before matching in final_graph_reshape.
2380 {
2381 Node* st = mem;
2382 // If Store 'st' has more than one use, we cannot fold 'st' away.
2383 // For example, 'st' might be the final state at a conditional
2384 // return. Or, 'st' might be used by some node which is live at
2385 // the same time 'st' is live, which might be unschedulable. So,
2386 // require exactly ONE user until such time as we clone 'mem' for
2387 // each of 'mem's uses (thus making the exactly-1-user-rule hold
2388 // true).
2389 while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2390 // Looking at a dead closed cycle of memory?
2391 assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2392 assert(Opcode() == st->Opcode() ||
2393 st->Opcode() == Op_StoreVector ||
2394 Opcode() == Op_StoreVector ||
2395 phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2396 (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
2397 "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2398
2399 if (st->in(MemNode::Address)->eqv_uncast(address) &&
2400 st->as_Store()->memory_size() <= this->memory_size()) {
2401 Node* use = st->raw_out(0);
2402 phase->igvn_rehash_node_delayed(use);
2403 if (can_reshape) {
2404 use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
2405 } else {
2406 // It's OK to do this in the parser, since DU info is always accurate,
2407 // and the parser always refers to nodes via SafePointNode maps.
2408 use->set_req(MemNode::Memory, st->in(MemNode::Memory));
2409 }
2410 return this;
2411 }
2412 st = st->in(MemNode::Memory);
2413 }
2414 }
2415
2416
2417 // Capture an unaliased, unconditional, simple store into an initializer.
2418 // Or, if it is independent of the allocation, hoist it above the allocation.
2419 if (ReduceFieldZeroing && /*can_reshape &&*/
2420 mem->is_Proj() && mem->in(0)->is_Initialize()) {
2421 InitializeNode* init = mem->in(0)->as_Initialize();
2422 intptr_t offset = init->can_capture_store(this, phase, can_reshape);
2423 if (offset > 0) {
2424 Node* moved = init->capture_store(this, offset, phase, can_reshape);
2425 // If the InitializeNode captured me, it made a raw copy of me,
2426 // and I need to disappear.
2427 if (moved != NULL) {
2428 // %%% hack to ensure that Ideal returns a new node:
2429 mem = MergeMemNode::make(mem);
2430 return mem; // fold me away
2431 }
2432 }
2433 }
2434
2435 return NULL; // No further progress
2436 }
2437
2438 //------------------------------Value-----------------------------------------
2439 const Type *StoreNode::Value( PhaseTransform *phase ) const {
2440 // Either input is TOP ==> the result is TOP
2441 const Type *t1 = phase->type( in(MemNode::Memory) );
2442 if( t1 == Type::TOP ) return Type::TOP;
2443 const Type *t2 = phase->type( in(MemNode::Address) );
2444 if( t2 == Type::TOP ) return Type::TOP;
2445 const Type *t3 = phase->type( in(MemNode::ValueIn) );
2446 if( t3 == Type::TOP ) return Type::TOP;
2447 return Type::MEMORY;
2448 }
2449
2450 //------------------------------Identity---------------------------------------
2451 // Remove redundant stores:
2452 // Store(m, p, Load(m, p)) changes to m.
2453 // Store(, p, x) -> Store(m, p, x) changes to Store(m, p, x).
2454 Node *StoreNode::Identity( PhaseTransform *phase ) {
2455 Node* mem = in(MemNode::Memory);
2456 Node* adr = in(MemNode::Address);
2457 Node* val = in(MemNode::ValueIn);
2458
2459 // Load then Store? Then the Store is useless
2460 if (val->is_Load() &&
2461 val->in(MemNode::Address)->eqv_uncast(adr) &&
2462 val->in(MemNode::Memory )->eqv_uncast(mem) &&
2463 val->as_Load()->store_Opcode() == Opcode()) {
2464 return mem;
2465 }
2466
2467 // Two stores in a row of the same value?
2468 if (mem->is_Store() &&
2469 mem->in(MemNode::Address)->eqv_uncast(adr) &&
2470 mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2471 mem->Opcode() == Opcode()) {
2472 return mem;
2473 }
2474
2475 // Store of zero anywhere into a freshly-allocated object?
2476 // Then the store is useless.
2477 // (It must already have been captured by the InitializeNode.)
2478 if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2479 // a newly allocated object is already all-zeroes everywhere
2480 if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
2481 return mem;
2482 }
2483
2484 // the store may also apply to zero-bits in an earlier object
2485 Node* prev_mem = find_previous_store(phase);
2486 // Steps (a), (b): Walk past independent stores to find an exact match.
2487 if (prev_mem != NULL) {
2488 Node* prev_val = can_see_stored_value(prev_mem, phase);
2489 if (prev_val != NULL && phase->eqv(prev_val, val)) {
2490 // prev_val and val might differ by a cast; it would be good
2491 // to keep the more informative of the two.
2492 return mem;
2493 }
2494 }
2495 }
2496
2497 return this;
2498 }
2499
2500 //------------------------------match_edge-------------------------------------
2501 // Do we Match on this edge index or not? Match only memory & value
2502 uint StoreNode::match_edge(uint idx) const {
2503 return idx == MemNode::Address || idx == MemNode::ValueIn;
2504 }
2505
2506 //------------------------------cmp--------------------------------------------
2507 // Do not common stores up together. They generally have to be split
2508 // back up anyways, so do not bother.
2509 uint StoreNode::cmp( const Node &n ) const {
2510 return (&n == this); // Always fail except on self
2511 }
2512
2513 //------------------------------Ideal_masked_input-----------------------------
2514 // Check for a useless mask before a partial-word store
2515 // (StoreB ... (AndI valIn conIa) )
2516 // If (conIa & mask == mask) this simplifies to
2517 // (StoreB ... (valIn) )
2518 Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) {
2519 Node *val = in(MemNode::ValueIn);
2520 if( val->Opcode() == Op_AndI ) {
2521 const TypeInt *t = phase->type( val->in(2) )->isa_int();
2522 if( t && t->is_con() && (t->get_con() & mask) == mask ) {
2523 set_req(MemNode::ValueIn, val->in(1));
2524 return this;
2525 }
2526 }
2527 return NULL;
2528 }
2529
2530
2531 //------------------------------Ideal_sign_extended_input----------------------
2532 // Check for useless sign-extension before a partial-word store
2533 // (StoreB ... (RShiftI _ (LShiftI _ valIn conIL ) conIR) )
2534 // If (conIL == conIR && conIR <= num_bits) this simplifies to
2535 // (StoreB ... (valIn) )
2536 Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) {
2537 Node *val = in(MemNode::ValueIn);
2538 if( val->Opcode() == Op_RShiftI ) {
2539 const TypeInt *t = phase->type( val->in(2) )->isa_int();
2540 if( t && t->is_con() && (t->get_con() <= num_bits) ) {
2541 Node *shl = val->in(1);
2542 if( shl->Opcode() == Op_LShiftI ) {
2543 const TypeInt *t2 = phase->type( shl->in(2) )->isa_int();
2544 if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) {
2545 set_req(MemNode::ValueIn, shl->in(1));
2546 return this;
2547 }
2548 }
2549 }
2550 }
2551 return NULL;
2552 }
2553
2554 //------------------------------value_never_loaded-----------------------------------
2555 // Determine whether there are any possible loads of the value stored.
2556 // For simplicity, we actually check if there are any loads from the
2557 // address stored to, not just for loads of the value stored by this node.
2558 //
2559 bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
2560 Node *adr = in(Address);
2561 const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
2562 if (adr_oop == NULL)
2563 return false;
2564 if (!adr_oop->is_known_instance_field())
2565 return false; // if not a distinct instance, there may be aliases of the address
2566 for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
2567 Node *use = adr->fast_out(i);
2568 if (use->is_Load() || use->is_LoadStore()) {
2569 return false;
2570 }
2571 }
2572 return true;
2573 }
2574
2575 //=============================================================================
2576 //------------------------------Ideal------------------------------------------
2577 // If the store is from an AND mask that leaves the low bits untouched, then
2578 // we can skip the AND operation. If the store is from a sign-extension
2579 // (a left shift, then right shift) we can skip both.
2580 Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){
2581 Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF);
2582 if( progress != NULL ) return progress;
2583
2584 progress = StoreNode::Ideal_sign_extended_input(phase, 24);
2585 if( progress != NULL ) return progress;
2586
2587 // Finally check the default case
2588 return StoreNode::Ideal(phase, can_reshape);
2589 }
2590
2591 //=============================================================================
2592 //------------------------------Ideal------------------------------------------
2593 // If the store is from an AND mask that leaves the low bits untouched, then
2594 // we can skip the AND operation
2595 Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
2596 Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF);
2597 if( progress != NULL ) return progress;
2598
2599 progress = StoreNode::Ideal_sign_extended_input(phase, 16);
2600 if( progress != NULL ) return progress;
2601
2602 // Finally check the default case
2603 return StoreNode::Ideal(phase, can_reshape);
2604 }
2605
2606 //=============================================================================
2607 //------------------------------Identity---------------------------------------
2608 Node *StoreCMNode::Identity( PhaseTransform *phase ) {
2609 // No need to card mark when storing a null ptr
2610 Node* my_store = in(MemNode::OopStore);
2611 if (my_store->is_Store()) {
2612 const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
2613 if( t1 == TypePtr::NULL_PTR ) {
2614 return in(MemNode::Memory);
2615 }
2616 }
2617 return this;
2618 }
2619
2620 //=============================================================================
2621 //------------------------------Ideal---------------------------------------
2622 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
2623 Node* progress = StoreNode::Ideal(phase, can_reshape);
2624 if (progress != NULL) return progress;
2625
2626 Node* my_store = in(MemNode::OopStore);
2627 if (my_store->is_MergeMem()) {
2628 Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
2629 set_req(MemNode::OopStore, mem);
2630 return this;
2631 }
2632
2633 return NULL;
2634 }
2635
2636 //------------------------------Value-----------------------------------------
2637 const Type *StoreCMNode::Value( PhaseTransform *phase ) const {
2638 // Either input is TOP ==> the result is TOP
2639 const Type *t = phase->type( in(MemNode::Memory) );
2640 if( t == Type::TOP ) return Type::TOP;
2641 t = phase->type( in(MemNode::Address) );
2642 if( t == Type::TOP ) return Type::TOP;
2643 t = phase->type( in(MemNode::ValueIn) );
2644 if( t == Type::TOP ) return Type::TOP;
2645 // If extra input is TOP ==> the result is TOP
2646 t = phase->type( in(MemNode::OopStore) );
2647 if( t == Type::TOP ) return Type::TOP;
2648
2649 return StoreNode::Value( phase );
2650 }
2651
2652
2653 //=============================================================================
2654 //----------------------------------SCMemProjNode------------------------------
2655 const Type * SCMemProjNode::Value( PhaseTransform *phase ) const
2656 {
2657 return bottom_type();
2658 }
2659
2660 //=============================================================================
2661 //----------------------------------LoadStoreNode------------------------------
2662 LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
2663 : Node(required),
2664 _type(rt),
2665 _adr_type(at)
2666 {
2667 init_req(MemNode::Control, c );
2668 init_req(MemNode::Memory , mem);
2669 init_req(MemNode::Address, adr);
2670 init_req(MemNode::ValueIn, val);
2671 init_class_id(Class_LoadStore);
2672 }
2673
2674 uint LoadStoreNode::ideal_reg() const {
2675 return _type->ideal_reg();
2676 }
2677
2678 bool LoadStoreNode::result_not_used() const {
2679 for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
2680 Node *x = fast_out(i);
2681 if (x->Opcode() == Op_SCMemProj) continue;
2682 return false;
2683 }
2684 return true;
2685 }
2686
2687 uint LoadStoreNode::size_of() const { return sizeof(*this); }
2688
2689 //=============================================================================
2690 //----------------------------------LoadStoreConditionalNode--------------------
2691 LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
2692 init_req(ExpectedIn, ex );
2693 }
2694
2695 //=============================================================================
2696 //-------------------------------adr_type--------------------------------------
2697 const TypePtr* ClearArrayNode::adr_type() const {
2698 Node *adr = in(3);
2699 if (adr == NULL) return NULL; // node is dead
2700 return MemNode::calculate_adr_type(adr->bottom_type());
2701 }
2702
2703 //------------------------------match_edge-------------------------------------
2704 // Do we Match on this edge index or not? Do not match memory
2705 uint ClearArrayNode::match_edge(uint idx) const {
2706 return idx > 1;
2707 }
2708
2709 //------------------------------Identity---------------------------------------
2710 // Clearing a zero length array does nothing
2711 Node *ClearArrayNode::Identity( PhaseTransform *phase ) {
2712 return phase->type(in(2))->higher_equal(TypeX::ZERO) ? in(1) : this;
2713 }
2714
2715 //------------------------------Idealize---------------------------------------
2716 // Clearing a short array is faster with stores
2717 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
2718 const int unit = BytesPerLong;
2719 const TypeX* t = phase->type(in(2))->isa_intptr_t();
2720 if (!t) return NULL;
2721 if (!t->is_con()) return NULL;
2722 intptr_t raw_count = t->get_con();
2723 intptr_t size = raw_count;
2724 if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2725 // Clearing nothing uses the Identity call.
2726 // Negative clears are possible on dead ClearArrays
2727 // (see jck test stmt114.stmt11402.val).
2728 if (size <= 0 || size % unit != 0) return NULL;
2729 intptr_t count = size / unit;
2730 // Length too long; use fast hardware clear
2731 if (size > Matcher::init_array_short_size) return NULL;
2732 Node *mem = in(1);
2733 if( phase->type(mem)==Type::TOP ) return NULL;
2734 Node *adr = in(3);
2735 const Type* at = phase->type(adr);
2736 if( at==Type::TOP ) return NULL;
2737 const TypePtr* atp = at->isa_ptr();
2738 // adjust atp to be the correct array element address type
2739 if (atp == NULL) atp = TypePtr::BOTTOM;
2740 else atp = atp->add_offset(Type::OffsetBot);
2741 // Get base for derived pointer purposes
2742 if( adr->Opcode() != Op_AddP ) Unimplemented();
2743 Node *base = adr->in(1);
2744
2745 Node *zero = phase->makecon(TypeLong::ZERO);
2746 Node *off = phase->MakeConX(BytesPerLong);
2747 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2748 count--;
2749 while( count-- ) {
2750 mem = phase->transform(mem);
2751 adr = phase->transform(new AddPNode(base,adr,off));
2752 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2753 }
2754 return mem;
2755 }
2756
2757 //----------------------------step_through----------------------------------
2758 // Return allocation input memory edge if it is different instance
2759 // or itself if it is the one we are looking for.
2760 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
2761 Node* n = *np;
2762 assert(n->is_ClearArray(), "sanity");
2763 intptr_t offset;
2764 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
2765 // This method is called only before Allocate nodes are expanded
2766 // during macro nodes expansion. Before that ClearArray nodes are
2767 // only generated in PhaseMacroExpand::generate_arraycopy() (before
2768 // Allocate nodes are expanded) which follows allocations.
2769 assert(alloc != NULL, "should have allocation");
2770 if (alloc->_idx == instance_id) {
2771 // Can not bypass initialization of the instance we are looking for.
2772 return false;
2773 }
2774 // Otherwise skip it.
2775 InitializeNode* init = alloc->initialization();
2776 if (init != NULL)
2777 *np = init->in(TypeFunc::Memory);
2778 else
2779 *np = alloc->in(TypeFunc::Memory);
2780 return true;
2781 }
2782
2783 //----------------------------clear_memory-------------------------------------
2784 // Generate code to initialize object storage to zero.
2785 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2786 intptr_t start_offset,
2787 Node* end_offset,
2788 PhaseGVN* phase) {
2789 intptr_t offset = start_offset;
2790
2791 int unit = BytesPerLong;
2792 if ((offset % unit) != 0) {
2793 Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
2794 adr = phase->transform(adr);
2795 const TypePtr* atp = TypeRawPtr::BOTTOM;
2796 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
2797 mem = phase->transform(mem);
2798 offset += BytesPerInt;
2799 }
2800 assert((offset % unit) == 0, "");
2801
2802 // Initialize the remaining stuff, if any, with a ClearArray.
2803 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
2804 }
2805
2806 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2807 Node* start_offset,
2808 Node* end_offset,
2809 PhaseGVN* phase) {
2810 if (start_offset == end_offset) {
2811 // nothing to do
2812 return mem;
2813 }
2814
2815 int unit = BytesPerLong;
2816 Node* zbase = start_offset;
2817 Node* zend = end_offset;
2818
2819 // Scale to the unit required by the CPU:
2820 if (!Matcher::init_array_count_is_in_bytes) {
2821 Node* shift = phase->intcon(exact_log2(unit));
2822 zbase = phase->transform(new URShiftXNode(zbase, shift) );
2823 zend = phase->transform(new URShiftXNode(zend, shift) );
2824 }
2825
2826 // Bulk clear double-words
2827 Node* zsize = phase->transform(new SubXNode(zend, zbase) );
2828 Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
2829 mem = new ClearArrayNode(ctl, mem, zsize, adr);
2830 return phase->transform(mem);
2831 }
2832
2833 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2834 intptr_t start_offset,
2835 intptr_t end_offset,
2836 PhaseGVN* phase) {
2837 if (start_offset == end_offset) {
2838 // nothing to do
2839 return mem;
2840 }
2841
2842 assert((end_offset % BytesPerInt) == 0, "odd end offset");
2843 intptr_t done_offset = end_offset;
2844 if ((done_offset % BytesPerLong) != 0) {
2845 done_offset -= BytesPerInt;
2846 }
2847 if (done_offset > start_offset) {
2848 mem = clear_memory(ctl, mem, dest,
2849 start_offset, phase->MakeConX(done_offset), phase);
2850 }
2851 if (done_offset < end_offset) { // emit the final 32-bit store
2852 Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
2853 adr = phase->transform(adr);
2854 const TypePtr* atp = TypeRawPtr::BOTTOM;
2855 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
2856 mem = phase->transform(mem);
2857 done_offset += BytesPerInt;
2858 }
2859 assert(done_offset == end_offset, "");
2860 return mem;
2861 }
2862
2863 //=============================================================================
2864 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
2865 : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
2866 _adr_type(C->get_adr_type(alias_idx))
2867 {
2868 init_class_id(Class_MemBar);
2869 Node* top = C->top();
2870 init_req(TypeFunc::I_O,top);
2871 init_req(TypeFunc::FramePtr,top);
2872 init_req(TypeFunc::ReturnAdr,top);
2873 if (precedent != NULL)
2874 init_req(TypeFunc::Parms, precedent);
2875 }
2876
2877 //------------------------------cmp--------------------------------------------
2878 uint MemBarNode::hash() const { return NO_HASH; }
2879 uint MemBarNode::cmp( const Node &n ) const {
2880 return (&n == this); // Always fail except on self
2881 }
2882
2883 //------------------------------make-------------------------------------------
2884 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
2885 switch (opcode) {
2886 case Op_MemBarAcquire: return new MemBarAcquireNode(C, atp, pn);
2887 case Op_LoadFence: return new LoadFenceNode(C, atp, pn);
2888 case Op_MemBarRelease: return new MemBarReleaseNode(C, atp, pn);
2889 case Op_StoreFence: return new StoreFenceNode(C, atp, pn);
2890 case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
2891 case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
2892 case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn);
2893 case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn);
2894 case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn);
2895 case Op_Initialize: return new InitializeNode(C, atp, pn);
2896 case Op_MemBarStoreStore: return new MemBarStoreStoreNode(C, atp, pn);
2897 default: ShouldNotReachHere(); return NULL;
2898 }
2899 }
2900
2901 //------------------------------Ideal------------------------------------------
2902 // Return a node which is more "ideal" than the current node. Strip out
2903 // control copies
2904 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2905 if (remove_dead_region(phase, can_reshape)) return this;
2906 // Don't bother trying to transform a dead node
2907 if (in(0) && in(0)->is_top()) {
2908 return NULL;
2909 }
2910
2911 bool progress = false;
2912 // Eliminate volatile MemBars for scalar replaced objects.
2913 if (can_reshape && req() == (Precedent+1)) {
2914 bool eliminate = false;
2915 int opc = Opcode();
2916 if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
2917 // Volatile field loads and stores.
2918 Node* my_mem = in(MemBarNode::Precedent);
2919 // The MembarAquire may keep an unused LoadNode alive through the Precedent edge
2920 if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
2921 // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
2922 // replace this Precedent (decodeN) with the Load instead.
2923 if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
2924 Node* load_node = my_mem->in(1);
2925 set_req(MemBarNode::Precedent, load_node);
2926 phase->is_IterGVN()->_worklist.push(my_mem);
2927 my_mem = load_node;
2928 } else {
2929 assert(my_mem->unique_out() == this, "sanity");
2930 del_req(Precedent);
2931 phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
2932 my_mem = NULL;
2933 }
2934 progress = true;
2935 }
2936 if (my_mem != NULL && my_mem->is_Mem()) {
2937 const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
2938 // Check for scalar replaced object reference.
2939 if( t_oop != NULL && t_oop->is_known_instance_field() &&
2940 t_oop->offset() != Type::OffsetBot &&
2941 t_oop->offset() != Type::OffsetTop) {
2942 eliminate = true;
2943 }
2944 }
2945 } else if (opc == Op_MemBarRelease) {
2946 // Final field stores.
2947 Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
2948 if ((alloc != NULL) && alloc->is_Allocate() &&
2949 alloc->as_Allocate()->does_not_escape_thread()) {
2950 // The allocated object does not escape.
2951 eliminate = true;
2952 }
2953 }
2954 if (eliminate) {
2955 // Replace MemBar projections by its inputs.
2956 PhaseIterGVN* igvn = phase->is_IterGVN();
2957 igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
2958 igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
2959 // Must return either the original node (now dead) or a new node
2960 // (Do not return a top here, since that would break the uniqueness of top.)
2961 return new ConINode(TypeInt::ZERO);
2962 }
2963 }
2964 return progress ? this : NULL;
2965 }
2966
2967 //------------------------------Value------------------------------------------
2968 const Type *MemBarNode::Value( PhaseTransform *phase ) const {
2969 if( !in(0) ) return Type::TOP;
2970 if( phase->type(in(0)) == Type::TOP )
2971 return Type::TOP;
2972 return TypeTuple::MEMBAR;
2973 }
2974
2975 //------------------------------match------------------------------------------
2976 // Construct projections for memory.
2977 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
2978 switch (proj->_con) {
2979 case TypeFunc::Control:
2980 case TypeFunc::Memory:
2981 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
2982 }
2983 ShouldNotReachHere();
2984 return NULL;
2985 }
2986
2987 //===========================InitializeNode====================================
2988 // SUMMARY:
2989 // This node acts as a memory barrier on raw memory, after some raw stores.
2990 // The 'cooked' oop value feeds from the Initialize, not the Allocation.
2991 // The Initialize can 'capture' suitably constrained stores as raw inits.
2992 // It can coalesce related raw stores into larger units (called 'tiles').
2993 // It can avoid zeroing new storage for memory units which have raw inits.
2994 // At macro-expansion, it is marked 'complete', and does not optimize further.
2995 //
2996 // EXAMPLE:
2997 // The object 'new short[2]' occupies 16 bytes in a 32-bit machine.
2998 // ctl = incoming control; mem* = incoming memory
2999 // (Note: A star * on a memory edge denotes I/O and other standard edges.)
3000 // First allocate uninitialized memory and fill in the header:
3001 // alloc = (Allocate ctl mem* 16 #short[].klass ...)
3002 // ctl := alloc.Control; mem* := alloc.Memory*
3003 // rawmem = alloc.Memory; rawoop = alloc.RawAddress
3004 // Then initialize to zero the non-header parts of the raw memory block:
3005 // init = (Initialize alloc.Control alloc.Memory* alloc.RawAddress)
3006 // ctl := init.Control; mem.SLICE(#short[*]) := init.Memory
3007 // After the initialize node executes, the object is ready for service:
3008 // oop := (CheckCastPP init.Control alloc.RawAddress #short[])
3009 // Suppose its body is immediately initialized as {1,2}:
3010 // store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
3011 // store2 = (StoreC init.Control store1 (+ oop 14) 2)
3012 // mem.SLICE(#short[*]) := store2
3013 //
3014 // DETAILS:
3015 // An InitializeNode collects and isolates object initialization after
3016 // an AllocateNode and before the next possible safepoint. As a
3017 // memory barrier (MemBarNode), it keeps critical stores from drifting
3018 // down past any safepoint or any publication of the allocation.
3019 // Before this barrier, a newly-allocated object may have uninitialized bits.
3020 // After this barrier, it may be treated as a real oop, and GC is allowed.
3021 //
3022 // The semantics of the InitializeNode include an implicit zeroing of
3023 // the new object from object header to the end of the object.
3024 // (The object header and end are determined by the AllocateNode.)
3025 //
3026 // Certain stores may be added as direct inputs to the InitializeNode.
3027 // These stores must update raw memory, and they must be to addresses
3028 // derived from the raw address produced by AllocateNode, and with
3029 // a constant offset. They must be ordered by increasing offset.
3030 // The first one is at in(RawStores), the last at in(req()-1).
3031 // Unlike most memory operations, they are not linked in a chain,
3032 // but are displayed in parallel as users of the rawmem output of
3033 // the allocation.
3034 //
3035 // (See comments in InitializeNode::capture_store, which continue
3036 // the example given above.)
3037 //
3038 // When the associated Allocate is macro-expanded, the InitializeNode
3039 // may be rewritten to optimize collected stores. A ClearArrayNode
3040 // may also be created at that point to represent any required zeroing.
3041 // The InitializeNode is then marked 'complete', prohibiting further
3042 // capturing of nearby memory operations.
3043 //
3044 // During macro-expansion, all captured initializations which store
3045 // constant values of 32 bits or smaller are coalesced (if advantageous)
3046 // into larger 'tiles' 32 or 64 bits. This allows an object to be
3047 // initialized in fewer memory operations. Memory words which are
3048 // covered by neither tiles nor non-constant stores are pre-zeroed
3049 // by explicit stores of zero. (The code shape happens to do all
3050 // zeroing first, then all other stores, with both sequences occurring
3051 // in order of ascending offsets.)
3052 //
3053 // Alternatively, code may be inserted between an AllocateNode and its
3054 // InitializeNode, to perform arbitrary initialization of the new object.
3055 // E.g., the object copying intrinsics insert complex data transfers here.
3056 // The initialization must then be marked as 'complete' disable the
3057 // built-in zeroing semantics and the collection of initializing stores.
3058 //
3059 // While an InitializeNode is incomplete, reads from the memory state
3060 // produced by it are optimizable if they match the control edge and
3061 // new oop address associated with the allocation/initialization.
3062 // They return a stored value (if the offset matches) or else zero.
3063 // A write to the memory state, if it matches control and address,
3064 // and if it is to a constant offset, may be 'captured' by the
3065 // InitializeNode. It is cloned as a raw memory operation and rewired
3066 // inside the initialization, to the raw oop produced by the allocation.
3067 // Operations on addresses which are provably distinct (e.g., to
3068 // other AllocateNodes) are allowed to bypass the initialization.
3069 //
3070 // The effect of all this is to consolidate object initialization
3071 // (both arrays and non-arrays, both piecewise and bulk) into a
3072 // single location, where it can be optimized as a unit.
3073 //
3074 // Only stores with an offset less than TrackedInitializationLimit words
3075 // will be considered for capture by an InitializeNode. This puts a
3076 // reasonable limit on the complexity of optimized initializations.
3077
3078 //---------------------------InitializeNode------------------------------------
3079 InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
3080 : _is_complete(Incomplete), _does_not_escape(false),
3081 MemBarNode(C, adr_type, rawoop)
3082 {
3083 init_class_id(Class_Initialize);
3084
3085 assert(adr_type == Compile::AliasIdxRaw, "only valid atp");
3086 assert(in(RawAddress) == rawoop, "proper init");
3087 // Note: allocation() can be NULL, for secondary initialization barriers
3088 }
3089
3090 // Since this node is not matched, it will be processed by the
3091 // register allocator. Declare that there are no constraints
3092 // on the allocation of the RawAddress edge.
3093 const RegMask &InitializeNode::in_RegMask(uint idx) const {
3094 // This edge should be set to top, by the set_complete. But be conservative.
3095 if (idx == InitializeNode::RawAddress)
3096 return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
3097 return RegMask::Empty;
3098 }
3099
3100 Node* InitializeNode::memory(uint alias_idx) {
3101 Node* mem = in(Memory);
3102 if (mem->is_MergeMem()) {
3103 return mem->as_MergeMem()->memory_at(alias_idx);
3104 } else {
3105 // incoming raw memory is not split
3106 return mem;
3107 }
3108 }
3109
3110 bool InitializeNode::is_non_zero() {
3111 if (is_complete()) return false;
3112 remove_extra_zeroes();
3113 return (req() > RawStores);
3114 }
3115
3116 void InitializeNode::set_complete(PhaseGVN* phase) {
3117 assert(!is_complete(), "caller responsibility");
3118 _is_complete = Complete;
3119
3120 // After this node is complete, it contains a bunch of
3121 // raw-memory initializations. There is no need for
3122 // it to have anything to do with non-raw memory effects.
3123 // Therefore, tell all non-raw users to re-optimize themselves,
3124 // after skipping the memory effects of this initialization.
3125 PhaseIterGVN* igvn = phase->is_IterGVN();
3126 if (igvn) igvn->add_users_to_worklist(this);
3127 }
3128
3129 // convenience function
3130 // return false if the init contains any stores already
3131 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3132 InitializeNode* init = initialization();
3133 if (init == NULL || init->is_complete()) return false;
3134 init->remove_extra_zeroes();
3135 // for now, if this allocation has already collected any inits, bail:
3136 if (init->is_non_zero()) return false;
3137 init->set_complete(phase);
3138 return true;
3139 }
3140
3141 void InitializeNode::remove_extra_zeroes() {
3142 if (req() == RawStores) return;
3143 Node* zmem = zero_memory();
3144 uint fill = RawStores;
3145 for (uint i = fill; i < req(); i++) {
3146 Node* n = in(i);
3147 if (n->is_top() || n == zmem) continue; // skip
3148 if (fill < i) set_req(fill, n); // compact
3149 ++fill;
3150 }
3151 // delete any empty spaces created:
3152 while (fill < req()) {
3153 del_req(fill);
3154 }
3155 }
3156
3157 // Helper for remembering which stores go with which offsets.
3158 intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) {
3159 if (!st->is_Store()) return -1; // can happen to dead code via subsume_node
3160 intptr_t offset = -1;
3161 Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address),
3162 phase, offset);
3163 if (base == NULL) return -1; // something is dead,
3164 if (offset < 0) return -1; // dead, dead
3165 return offset;
3166 }
3167
3168 // Helper for proving that an initialization expression is
3169 // "simple enough" to be folded into an object initialization.
3170 // Attempts to prove that a store's initial value 'n' can be captured
3171 // within the initialization without creating a vicious cycle, such as:
3172 // { Foo p = new Foo(); p.next = p; }
3173 // True for constants and parameters and small combinations thereof.
3174 bool InitializeNode::detect_init_independence(Node* n, int& count) {
3175 if (n == NULL) return true; // (can this really happen?)
3176 if (n->is_Proj()) n = n->in(0);
3177 if (n == this) return false; // found a cycle
3178 if (n->is_Con()) return true;
3179 if (n->is_Start()) return true; // params, etc., are OK
3180 if (n->is_Root()) return true; // even better
3181
3182 Node* ctl = n->in(0);
3183 if (ctl != NULL && !ctl->is_top()) {
3184 if (ctl->is_Proj()) ctl = ctl->in(0);
3185 if (ctl == this) return false;
3186
3187 // If we already know that the enclosing memory op is pinned right after
3188 // the init, then any control flow that the store has picked up
3189 // must have preceded the init, or else be equal to the init.
3190 // Even after loop optimizations (which might change control edges)
3191 // a store is never pinned *before* the availability of its inputs.
3192 if (!MemNode::all_controls_dominate(n, this))
3193 return false; // failed to prove a good control
3194 }
3195
3196 // Check data edges for possible dependencies on 'this'.
3197 if ((count += 1) > 20) return false; // complexity limit
3198 for (uint i = 1; i < n->req(); i++) {
3199 Node* m = n->in(i);
3200 if (m == NULL || m == n || m->is_top()) continue;
3201 uint first_i = n->find_edge(m);
3202 if (i != first_i) continue; // process duplicate edge just once
3203 if (!detect_init_independence(m, count)) {
3204 return false;
3205 }
3206 }
3207
3208 return true;
3209 }
3210
3211 // Here are all the checks a Store must pass before it can be moved into
3212 // an initialization. Returns zero if a check fails.
3213 // On success, returns the (constant) offset to which the store applies,
3214 // within the initialized memory.
3215 intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
3216 const int FAIL = 0;
3217 if (st->req() != MemNode::ValueIn + 1)
3218 return FAIL; // an inscrutable StoreNode (card mark?)
3219 Node* ctl = st->in(MemNode::Control);
3220 if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this))
3221 return FAIL; // must be unconditional after the initialization
3222 Node* mem = st->in(MemNode::Memory);
3223 if (!(mem->is_Proj() && mem->in(0) == this))
3224 return FAIL; // must not be preceded by other stores
3225 Node* adr = st->in(MemNode::Address);
3226 intptr_t offset;
3227 AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
3228 if (alloc == NULL)
3229 return FAIL; // inscrutable address
3230 if (alloc != allocation())
3231 return FAIL; // wrong allocation! (store needs to float up)
3232 Node* val = st->in(MemNode::ValueIn);
3233 int complexity_count = 0;
3234 if (!detect_init_independence(val, complexity_count))
3235 return FAIL; // stored value must be 'simple enough'
3236
3237 // The Store can be captured only if nothing after the allocation
3238 // and before the Store is using the memory location that the store
3239 // overwrites.
3240 bool failed = false;
3241 // If is_complete_with_arraycopy() is true the shape of the graph is
3242 // well defined and is safe so no need for extra checks.
3243 if (!is_complete_with_arraycopy()) {
3244 // We are going to look at each use of the memory state following
3245 // the allocation to make sure nothing reads the memory that the
3246 // Store writes.
3247 const TypePtr* t_adr = phase->type(adr)->isa_ptr();
3248 int alias_idx = phase->C->get_alias_index(t_adr);
3249 ResourceMark rm;
3250 Unique_Node_List mems;
3251 mems.push(mem);
3252 Node* unique_merge = NULL;
3253 for (uint next = 0; next < mems.size(); ++next) {
3254 Node *m = mems.at(next);
3255 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
3256 Node *n = m->fast_out(j);
3257 if (n->outcnt() == 0) {
3258 continue;
3259 }
3260 if (n == st) {
3261 continue;
3262 } else if (n->in(0) != NULL && n->in(0) != ctl) {
3263 // If the control of this use is different from the control
3264 // of the Store which is right after the InitializeNode then
3265 // this node cannot be between the InitializeNode and the
3266 // Store.
3267 continue;
3268 } else if (n->is_MergeMem()) {
3269 if (n->as_MergeMem()->memory_at(alias_idx) == m) {
3270 // We can hit a MergeMemNode (that will likely go away
3271 // later) that is a direct use of the memory state
3272 // following the InitializeNode on the same slice as the
3273 // store node that we'd like to capture. We need to check
3274 // the uses of the MergeMemNode.
3275 mems.push(n);
3276 }
3277 } else if (n->is_Mem()) {
3278 Node* other_adr = n->in(MemNode::Address);
3279 if (other_adr == adr) {
3280 failed = true;
3281 break;
3282 } else {
3283 const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
3284 if (other_t_adr != NULL) {
3285 int other_alias_idx = phase->C->get_alias_index(other_t_adr);
3286 if (other_alias_idx == alias_idx) {
3287 // A load from the same memory slice as the store right
3288 // after the InitializeNode. We check the control of the
3289 // object/array that is loaded from. If it's the same as
3290 // the store control then we cannot capture the store.
3291 assert(!n->is_Store(), "2 stores to same slice on same control?");
3292 Node* base = other_adr;
3293 assert(base->is_AddP(), "should be addp but is %s", base->Name());
3294 base = base->in(AddPNode::Base);
3295 if (base != NULL) {
3296 base = base->uncast();
3297 if (base->is_Proj() && base->in(0) == alloc) {
3298 failed = true;
3299 break;
3300 }
3301 }
3302 }
3303 }
3304 }
3305 } else {
3306 failed = true;
3307 break;
3308 }
3309 }
3310 }
3311 }
3312 if (failed) {
3313 if (!can_reshape) {
3314 // We decided we couldn't capture the store during parsing. We
3315 // should try again during the next IGVN once the graph is
3316 // cleaner.
3317 phase->C->record_for_igvn(st);
3318 }
3319 return FAIL;
3320 }
3321
3322 return offset; // success
3323 }
3324
3325 // Find the captured store in(i) which corresponds to the range
3326 // [start..start+size) in the initialized object.
3327 // If there is one, return its index i. If there isn't, return the
3328 // negative of the index where it should be inserted.
3329 // Return 0 if the queried range overlaps an initialization boundary
3330 // or if dead code is encountered.
3331 // If size_in_bytes is zero, do not bother with overlap checks.
3332 int InitializeNode::captured_store_insertion_point(intptr_t start,
3333 int size_in_bytes,
3334 PhaseTransform* phase) {
3335 const int FAIL = 0, MAX_STORE = BytesPerLong;
3336
3337 if (is_complete())
3338 return FAIL; // arraycopy got here first; punt
3339
3340 assert(allocation() != NULL, "must be present");
3341
3342 // no negatives, no header fields:
3343 if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL;
3344
3345 // after a certain size, we bail out on tracking all the stores:
3346 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
3347 if (start >= ti_limit) return FAIL;
3348
3349 for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
3350 if (i >= limit) return -(int)i; // not found; here is where to put it
3351
3352 Node* st = in(i);
3353 intptr_t st_off = get_store_offset(st, phase);
3354 if (st_off < 0) {
3355 if (st != zero_memory()) {
3356 return FAIL; // bail out if there is dead garbage
3357 }
3358 } else if (st_off > start) {
3359 // ...we are done, since stores are ordered
3360 if (st_off < start + size_in_bytes) {
3361 return FAIL; // the next store overlaps
3362 }
3363 return -(int)i; // not found; here is where to put it
3364 } else if (st_off < start) {
3365 if (size_in_bytes != 0 &&
3366 start < st_off + MAX_STORE &&
3367 start < st_off + st->as_Store()->memory_size()) {
3368 return FAIL; // the previous store overlaps
3369 }
3370 } else {
3371 if (size_in_bytes != 0 &&
3372 st->as_Store()->memory_size() != size_in_bytes) {
3373 return FAIL; // mismatched store size
3374 }
3375 return i;
3376 }
3377
3378 ++i;
3379 }
3380 }
3381
3382 // Look for a captured store which initializes at the offset 'start'
3383 // with the given size. If there is no such store, and no other
3384 // initialization interferes, then return zero_memory (the memory
3385 // projection of the AllocateNode).
3386 Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
3387 PhaseTransform* phase) {
3388 assert(stores_are_sane(phase), "");
3389 int i = captured_store_insertion_point(start, size_in_bytes, phase);
3390 if (i == 0) {
3391 return NULL; // something is dead
3392 } else if (i < 0) {
3393 return zero_memory(); // just primordial zero bits here
3394 } else {
3395 Node* st = in(i); // here is the store at this position
3396 assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
3397 return st;
3398 }
3399 }
3400
3401 // Create, as a raw pointer, an address within my new object at 'offset'.
3402 Node* InitializeNode::make_raw_address(intptr_t offset,
3403 PhaseTransform* phase) {
3404 Node* addr = in(RawAddress);
3405 if (offset != 0) {
3406 Compile* C = phase->C;
3407 addr = phase->transform( new AddPNode(C->top(), addr,
3408 phase->MakeConX(offset)) );
3409 }
3410 return addr;
3411 }
3412
3413 // Clone the given store, converting it into a raw store
3414 // initializing a field or element of my new object.
3415 // Caller is responsible for retiring the original store,
3416 // with subsume_node or the like.
3417 //
3418 // From the example above InitializeNode::InitializeNode,
3419 // here are the old stores to be captured:
3420 // store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
3421 // store2 = (StoreC init.Control store1 (+ oop 14) 2)
3422 //
3423 // Here is the changed code; note the extra edges on init:
3424 // alloc = (Allocate ...)
3425 // rawoop = alloc.RawAddress
3426 // rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
3427 // rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
3428 // init = (Initialize alloc.Control alloc.Memory rawoop
3429 // rawstore1 rawstore2)
3430 //
3431 Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
3432 PhaseTransform* phase, bool can_reshape) {
3433 assert(stores_are_sane(phase), "");
3434
3435 if (start < 0) return NULL;
3436 assert(can_capture_store(st, phase, can_reshape) == start, "sanity");
3437
3438 Compile* C = phase->C;
3439 int size_in_bytes = st->memory_size();
3440 int i = captured_store_insertion_point(start, size_in_bytes, phase);
3441 if (i == 0) return NULL; // bail out
3442 Node* prev_mem = NULL; // raw memory for the captured store
3443 if (i > 0) {
3444 prev_mem = in(i); // there is a pre-existing store under this one
3445 set_req(i, C->top()); // temporarily disconnect it
3446 // See StoreNode::Ideal 'st->outcnt() == 1' for the reason to disconnect.
3447 } else {
3448 i = -i; // no pre-existing store
3449 prev_mem = zero_memory(); // a slice of the newly allocated object
3450 if (i > InitializeNode::RawStores && in(i-1) == prev_mem)
3451 set_req(--i, C->top()); // reuse this edge; it has been folded away
3452 else
3453 ins_req(i, C->top()); // build a new edge
3454 }
3455 Node* new_st = st->clone();
3456 new_st->set_req(MemNode::Control, in(Control));
3457 new_st->set_req(MemNode::Memory, prev_mem);
3458 new_st->set_req(MemNode::Address, make_raw_address(start, phase));
3459 new_st = phase->transform(new_st);
3460
3461 // At this point, new_st might have swallowed a pre-existing store
3462 // at the same offset, or perhaps new_st might have disappeared,
3463 // if it redundantly stored the same value (or zero to fresh memory).
3464
3465 // In any case, wire it in:
3466 phase->igvn_rehash_node_delayed(this);
3467 set_req(i, new_st);
3468
3469 // The caller may now kill the old guy.
3470 DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase));
3471 assert(check_st == new_st || check_st == NULL, "must be findable");
3472 assert(!is_complete(), "");
3473 return new_st;
3474 }
3475
3476 static bool store_constant(jlong* tiles, int num_tiles,
3477 intptr_t st_off, int st_size,
3478 jlong con) {
3479 if ((st_off & (st_size-1)) != 0)
3480 return false; // strange store offset (assume size==2**N)
3481 address addr = (address)tiles + st_off;
3482 assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob");
3483 switch (st_size) {
3484 case sizeof(jbyte): *(jbyte*) addr = (jbyte) con; break;
3485 case sizeof(jchar): *(jchar*) addr = (jchar) con; break;
3486 case sizeof(jint): *(jint*) addr = (jint) con; break;
3487 case sizeof(jlong): *(jlong*) addr = (jlong) con; break;
3488 default: return false; // strange store size (detect size!=2**N here)
3489 }
3490 return true; // return success to caller
3491 }
3492
3493 // Coalesce subword constants into int constants and possibly
3494 // into long constants. The goal, if the CPU permits,
3495 // is to initialize the object with a small number of 64-bit tiles.
3496 // Also, convert floating-point constants to bit patterns.
3497 // Non-constants are not relevant to this pass.
3498 //
3499 // In terms of the running example on InitializeNode::InitializeNode
3500 // and InitializeNode::capture_store, here is the transformation
3501 // of rawstore1 and rawstore2 into rawstore12:
3502 // alloc = (Allocate ...)
3503 // rawoop = alloc.RawAddress
3504 // tile12 = 0x00010002
3505 // rawstore12 = (StoreI alloc.Control alloc.Memory (+ rawoop 12) tile12)
3506 // init = (Initialize alloc.Control alloc.Memory rawoop rawstore12)
3507 //
3508 void
3509 InitializeNode::coalesce_subword_stores(intptr_t header_size,
3510 Node* size_in_bytes,
3511 PhaseGVN* phase) {
3512 Compile* C = phase->C;
3513
3514 assert(stores_are_sane(phase), "");
3515 // Note: After this pass, they are not completely sane,
3516 // since there may be some overlaps.
3517
3518 int old_subword = 0, old_long = 0, new_int = 0, new_long = 0;
3519
3520 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
3521 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
3522 size_limit = MIN2(size_limit, ti_limit);
3523 size_limit = align_size_up(size_limit, BytesPerLong);
3524 int num_tiles = size_limit / BytesPerLong;
3525
3526 // allocate space for the tile map:
3527 const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small
3528 jlong tiles_buf[small_len];
3529 Node* nodes_buf[small_len];
3530 jlong inits_buf[small_len];
3531 jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0]
3532 : NEW_RESOURCE_ARRAY(jlong, num_tiles));
3533 Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0]
3534 : NEW_RESOURCE_ARRAY(Node*, num_tiles));
3535 jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0]
3536 : NEW_RESOURCE_ARRAY(jlong, num_tiles));
3537 // tiles: exact bitwise model of all primitive constants
3538 // nodes: last constant-storing node subsumed into the tiles model
3539 // inits: which bytes (in each tile) are touched by any initializations
3540
3541 //// Pass A: Fill in the tile model with any relevant stores.
3542
3543 Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles);
3544 Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles);
3545 Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles);
3546 Node* zmem = zero_memory(); // initially zero memory state
3547 for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
3548 Node* st = in(i);
3549 intptr_t st_off = get_store_offset(st, phase);
3550
3551 // Figure out the store's offset and constant value:
3552 if (st_off < header_size) continue; //skip (ignore header)
3553 if (st->in(MemNode::Memory) != zmem) continue; //skip (odd store chain)
3554 int st_size = st->as_Store()->memory_size();
3555 if (st_off + st_size > size_limit) break;
3556
3557 // Record which bytes are touched, whether by constant or not.
3558 if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1))
3559 continue; // skip (strange store size)
3560
3561 const Type* val = phase->type(st->in(MemNode::ValueIn));
3562 if (!val->singleton()) continue; //skip (non-con store)
3563 BasicType type = val->basic_type();
3564
3565 jlong con = 0;
3566 switch (type) {
3567 case T_INT: con = val->is_int()->get_con(); break;
3568 case T_LONG: con = val->is_long()->get_con(); break;
3569 case T_FLOAT: con = jint_cast(val->getf()); break;
3570 case T_DOUBLE: con = jlong_cast(val->getd()); break;
3571 default: continue; //skip (odd store type)
3572 }
3573
3574 if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
3575 st->Opcode() == Op_StoreL) {
3576 continue; // This StoreL is already optimal.
3577 }
3578
3579 // Store down the constant.
3580 store_constant(tiles, num_tiles, st_off, st_size, con);
3581
3582 intptr_t j = st_off >> LogBytesPerLong;
3583
3584 if (type == T_INT && st_size == BytesPerInt
3585 && (st_off & BytesPerInt) == BytesPerInt) {
3586 jlong lcon = tiles[j];
3587 if (!Matcher::isSimpleConstant64(lcon) &&
3588 st->Opcode() == Op_StoreI) {
3589 // This StoreI is already optimal by itself.
3590 jint* intcon = (jint*) &tiles[j];
3591 intcon[1] = 0; // undo the store_constant()
3592
3593 // If the previous store is also optimal by itself, back up and
3594 // undo the action of the previous loop iteration... if we can.
3595 // But if we can't, just let the previous half take care of itself.
3596 st = nodes[j];
3597 st_off -= BytesPerInt;
3598 con = intcon[0];
3599 if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
3600 assert(st_off >= header_size, "still ignoring header");
3601 assert(get_store_offset(st, phase) == st_off, "must be");
3602 assert(in(i-1) == zmem, "must be");
3603 DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
3604 assert(con == tcon->is_int()->get_con(), "must be");
3605 // Undo the effects of the previous loop trip, which swallowed st:
3606 intcon[0] = 0; // undo store_constant()
3607 set_req(i-1, st); // undo set_req(i, zmem)
3608 nodes[j] = NULL; // undo nodes[j] = st
3609 --old_subword; // undo ++old_subword
3610 }
3611 continue; // This StoreI is already optimal.
3612 }
3613 }
3614
3615 // This store is not needed.
3616 set_req(i, zmem);
3617 nodes[j] = st; // record for the moment
3618 if (st_size < BytesPerLong) // something has changed
3619 ++old_subword; // includes int/float, but who's counting...
3620 else ++old_long;
3621 }
3622
3623 if ((old_subword + old_long) == 0)
3624 return; // nothing more to do
3625
3626 //// Pass B: Convert any non-zero tiles into optimal constant stores.
3627 // Be sure to insert them before overlapping non-constant stores.
3628 // (E.g., byte[] x = { 1,2,y,4 } => x[int 0] = 0x01020004, x[2]=y.)
3629 for (int j = 0; j < num_tiles; j++) {
3630 jlong con = tiles[j];
3631 jlong init = inits[j];
3632 if (con == 0) continue;
3633 jint con0, con1; // split the constant, address-wise
3634 jint init0, init1; // split the init map, address-wise
3635 { union { jlong con; jint intcon[2]; } u;
3636 u.con = con;
3637 con0 = u.intcon[0];
3638 con1 = u.intcon[1];
3639 u.con = init;
3640 init0 = u.intcon[0];
3641 init1 = u.intcon[1];
3642 }
3643
3644 Node* old = nodes[j];
3645 assert(old != NULL, "need the prior store");
3646 intptr_t offset = (j * BytesPerLong);
3647
3648 bool split = !Matcher::isSimpleConstant64(con);
3649
3650 if (offset < header_size) {
3651 assert(offset + BytesPerInt >= header_size, "second int counts");
3652 assert(*(jint*)&tiles[j] == 0, "junk in header");
3653 split = true; // only the second word counts
3654 // Example: int a[] = { 42 ... }
3655 } else if (con0 == 0 && init0 == -1) {
3656 split = true; // first word is covered by full inits
3657 // Example: int a[] = { ... foo(), 42 ... }
3658 } else if (con1 == 0 && init1 == -1) {
3659 split = true; // second word is covered by full inits
3660 // Example: int a[] = { ... 42, foo() ... }
3661 }
3662
3663 // Here's a case where init0 is neither 0 nor -1:
3664 // byte a[] = { ... 0,0,foo(),0, 0,0,0,42 ... }
3665 // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
3666 // In this case the tile is not split; it is (jlong)42.
3667 // The big tile is stored down, and then the foo() value is inserted.
3668 // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)
3669
3670 Node* ctl = old->in(MemNode::Control);
3671 Node* adr = make_raw_address(offset, phase);
3672 const TypePtr* atp = TypeRawPtr::BOTTOM;
3673
3674 // One or two coalesced stores to plop down.
3675 Node* st[2];
3676 intptr_t off[2];
3677 int nst = 0;
3678 if (!split) {
3679 ++new_long;
3680 off[nst] = offset;
3681 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3682 phase->longcon(con), T_LONG, MemNode::unordered);
3683 } else {
3684 // Omit either if it is a zero.
3685 if (con0 != 0) {
3686 ++new_int;
3687 off[nst] = offset;
3688 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3689 phase->intcon(con0), T_INT, MemNode::unordered);
3690 }
3691 if (con1 != 0) {
3692 ++new_int;
3693 offset += BytesPerInt;
3694 adr = make_raw_address(offset, phase);
3695 off[nst] = offset;
3696 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
3697 phase->intcon(con1), T_INT, MemNode::unordered);
3698 }
3699 }
3700
3701 // Insert second store first, then the first before the second.
3702 // Insert each one just before any overlapping non-constant stores.
3703 while (nst > 0) {
3704 Node* st1 = st[--nst];
3705 C->copy_node_notes_to(st1, old);
3706 st1 = phase->transform(st1);
3707 offset = off[nst];
3708 assert(offset >= header_size, "do not smash header");
3709 int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
3710 guarantee(ins_idx != 0, "must re-insert constant store");
3711 if (ins_idx < 0) ins_idx = -ins_idx; // never overlap
3712 if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
3713 set_req(--ins_idx, st1);
3714 else
3715 ins_req(ins_idx, st1);
3716 }
3717 }
3718
3719 if (PrintCompilation && WizardMode)
3720 tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long",
3721 old_subword, old_long, new_int, new_long);
3722 if (C->log() != NULL)
3723 C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'",
3724 old_subword, old_long, new_int, new_long);
3725
3726 // Clean up any remaining occurrences of zmem:
3727 remove_extra_zeroes();
3728 }
3729
3730 // Explore forward from in(start) to find the first fully initialized
3731 // word, and return its offset. Skip groups of subword stores which
3732 // together initialize full words. If in(start) is itself part of a
3733 // fully initialized word, return the offset of in(start). If there
3734 // are no following full-word stores, or if something is fishy, return
3735 // a negative value.
3736 intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
3737 int int_map = 0;
3738 intptr_t int_map_off = 0;
3739 const int FULL_MAP = right_n_bits(BytesPerInt); // the int_map we hope for
3740
3741 for (uint i = start, limit = req(); i < limit; i++) {
3742 Node* st = in(i);
3743
3744 intptr_t st_off = get_store_offset(st, phase);
3745 if (st_off < 0) break; // return conservative answer
3746
3747 int st_size = st->as_Store()->memory_size();
3748 if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) {
3749 return st_off; // we found a complete word init
3750 }
3751
3752 // update the map:
3753
3754 intptr_t this_int_off = align_size_down(st_off, BytesPerInt);
3755 if (this_int_off != int_map_off) {
3756 // reset the map:
3757 int_map = 0;
3758 int_map_off = this_int_off;
3759 }
3760
3761 int subword_off = st_off - this_int_off;
3762 int_map |= right_n_bits(st_size) << subword_off;
3763 if ((int_map & FULL_MAP) == FULL_MAP) {
3764 return this_int_off; // we found a complete word init
3765 }
3766
3767 // Did this store hit or cross the word boundary?
3768 intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt);
3769 if (next_int_off == this_int_off + BytesPerInt) {
3770 // We passed the current int, without fully initializing it.
3771 int_map_off = next_int_off;
3772 int_map >>= BytesPerInt;
3773 } else if (next_int_off > this_int_off + BytesPerInt) {
3774 // We passed the current and next int.
3775 return this_int_off + BytesPerInt;
3776 }
3777 }
3778
3779 return -1;
3780 }
3781
3782
3783 // Called when the associated AllocateNode is expanded into CFG.
3784 // At this point, we may perform additional optimizations.
3785 // Linearize the stores by ascending offset, to make memory
3786 // activity as coherent as possible.
3787 Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
3788 intptr_t header_size,
3789 Node* size_in_bytes,
3790 PhaseGVN* phase) {
3791 assert(!is_complete(), "not already complete");
3792 assert(stores_are_sane(phase), "");
3793 assert(allocation() != NULL, "must be present");
3794
3795 remove_extra_zeroes();
3796
3797 if (ReduceFieldZeroing || ReduceBulkZeroing)
3798 // reduce instruction count for common initialization patterns
3799 coalesce_subword_stores(header_size, size_in_bytes, phase);
3800
3801 Node* zmem = zero_memory(); // initially zero memory state
3802 Node* inits = zmem; // accumulating a linearized chain of inits
3803 #ifdef ASSERT
3804 intptr_t first_offset = allocation()->minimum_header_size();
3805 intptr_t last_init_off = first_offset; // previous init offset
3806 intptr_t last_init_end = first_offset; // previous init offset+size
3807 intptr_t last_tile_end = first_offset; // previous tile offset+size
3808 #endif
3809 intptr_t zeroes_done = header_size;
3810
3811 bool do_zeroing = true; // we might give up if inits are very sparse
3812 int big_init_gaps = 0; // how many large gaps have we seen?
3813
3814 if (ZeroTLAB) do_zeroing = false;
3815 if (!ReduceFieldZeroing && !ReduceBulkZeroing) do_zeroing = false;
3816
3817 for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
3818 Node* st = in(i);
3819 intptr_t st_off = get_store_offset(st, phase);
3820 if (st_off < 0)
3821 break; // unknown junk in the inits
3822 if (st->in(MemNode::Memory) != zmem)
3823 break; // complicated store chains somehow in list
3824
3825 int st_size = st->as_Store()->memory_size();
3826 intptr_t next_init_off = st_off + st_size;
3827
3828 if (do_zeroing && zeroes_done < next_init_off) {
3829 // See if this store needs a zero before it or under it.
3830 intptr_t zeroes_needed = st_off;
3831
3832 if (st_size < BytesPerInt) {
3833 // Look for subword stores which only partially initialize words.
3834 // If we find some, we must lay down some word-level zeroes first,
3835 // underneath the subword stores.
3836 //
3837 // Examples:
3838 // byte[] a = { p,q,r,s } => a[0]=p,a[1]=q,a[2]=r,a[3]=s
3839 // byte[] a = { x,y,0,0 } => a[0..3] = 0, a[0]=x,a[1]=y
3840 // byte[] a = { 0,0,z,0 } => a[0..3] = 0, a[2]=z
3841 //
3842 // Note: coalesce_subword_stores may have already done this,
3843 // if it was prompted by constant non-zero subword initializers.
3844 // But this case can still arise with non-constant stores.
3845
3846 intptr_t next_full_store = find_next_fullword_store(i, phase);
3847
3848 // In the examples above:
3849 // in(i) p q r s x y z
3850 // st_off 12 13 14 15 12 13 14
3851 // st_size 1 1 1 1 1 1 1
3852 // next_full_s. 12 16 16 16 16 16 16
3853 // z's_done 12 16 16 16 12 16 12
3854 // z's_needed 12 16 16 16 16 16 16
3855 // zsize 0 0 0 0 4 0 4
3856 if (next_full_store < 0) {
3857 // Conservative tack: Zero to end of current word.
3858 zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
3859 } else {
3860 // Zero to beginning of next fully initialized word.
3861 // Or, don't zero at all, if we are already in that word.
3862 assert(next_full_store >= zeroes_needed, "must go forward");
3863 assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
3864 zeroes_needed = next_full_store;
3865 }
3866 }
3867
3868 if (zeroes_needed > zeroes_done) {
3869 intptr_t zsize = zeroes_needed - zeroes_done;
3870 // Do some incremental zeroing on rawmem, in parallel with inits.
3871 zeroes_done = align_size_down(zeroes_done, BytesPerInt);
3872 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
3873 zeroes_done, zeroes_needed,
3874 phase);
3875 zeroes_done = zeroes_needed;
3876 if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
3877 do_zeroing = false; // leave the hole, next time
3878 }
3879 }
3880
3881 // Collect the store and move on:
3882 st->set_req(MemNode::Memory, inits);
3883 inits = st; // put it on the linearized chain
3884 set_req(i, zmem); // unhook from previous position
3885
3886 if (zeroes_done == st_off)
3887 zeroes_done = next_init_off;
3888
3889 assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
3890
3891 #ifdef ASSERT
3892 // Various order invariants. Weaker than stores_are_sane because
3893 // a large constant tile can be filled in by smaller non-constant stores.
3894 assert(st_off >= last_init_off, "inits do not reverse");
3895 last_init_off = st_off;
3896 const Type* val = NULL;
3897 if (st_size >= BytesPerInt &&
3898 (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
3899 (int)val->basic_type() < (int)T_OBJECT) {
3900 assert(st_off >= last_tile_end, "tiles do not overlap");
3901 assert(st_off >= last_init_end, "tiles do not overwrite inits");
3902 last_tile_end = MAX2(last_tile_end, next_init_off);
3903 } else {
3904 intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
3905 assert(st_tile_end >= last_tile_end, "inits stay with tiles");
3906 assert(st_off >= last_init_end, "inits do not overlap");
3907 last_init_end = next_init_off; // it's a non-tile
3908 }
3909 #endif //ASSERT
3910 }
3911
3912 remove_extra_zeroes(); // clear out all the zmems left over
3913 add_req(inits);
3914
3915 if (!ZeroTLAB) {
3916 // If anything remains to be zeroed, zero it all now.
3917 zeroes_done = align_size_down(zeroes_done, BytesPerInt);
3918 // if it is the last unused 4 bytes of an instance, forget about it
3919 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
3920 if (zeroes_done + BytesPerLong >= size_limit) {
3921 assert(allocation() != NULL, "");
3922 if (allocation()->Opcode() == Op_Allocate) {
3923 Node* klass_node = allocation()->in(AllocateNode::KlassNode);
3924 ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
3925 if (zeroes_done == k->layout_helper())
3926 zeroes_done = size_limit;
3927 }
3928 }
3929 if (zeroes_done < size_limit) {
3930 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
3931 zeroes_done, size_in_bytes, phase);
3932 }
3933 }
3934
3935 set_complete(phase);
3936 return rawmem;
3937 }
3938
3939
3940 #ifdef ASSERT
3941 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
3942 if (is_complete())
3943 return true; // stores could be anything at this point
3944 assert(allocation() != NULL, "must be present");
3945 intptr_t last_off = allocation()->minimum_header_size();
3946 for (uint i = InitializeNode::RawStores; i < req(); i++) {
3947 Node* st = in(i);
3948 intptr_t st_off = get_store_offset(st, phase);
3949 if (st_off < 0) continue; // ignore dead garbage
3950 if (last_off > st_off) {
3951 tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, i, last_off, st_off);
3952 this->dump(2);
3953 assert(false, "ascending store offsets");
3954 return false;
3955 }
3956 last_off = st_off + st->as_Store()->memory_size();
3957 }
3958 return true;
3959 }
3960 #endif //ASSERT
3961
3962
3963
3964
3965 //============================MergeMemNode=====================================
3966 //
3967 // SEMANTICS OF MEMORY MERGES: A MergeMem is a memory state assembled from several
3968 // contributing store or call operations. Each contributor provides the memory
3969 // state for a particular "alias type" (see Compile::alias_type). For example,
3970 // if a MergeMem has an input X for alias category #6, then any memory reference
3971 // to alias category #6 may use X as its memory state input, as an exact equivalent
3972 // to using the MergeMem as a whole.
3973 // Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p)
3974 //
3975 // (Here, the <N> notation gives the index of the relevant adr_type.)
3976 //
3977 // In one special case (and more cases in the future), alias categories overlap.
3978 // The special alias category "Bot" (Compile::AliasIdxBot) includes all memory
3979 // states. Therefore, if a MergeMem has only one contributing input W for Bot,
3980 // it is exactly equivalent to that state W:
3981 // MergeMem(<Bot>: W) <==> W
3982 //
3983 // Usually, the merge has more than one input. In that case, where inputs
3984 // overlap (i.e., one is Bot), the narrower alias type determines the memory
3985 // state for that type, and the wider alias type (Bot) fills in everywhere else:
3986 // Load<5>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<5>(W,p)
3987 // Load<6>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<6>(X,p)
3988 //
3989 // A merge can take a "wide" memory state as one of its narrow inputs.
3990 // This simply means that the merge observes out only the relevant parts of
3991 // the wide input. That is, wide memory states arriving at narrow merge inputs
3992 // are implicitly "filtered" or "sliced" as necessary. (This is rare.)
3993 //
3994 // These rules imply that MergeMem nodes may cascade (via their <Bot> links),
3995 // and that memory slices "leak through":
3996 // MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y)) <==> MergeMem(<Bot>: W, <7>: Y)
3997 //
3998 // But, in such a cascade, repeated memory slices can "block the leak":
3999 // MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <7>: Y') <==> MergeMem(<Bot>: W, <7>: Y')
4000 //
4001 // In the last example, Y is not part of the combined memory state of the
4002 // outermost MergeMem. The system must, of course, prevent unschedulable
4003 // memory states from arising, so you can be sure that the state Y is somehow
4004 // a precursor to state Y'.
4005 //
4006 //
4007 // REPRESENTATION OF MEMORY MERGES: The indexes used to address the Node::in array
4008 // of each MergeMemNode array are exactly the numerical alias indexes, including
4009 // but not limited to AliasIdxTop, AliasIdxBot, and AliasIdxRaw. The functions
4010 // Compile::alias_type (and kin) produce and manage these indexes.
4011 //
4012 // By convention, the value of in(AliasIdxTop) (i.e., in(1)) is always the top node.
4013 // (Note that this provides quick access to the top node inside MergeMem methods,
4014 // without the need to reach out via TLS to Compile::current.)
4015 //
4016 // As a consequence of what was just described, a MergeMem that represents a full
4017 // memory state has an edge in(AliasIdxBot) which is a "wide" memory state,
4018 // containing all alias categories.
4019 //
4020 // MergeMem nodes never (?) have control inputs, so in(0) is NULL.
4021 //
4022 // All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either
4023 // a memory state for the alias type <N>, or else the top node, meaning that
4024 // there is no particular input for that alias type. Note that the length of
4025 // a MergeMem is variable, and may be extended at any time to accommodate new
4026 // memory states at larger alias indexes. When merges grow, they are of course
4027 // filled with "top" in the unused in() positions.
4028 //
4029 // This use of top is named "empty_memory()", or "empty_mem" (no-memory) as a variable.
4030 // (Top was chosen because it works smoothly with passes like GCM.)
4031 //
4032 // For convenience, we hardwire the alias index for TypeRawPtr::BOTTOM. (It is
4033 // the type of random VM bits like TLS references.) Since it is always the
4034 // first non-Bot memory slice, some low-level loops use it to initialize an
4035 // index variable: for (i = AliasIdxRaw; i < req(); i++).
4036 //
4037 //
4038 // ACCESSORS: There is a special accessor MergeMemNode::base_memory which returns
4039 // the distinguished "wide" state. The accessor MergeMemNode::memory_at(N) returns
4040 // the memory state for alias type <N>, or (if there is no particular slice at <N>,
4041 // it returns the base memory. To prevent bugs, memory_at does not accept <Top>
4042 // or <Bot> indexes. The iterator MergeMemStream provides robust iteration over
4043 // MergeMem nodes or pairs of such nodes, ensuring that the non-top edges are visited.
4044 //
4045 // %%%% We may get rid of base_memory as a separate accessor at some point; it isn't
4046 // really that different from the other memory inputs. An abbreviation called
4047 // "bot_memory()" for "memory_at(AliasIdxBot)" would keep code tidy.
4048 //
4049 //
4050 // PARTIAL MEMORY STATES: During optimization, MergeMem nodes may arise that represent
4051 // partial memory states. When a Phi splits through a MergeMem, the copy of the Phi
4052 // that "emerges though" the base memory will be marked as excluding the alias types
4053 // of the other (narrow-memory) copies which "emerged through" the narrow edges:
4054 //
4055 // Phi<Bot>(U, MergeMem(<Bot>: W, <8>: Y))
4056 // ==Ideal=> MergeMem(<Bot>: Phi<Bot-8>(U, W), Phi<8>(U, Y))
4057 //
4058 // This strange "subtraction" effect is necessary to ensure IGVN convergence.
4059 // (It is currently unimplemented.) As you can see, the resulting merge is
4060 // actually a disjoint union of memory states, rather than an overlay.
4061 //
4062
4063 //------------------------------MergeMemNode-----------------------------------
4064 Node* MergeMemNode::make_empty_memory() {
4065 Node* empty_memory = (Node*) Compile::current()->top();
4066 assert(empty_memory->is_top(), "correct sentinel identity");
4067 return empty_memory;
4068 }
4069
4070 MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
4071 init_class_id(Class_MergeMem);
4072 // all inputs are nullified in Node::Node(int)
4073 // set_input(0, NULL); // no control input
4074
4075 // Initialize the edges uniformly to top, for starters.
4076 Node* empty_mem = make_empty_memory();
4077 for (uint i = Compile::AliasIdxTop; i < req(); i++) {
4078 init_req(i,empty_mem);
4079 }
4080 assert(empty_memory() == empty_mem, "");
4081
4082 if( new_base != NULL && new_base->is_MergeMem() ) {
4083 MergeMemNode* mdef = new_base->as_MergeMem();
4084 assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
4085 for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
4086 mms.set_memory(mms.memory2());
4087 }
4088 assert(base_memory() == mdef->base_memory(), "");
4089 } else {
4090 set_base_memory(new_base);
4091 }
4092 }
4093
4094 // Make a new, untransformed MergeMem with the same base as 'mem'.
4095 // If mem is itself a MergeMem, populate the result with the same edges.
4096 MergeMemNode* MergeMemNode::make(Node* mem) {
4097 return new MergeMemNode(mem);
4098 }
4099
4100 //------------------------------cmp--------------------------------------------
4101 uint MergeMemNode::hash() const { return NO_HASH; }
4102 uint MergeMemNode::cmp( const Node &n ) const {
4103 return (&n == this); // Always fail except on self
4104 }
4105
4106 //------------------------------Identity---------------------------------------
4107 Node* MergeMemNode::Identity(PhaseTransform *phase) {
4108 // Identity if this merge point does not record any interesting memory
4109 // disambiguations.
4110 Node* base_mem = base_memory();
4111 Node* empty_mem = empty_memory();
4112 if (base_mem != empty_mem) { // Memory path is not dead?
4113 for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4114 Node* mem = in(i);
4115 if (mem != empty_mem && mem != base_mem) {
4116 return this; // Many memory splits; no change
4117 }
4118 }
4119 }
4120 return base_mem; // No memory splits; ID on the one true input
4121 }
4122
4123 //------------------------------Ideal------------------------------------------
4124 // This method is invoked recursively on chains of MergeMem nodes
4125 Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4126 // Remove chain'd MergeMems
4127 //
4128 // This is delicate, because the each "in(i)" (i >= Raw) is interpreted
4129 // relative to the "in(Bot)". Since we are patching both at the same time,
4130 // we have to be careful to read each "in(i)" relative to the old "in(Bot)",
4131 // but rewrite each "in(i)" relative to the new "in(Bot)".
4132 Node *progress = NULL;
4133
4134
4135 Node* old_base = base_memory();
4136 Node* empty_mem = empty_memory();
4137 if (old_base == empty_mem)
4138 return NULL; // Dead memory path.
4139
4140 MergeMemNode* old_mbase;
4141 if (old_base != NULL && old_base->is_MergeMem())
4142 old_mbase = old_base->as_MergeMem();
4143 else
4144 old_mbase = NULL;
4145 Node* new_base = old_base;
4146
4147 // simplify stacked MergeMems in base memory
4148 if (old_mbase) new_base = old_mbase->base_memory();
4149
4150 // the base memory might contribute new slices beyond my req()
4151 if (old_mbase) grow_to_match(old_mbase);
4152
4153 // Look carefully at the base node if it is a phi.
4154 PhiNode* phi_base;
4155 if (new_base != NULL && new_base->is_Phi())
4156 phi_base = new_base->as_Phi();
4157 else
4158 phi_base = NULL;
4159
4160 Node* phi_reg = NULL;
4161 uint phi_len = (uint)-1;
4162 if (phi_base != NULL && !phi_base->is_copy()) {
4163 // do not examine phi if degraded to a copy
4164 phi_reg = phi_base->region();
4165 phi_len = phi_base->req();
4166 // see if the phi is unfinished
4167 for (uint i = 1; i < phi_len; i++) {
4168 if (phi_base->in(i) == NULL) {
4169 // incomplete phi; do not look at it yet!
4170 phi_reg = NULL;
4171 phi_len = (uint)-1;
4172 break;
4173 }
4174 }
4175 }
4176
4177 // Note: We do not call verify_sparse on entry, because inputs
4178 // can normalize to the base_memory via subsume_node or similar
4179 // mechanisms. This method repairs that damage.
4180
4181 assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels");
4182
4183 // Look at each slice.
4184 for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4185 Node* old_in = in(i);
4186 // calculate the old memory value
4187 Node* old_mem = old_in;
4188 if (old_mem == empty_mem) old_mem = old_base;
4189 assert(old_mem == memory_at(i), "");
4190
4191 // maybe update (reslice) the old memory value
4192
4193 // simplify stacked MergeMems
4194 Node* new_mem = old_mem;
4195 MergeMemNode* old_mmem;
4196 if (old_mem != NULL && old_mem->is_MergeMem())
4197 old_mmem = old_mem->as_MergeMem();
4198 else
4199 old_mmem = NULL;
4200 if (old_mmem == this) {
4201 // This can happen if loops break up and safepoints disappear.
4202 // A merge of BotPtr (default) with a RawPtr memory derived from a
4203 // safepoint can be rewritten to a merge of the same BotPtr with
4204 // the BotPtr phi coming into the loop. If that phi disappears
4205 // also, we can end up with a self-loop of the mergemem.
4206 // In general, if loops degenerate and memory effects disappear,
4207 // a mergemem can be left looking at itself. This simply means
4208 // that the mergemem's default should be used, since there is
4209 // no longer any apparent effect on this slice.
4210 // Note: If a memory slice is a MergeMem cycle, it is unreachable
4211 // from start. Update the input to TOP.
4212 new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base;
4213 }
4214 else if (old_mmem != NULL) {
4215 new_mem = old_mmem->memory_at(i);
4216 }
4217 // else preceding memory was not a MergeMem
4218
4219 // replace equivalent phis (unfortunately, they do not GVN together)
4220 if (new_mem != NULL && new_mem != new_base &&
4221 new_mem->req() == phi_len && new_mem->in(0) == phi_reg) {
4222 if (new_mem->is_Phi()) {
4223 PhiNode* phi_mem = new_mem->as_Phi();
4224 for (uint i = 1; i < phi_len; i++) {
4225 if (phi_base->in(i) != phi_mem->in(i)) {
4226 phi_mem = NULL;
4227 break;
4228 }
4229 }
4230 if (phi_mem != NULL) {
4231 // equivalent phi nodes; revert to the def
4232 new_mem = new_base;
4233 }
4234 }
4235 }
4236
4237 // maybe store down a new value
4238 Node* new_in = new_mem;
4239 if (new_in == new_base) new_in = empty_mem;
4240
4241 if (new_in != old_in) {
4242 // Warning: Do not combine this "if" with the previous "if"
4243 // A memory slice might have be be rewritten even if it is semantically
4244 // unchanged, if the base_memory value has changed.
4245 set_req(i, new_in);
4246 progress = this; // Report progress
4247 }
4248 }
4249
4250 if (new_base != old_base) {
4251 set_req(Compile::AliasIdxBot, new_base);
4252 // Don't use set_base_memory(new_base), because we need to update du.
4253 assert(base_memory() == new_base, "");
4254 progress = this;
4255 }
4256
4257 if( base_memory() == this ) {
4258 // a self cycle indicates this memory path is dead
4259 set_req(Compile::AliasIdxBot, empty_mem);
4260 }
4261
4262 // Resolve external cycles by calling Ideal on a MergeMem base_memory
4263 // Recursion must occur after the self cycle check above
4264 if( base_memory()->is_MergeMem() ) {
4265 MergeMemNode *new_mbase = base_memory()->as_MergeMem();
4266 Node *m = phase->transform(new_mbase); // Rollup any cycles
4267 if( m != NULL && (m->is_top() ||
4268 m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem) ) {
4269 // propagate rollup of dead cycle to self
4270 set_req(Compile::AliasIdxBot, empty_mem);
4271 }
4272 }
4273
4274 if( base_memory() == empty_mem ) {
4275 progress = this;
4276 // Cut inputs during Parse phase only.
4277 // During Optimize phase a dead MergeMem node will be subsumed by Top.
4278 if( !can_reshape ) {
4279 for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4280 if( in(i) != empty_mem ) { set_req(i, empty_mem); }
4281 }
4282 }
4283 }
4284
4285 if( !progress && base_memory()->is_Phi() && can_reshape ) {
4286 // Check if PhiNode::Ideal's "Split phis through memory merges"
4287 // transform should be attempted. Look for this->phi->this cycle.
4288 uint merge_width = req();
4289 if (merge_width > Compile::AliasIdxRaw) {
4290 PhiNode* phi = base_memory()->as_Phi();
4291 for( uint i = 1; i < phi->req(); ++i ) {// For all paths in
4292 if (phi->in(i) == this) {
4293 phase->is_IterGVN()->_worklist.push(phi);
4294 break;
4295 }
4296 }
4297 }
4298 }
4299
4300 assert(progress || verify_sparse(), "please, no dups of base");
4301 return progress;
4302 }
4303
4304 //-------------------------set_base_memory-------------------------------------
4305 void MergeMemNode::set_base_memory(Node *new_base) {
4306 Node* empty_mem = empty_memory();
4307 set_req(Compile::AliasIdxBot, new_base);
4308 assert(memory_at(req()) == new_base, "must set default memory");
4309 // Clear out other occurrences of new_base:
4310 if (new_base != empty_mem) {
4311 for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4312 if (in(i) == new_base) set_req(i, empty_mem);
4313 }
4314 }
4315 }
4316
4317 //------------------------------out_RegMask------------------------------------
4318 const RegMask &MergeMemNode::out_RegMask() const {
4319 return RegMask::Empty;
4320 }
4321
4322 //------------------------------dump_spec--------------------------------------
4323 #ifndef PRODUCT
4324 void MergeMemNode::dump_spec(outputStream *st) const {
4325 st->print(" {");
4326 Node* base_mem = base_memory();
4327 for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
4328 Node* mem = (in(i) != NULL) ? memory_at(i) : base_mem;
4329 if (mem == base_mem) { st->print(" -"); continue; }
4330 st->print( " N%d:", mem->_idx );
4331 Compile::current()->get_adr_type(i)->dump_on(st);
4332 }
4333 st->print(" }");
4334 }
4335 #endif // !PRODUCT
4336
4337
4338 #ifdef ASSERT
4339 static bool might_be_same(Node* a, Node* b) {
4340 if (a == b) return true;
4341 if (!(a->is_Phi() || b->is_Phi())) return false;
4342 // phis shift around during optimization
4343 return true; // pretty stupid...
4344 }
4345
4346 // verify a narrow slice (either incoming or outgoing)
4347 static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
4348 if (!VerifyAliases) return; // don't bother to verify unless requested
4349 if (is_error_reported()) return; // muzzle asserts when debugging an error
4350 if (Node::in_dump()) return; // muzzle asserts when printing
4351 assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
4352 assert(n != NULL, "");
4353 // Elide intervening MergeMem's
4354 while (n->is_MergeMem()) {
4355 n = n->as_MergeMem()->memory_at(alias_idx);
4356 }
4357 Compile* C = Compile::current();
4358 const TypePtr* n_adr_type = n->adr_type();
4359 if (n == m->empty_memory()) {
4360 // Implicit copy of base_memory()
4361 } else if (n_adr_type != TypePtr::BOTTOM) {
4362 assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
4363 assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
4364 } else {
4365 // A few places like make_runtime_call "know" that VM calls are narrow,
4366 // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM.
4367 bool expected_wide_mem = false;
4368 if (n == m->base_memory()) {
4369 expected_wide_mem = true;
4370 } else if (alias_idx == Compile::AliasIdxRaw ||
4371 n == m->memory_at(Compile::AliasIdxRaw)) {
4372 expected_wide_mem = true;
4373 } else if (!C->alias_type(alias_idx)->is_rewritable()) {
4374 // memory can "leak through" calls on channels that
4375 // are write-once. Allow this also.
4376 expected_wide_mem = true;
4377 }
4378 assert(expected_wide_mem, "expected narrow slice replacement");
4379 }
4380 }
4381 #else // !ASSERT
4382 #define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op
4383 #endif
4384
4385
4386 //-----------------------------memory_at---------------------------------------
4387 Node* MergeMemNode::memory_at(uint alias_idx) const {
4388 assert(alias_idx >= Compile::AliasIdxRaw ||
4389 alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
4390 "must avoid base_memory and AliasIdxTop");
4391
4392 // Otherwise, it is a narrow slice.
4393 Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
4394 Compile *C = Compile::current();
4395 if (is_empty_memory(n)) {
4396 // the array is sparse; empty slots are the "top" node
4397 n = base_memory();
4398 assert(Node::in_dump()
4399 || n == NULL || n->bottom_type() == Type::TOP
4400 || n->adr_type() == NULL // address is TOP
4401 || n->adr_type() == TypePtr::BOTTOM
4402 || n->adr_type() == TypeRawPtr::BOTTOM
4403 || Compile::current()->AliasLevel() == 0,
4404 "must be a wide memory");
4405 // AliasLevel == 0 if we are organizing the memory states manually.
4406 // See verify_memory_slice for comments on TypeRawPtr::BOTTOM.
4407 } else {
4408 // make sure the stored slice is sane
4409 #ifdef ASSERT
4410 if (is_error_reported() || Node::in_dump()) {
4411 } else if (might_be_same(n, base_memory())) {
4412 // Give it a pass: It is a mostly harmless repetition of the base.
4413 // This can arise normally from node subsumption during optimization.
4414 } else {
4415 verify_memory_slice(this, alias_idx, n);
4416 }
4417 #endif
4418 }
4419 return n;
4420 }
4421
4422 //---------------------------set_memory_at-------------------------------------
4423 void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
4424 verify_memory_slice(this, alias_idx, n);
4425 Node* empty_mem = empty_memory();
4426 if (n == base_memory()) n = empty_mem; // collapse default
4427 uint need_req = alias_idx+1;
4428 if (req() < need_req) {
4429 if (n == empty_mem) return; // already the default, so do not grow me
4430 // grow the sparse array
4431 do {
4432 add_req(empty_mem);
4433 } while (req() < need_req);
4434 }
4435 set_req( alias_idx, n );
4436 }
4437
4438
4439
4440 //--------------------------iteration_setup------------------------------------
4441 void MergeMemNode::iteration_setup(const MergeMemNode* other) {
4442 if (other != NULL) {
4443 grow_to_match(other);
4444 // invariant: the finite support of mm2 is within mm->req()
4445 #ifdef ASSERT
4446 for (uint i = req(); i < other->req(); i++) {
4447 assert(other->is_empty_memory(other->in(i)), "slice left uncovered");
4448 }
4449 #endif
4450 }
4451 // Replace spurious copies of base_memory by top.
4452 Node* base_mem = base_memory();
4453 if (base_mem != NULL && !base_mem->is_top()) {
4454 for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) {
4455 if (in(i) == base_mem)
4456 set_req(i, empty_memory());
4457 }
4458 }
4459 }
4460
4461 //---------------------------grow_to_match-------------------------------------
4462 void MergeMemNode::grow_to_match(const MergeMemNode* other) {
4463 Node* empty_mem = empty_memory();
4464 assert(other->is_empty_memory(empty_mem), "consistent sentinels");
4465 // look for the finite support of the other memory
4466 for (uint i = other->req(); --i >= req(); ) {
4467 if (other->in(i) != empty_mem) {
4468 uint new_len = i+1;
4469 while (req() < new_len) add_req(empty_mem);
4470 break;
4471 }
4472 }
4473 }
4474
4475 //---------------------------verify_sparse-------------------------------------
4476 #ifndef PRODUCT
4477 bool MergeMemNode::verify_sparse() const {
4478 assert(is_empty_memory(make_empty_memory()), "sane sentinel");
4479 Node* base_mem = base_memory();
4480 // The following can happen in degenerate cases, since empty==top.
4481 if (is_empty_memory(base_mem)) return true;
4482 for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4483 assert(in(i) != NULL, "sane slice");
4484 if (in(i) == base_mem) return false; // should have been the sentinel value!
4485 }
4486 return true;
4487 }
4488
4489 bool MergeMemStream::match_memory(Node* mem, const MergeMemNode* mm, int idx) {
4490 Node* n;
4491 n = mm->in(idx);
4492 if (mem == n) return true; // might be empty_memory()
4493 n = (idx == Compile::AliasIdxBot)? mm->base_memory(): mm->memory_at(idx);
4494 if (mem == n) return true;
4495 while (n->is_Phi() && (n = n->as_Phi()->is_copy()) != NULL) {
4496 if (mem == n) return true;
4497 if (n == NULL) break;
4498 }
4499 return false;
4500 }
4501 #endif // !PRODUCT