/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"

#include <sys/sysinfo.h>

int VM_Version::_features = VM_Version::unknown_m;
int VM_Version::_measured_cache_line_size = 32; // pessimistic init value
const char* VM_Version::_features_str = "";
bool VM_Version::_is_determine_features_test_running = false;

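// Warn when a trap-based check was requested explicitly on the command line but
// cannot take effect because UseSIGTRAP is off; initialize() disables the
// corresponding flag right after printing the warning.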
#define MSG(flag)   \
  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
      jio_fprintf(defaultStream::error_stream(),                       \
                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
                  "         -XX:+" #flag " will be disabled!\n");

void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the detected features.
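  // Each probe instruction below first appeared with the corresponding POWER
  // generation: lqarx (POWER8), popcntw (POWER7), cmpb (POWER6), popcntb (POWER5).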
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }
  guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
            PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7 ||
            PowerArchitecturePPC64 == 8,
            "PowerArchitecturePPC64 should be 0, 5, 6, 7, or 8");

  // Power 8: Configure Data Stream Control Register.
  if (PowerArchitecturePPC64 >= 8) {
    config_dscr();
  }

  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks,       false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks,     false);
  }

#ifdef COMPILER2
  if (!UseSIGTRAP) {
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

  // On Power6 test for section size.
  if (PowerArchitecturePPC64 == 6) {
    determine_section_size();
  // TODO: PPC port } else {
  // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
  }

  MaxVectorSize = 8;
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               //(has_mftgpr()? " mftgpr"  : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " vcipher" : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_tcheck()  ? " tcheck"  : "")
               // Make sure number of %s matches num_features!
              );
  _features_str = os::strdup(buf);
  if (Verbose) {
    print_features();
  }

  // PPC64 supports 8-byte compare-exchange operations (see
  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  UseSSE = 0; // Only on x86 and x64

  intx cache_line_size = _measured_cache_line_size;

  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // Default is not defined?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // Default is not defined?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value.
  }

  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
  }
  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be non-negative");

  // Implementation does not use any of the vector instructions
  // available with Power8. Their exploitation is still pending.
  if (!UseCRC32Intrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseOnSpinWaitIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseOnSpinWaitIntrinsic))
      warning("onSpinWait intrinsic is not available on this CPU");
    FLAG_SET_DEFAULT(UseOnSpinWaitIntrinsic, false);
  }

  // The AES intrinsic stubs require AES instruction support.
  if (UseAES) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }
  if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }
  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
    warning("SHA intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }

  // Adjust RTM (Restricted Transactional Memory) flags.
  if (!has_tcheck() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

  if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
    if (!UnlockExperimentalVMOptions) {
      vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
                                    "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
    } else {
      warning("UseRTMLocking is only available as experimental option on this platform.");
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
    if (!is_power_of_2(RTMTotalCountIncrRate)) {
      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
    }
    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
    }
    guarantee(RTMSpinLoopCount > 0, "unsupported");
#else
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
#endif
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::print_features() {
  tty->print_cr("Version: %s cache_line_size = %d", cpu_features(), (int) get_cache_line_size());
}

#ifdef COMPILER2
// Determine section size on Power6: If the section size is 8 instructions,
// there should be a difference of ~15% between the two test loops below. If
// no difference is detected, the section size is assumed to be 32 instructions.
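// Both loops execute the same instruction mix; loop 2 merely shifts the
// endgroup instructions by one slot so that dispatch groups straddle the
// 8-instruction sector boundaries, which is expected to run noticeably slower
// on a core that fetches in 8-instruction sectors.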
void VM_Version::determine_section_size() {

  int unroll = 80;

  const int code_size = (2 * unroll * 32 + 100) * BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // Emit code.
  void (*test1)() = (void(*)())(void *)a->function_entry();

  Label l1;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1
    // ------- sector 0 ------------
    // ;; 0
    a->nop();                   // 1
    a->fpnop0();                // 2
    a->fpnop1();                // 3
    a->addi(R4, R4, -1);        // 4

    // ;;  1
    a->nop();                   // 5
    a->fmr(F6, F6);             // 6
    a->fmr(F7, F7);             // 7
    a->endgroup();              // 8
    // ------- sector 8 ------------

    // ;;  2
    a->nop();                   // 9
    a->nop();                   // 10
    a->fmr(F8, F8);             // 11
    a->fmr(F9, F9);             // 12

    // ;;  3
    a->nop();                   // 13
    a->fmr(F10, F10);           // 14
    a->fmr(F11, F11);           // 15
    a->endgroup();              // 16
    // -------- sector 16 -------------

    // ;;  4
    a->nop();                   // 17
    a->nop();                   // 18
    a->fmr(F15, F15);           // 19
    a->fmr(F16, F16);           // 20

    // ;;  5
    a->nop();                   // 21
    a->fmr(F17, F17);           // 22
    a->fmr(F18, F18);           // 23
    a->endgroup();              // 24
    // ------- sector 24 ------------

    // ;;  6
    a->nop();                   // 25
    a->nop();                   // 26
    a->fmr(F19, F19);           // 27
    a->fmr(F20, F20);           // 28

    // ;;  7
    a->nop();                   // 29
    a->fmr(F21, F21);           // 30
    a->fmr(F22, F22);           // 31
    a->brnop0();                // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll);   // 33
  a->bge(CCR0, l1);             // 34
  a->blr();

  // Emit code.
  void (*test2)() = (void(*)())(void *)a->function_entry();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();                  // 1
    a->nop();                     // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();                  // 3
    a->fpnop1();                  // 4
    a->addi(R4, R4, -1);          // 5

    // ;; 1

    a->nop();                     // 6
    a->fmr(F6, F6);               // 7
    a->fmr(F7, F7);               // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();                // 9

    // ;; 3
    a->nop();                     // 10
    a->nop();                     // 11
    a->fmr(F8, F8);               // 12

    // ;; 4
    a->fmr(F9, F9);               // 13
    a->nop();                     // 14
    a->fmr(F10, F10);             // 15

    // ;; 5
    a->fmr(F11, F11);             // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();                // 17

    // ;; 7
    a->nop();                     // 18
    a->nop();                     // 19
    a->fmr(F15, F15);             // 20

    // ;; 8
    a->fmr(F16, F16);             // 21
    a->nop();                     // 22
    a->fmr(F17, F17);             // 23

    // ;; 9
    a->fmr(F18, F18);             // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();                // 25

    // ;; 11
    a->nop();                     // 26
    a->nop();                     // 27
    a->fmr(F19, F19);             // 28

    // ;; 12
    a->fmr(F20, F20);             // 29
    a->nop();                     // 30
    a->fmr(F21, F21);             // 31

    // ;; 13
    a->fmr(F22, F22);             // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l2);           // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  double loop1_seconds, loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);
  loop2_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds * 100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else {
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
#endif // COMPILER2

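// Feature detection works by emitting one probe instruction per feature into a
// small stub and executing it. While _is_determine_features_test_running is set,
// the platform SIGILL handler overwrites an unimplemented instruction word with 0
// and resumes; after the run, every instruction word that is still non-zero marks
// a supported feature.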
void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
  // 1 InstWord per call for the blr instruction.
  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
  int features = 0;

  // Create test area.
  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >= 2 * max cache line size (cache line size can't exceed min page size).
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Must be set to true so we can generate the test code.
  _features = VM_Version::all_features_m;

  // Emit code.
  void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  // Don't use R0 in ldarx.
  // Keep R3_ARG1 unmodified, it contains &field (see below).
  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
  a->fsqrt(F3, F4);                            // code[0]  -> fsqrt_m
  a->fsqrts(F3, F4);                           // code[1]  -> fsqrts_m
  a->isel(R7, R5, R6, 0);                      // code[2]  -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3]  -> lxarx_m
  a->cmpb(R7, R5, R6);                         // code[4]  -> cmpb
  a->popcntb(R7, R5);                          // code[5]  -> popcntb
  a->popcntw(R7, R5);                          // code[6]  -> popcntw
  a->fcfids(F3, F4);                           // code[7]  -> fcfids
  a->vand(VR0, VR0, VR0);                      // code[8]  -> vand
  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16.
  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
  a->tcheck(0);                                // code[12] -> tcheck
  a->blr();

  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
  a->dcbz(R3_ARG1); // R3_ARG1 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();
  _features = VM_Version::unknown_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

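  // dcbz zeroes exactly one data cache block, so after calling it on a
  // 0xFF-filled buffer the number of zero bytes equals the cache line size.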
  // Measure cache line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
  int count = 0; // count zeroed bytes
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _measured_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
  (*test)((address)align_size_up((intptr_t)mid_of_test_area, 16), (uint64_t)0);
  VM_Version::_is_determine_features_test_running = false;

  // Determine which instructions are legal.
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= fsqrts_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;
  if (code[feature_cntr++]) features |= lqarx_m;
  if (code[feature_cntr++]) features |= vcipher_m;
  if (code[feature_cntr++]) features |= vpmsumb_m;
  if (code[feature_cntr++]) features |= tcheck_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;
}

// Power 8: Configure Data Stream Control Register.
void VM_Version::config_dscr() {
  assert(has_tcheck(), "Only execute on Power 8 or later!");

  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (2+2*7)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("config_dscr", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Emit code.
  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  a->mfdscr(R3);
  a->blr();

  void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
  a->mtdscr(R3);
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Apply the configuration if needed.
  uint64_t dscr_val = (*get_dscr)();
  if (Verbose) {
    tty->print_cr("dscr value was 0x%lx", dscr_val);
  }
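  // DSCR field layout (matching the masks below): DPFD, the default prefetch
  // depth, occupies the low 3 bits; URG, the depth attainment urgency, occupies
  // bits 6..8.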
  bool change_requested = false;
  if (DSCR_PPC64 != (uintx)-1) {
    dscr_val = DSCR_PPC64;
    change_requested = true;
  }
  if (DSCR_DPFD_PPC64 <= 7) {
    uint64_t mask = 0x7;
    if ((dscr_val & mask) != DSCR_DPFD_PPC64) {
      dscr_val = (dscr_val & ~mask) | (DSCR_DPFD_PPC64);
      change_requested = true;
    }
  }
  if (DSCR_URG_PPC64 <= 7) {
    uint64_t mask = 0x7 << 6;
    if ((dscr_val & mask) != DSCR_URG_PPC64 << 6) {
      dscr_val = (dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
      change_requested = true;
    }
  }
  if (change_requested) {
    (*set_dscr)(dscr_val);
    if (Verbose) {
      tty->print_cr("dscr was set to 0x%lx", (*get_dscr)());
    }
  }
}

static int saved_features = 0;

void VM_Version::allow_all() {
  saved_features = _features;
  _features      = all_features_m;
}

void VM_Version::revert() {
  _features = saved_features;
}