/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc #pragma alloca must be used
// with C++ compiler before referencing the function alloca()
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "logging/log.hpp"
#include "libo4.hpp"
#include "libperfstat_aix.hpp"
#include "libodm_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Missing prototypes for various system APIs.
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

#if !defined(_AIXVERSION_610)
extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
extern "C" int getargs(procsinfo*, int, char*, int);
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//   0xVVRRTTSS
//   VV - major version
//   RR - minor version
//   TT - tech level, if known, 0 otherwise
//   SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;  // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2;  // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" resp. 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
static struct {
  size_t pagesize;             // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;            // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;             // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize;  // stack page size of pthread threads
  size_t textpsize;            // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;      // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;      // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                   // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  assert0(g_brk_at_startup != NULL);
  if (a >= g_brk_at_startup &&
      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  // Avoid expensive API call here, as returned value will always be null.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return ULONG_MAX;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if user is running as root.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
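  // Example (illustrative): a request of 2.5 GB is handled below as two full
  // 1 GB disclaims followed by one final disclaim of the remaining 0.5 GB.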
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Wrap the function "vmgetinfo" which is not available on older OS releases.
static int checked_vmgetinfo(void *out, int command, int arg) {
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}

// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
    return 4*K;
  }

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    assert(false, "vmgetinfo failed to retrieve page size");
    return 4*K;
  }
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case 4*K : return "4K";
    case 64*K: return "64K";
    case 16*M: return "16M";
    case 16*G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == 4*K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
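  // (Illustrative: launching the VM with, e.g., LDR_CNTRL=DATAPSIZE=64K set in
  //  the environment would make the probe below report 64K instead of 4K.)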
  {
    void* p = ::malloc(16*M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  // Note that this is pure curiosity. We do not rely on the default page size
  // but set our own page size after allocation.
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because primordial thread's stack may have different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size. Should be the same as data page size because
  // pthread stacks are allocated from C-Heap.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  {
    address any_function =
      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    trcVerbose("OS/400 < V6R1 - no large page support.");
    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
      trcVerbose("disabling multipage support.");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i++) {
      const size_t pagesize = sizes[i];
      if (pagesize != 64*K && pagesize != 16*M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", en);
      } else {
        // Attach and double check pagesize.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == 64*K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == 16*M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_64K_pages ? "yes" : "no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_16M_pages ? "yes" : "no"));
  trcVerbose("Multipage error details: %d",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == 4*K);
  assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
  assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);

}

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0'; // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0'; // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0'; // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0'; // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
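  // (Illustrative: with LIBPATH=/opt/mylibs set in the environment, the code
  //  below produces the search path "/opt/mylibs:/lib:/usr/lib".)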
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //    ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //    ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //    ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total Total real memory (in 4 KB pages).
    // u_longlong_t real_free  Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free  Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  pthread_attr_setstacksize(&attr, stack_size);

  // libc guard page
  pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
", 896 (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); 897 } else { 898 log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.", 899 ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); 900 } 901 902 pthread_attr_destroy(&attr); 903 904 if (ret != 0) { 905 // Need to clean up stuff we've allocated so far. 906 thread->set_osthread(NULL); 907 delete osthread; 908 return false; 909 } 910 911 // OSThread::thread_id is the pthread id. 912 osthread->set_thread_id(tid); 913 914 return true; 915 } 916 917 ///////////////////////////////////////////////////////////////////////////// 918 // attach existing thread 919 920 // bootstrap the main thread 921 bool os::create_main_thread(JavaThread* thread) { 922 assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread"); 923 return create_attached_thread(thread); 924 } 925 926 bool os::create_attached_thread(JavaThread* thread) { 927 #ifdef ASSERT 928 thread->verify_not_published(); 929 #endif 930 931 // Allocate the OSThread object 932 OSThread* osthread = new OSThread(NULL, NULL); 933 934 if (osthread == NULL) { 935 return false; 936 } 937 938 const pthread_t pthread_id = ::pthread_self(); 939 const tid_t kernel_thread_id = ::thread_self(); 940 941 // OSThread::thread_id is the pthread id. 942 osthread->set_thread_id(pthread_id); 943 944 // .. but keep kernel thread id too for diagnostics 945 osthread->set_kernel_thread_id(kernel_thread_id); 946 947 // initialize floating point control register 948 os::Aix::init_thread_fpu_state(); 949 950 // Initial thread state is RUNNABLE 951 osthread->set_state(RUNNABLE); 952 953 thread->set_osthread(osthread); 954 955 if (UseNUMA) { 956 int lgrp_id = os::numa_get_group_id(); 957 if (lgrp_id != -1) { 958 thread->set_lgrp_id(lgrp_id); 959 } 960 } 961 962 // initialize signal mask for this thread 963 // and save the caller's signal mask 964 os::Aix::hotspot_sigmask(thread); 965 966 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").", 967 os::current_thread_id(), (uintx) kernel_thread_id); 968 969 return true; 970 } 971 972 void os::pd_start_thread(Thread* thread) { 973 int status = pthread_continue_np(thread->osthread()->pthread_id()); 974 assert(status == 0, "thr_continue failed"); 975 } 976 977 // Free OS resources related to the OSThread 978 void os::free_thread(OSThread* osthread) { 979 assert(osthread != NULL, "osthread not set"); 980 981 // We are told to free resources of the argument thread, 982 // but we can only really operate on the current thread. 983 assert(Thread::current()->osthread() == osthread, 984 "os::free_thread but not current thread"); 985 986 // Restore caller's signal mask 987 sigset_t sigmask = osthread->caller_sigmask(); 988 pthread_sigmask(SIG_SETMASK, &sigmask, NULL); 989 990 delete osthread; 991 } 992 993 //////////////////////////////////////////////////////////////////////////////// 994 // time support 995 996 // Time since start-up in seconds to a fine granularity. 997 // Used by VMSelfDestructTimer and the MemProfiler. 
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
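// (times() reports these in clock ticks; with clock_tics_per_sec = 100, one
//  tick corresponds to 10 ms, so e.g. 250 ticks convert to 2.5 seconds below.)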
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() {
  return (intx)pthread_self();
}

int os::current_process_id() {
  return getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.
  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else {
    return false;
  }

}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
// it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
// code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
// NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor.
      return code_entry;
    }
  }

  return NULL;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  return AixSymbols::get_module_name(addr, buf, buflen);
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

void* os::dll_lookup(void* handle, const char* name) {
  void* res = dlsym(handle, name);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::get_summary_os_info(char* buf, size_t buflen) {
  // There might be something more readable than uname results for AIX.
  struct utsname name;
  uname(&name);
  snprintf(buf, buflen, "%s %s", name.release, name.version);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  uint32_t ver = os::Aix::os_version();
  st->print_cr("AIX kernel version %u.%u.%u.%u",
               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();

  // print wpar info
  libperfstat::wparinfo_t wi;
  if (libperfstat::get_wparinfo(&wi)) {
    st->print_cr("wpar info");
    st->print_cr("name: %s", wi.name);
    st->print_cr("id:   %d", wi.wpar_id);
    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
  }

  // print partition info
  libperfstat::partitioninfo_t pi;
  if (libperfstat::get_partitioninfo(&pi)) {
    st->print_cr("partition info");
    st->print_cr(" name: %s", pi.name);
  }

}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  Base page size (sysconf _SC_PAGESIZE): %s",
    describe_pagesize(g_multipage_support.pagesize));
  st->print_cr("  Data page size (C-Heap, bss, etc): %s",
    describe_pagesize(g_multipage_support.datapsize));
  st->print_cr("  Text page size: %s",
    describe_pagesize(g_multipage_support.textpsize));
  st->print_cr("  Thread stack page size (pthread): %s",
    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  st->print_cr("  Default shared memory page size: %s",
    describe_pagesize(g_multipage_support.shmpsize));
  st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
    (g_multipage_support.can_use_64K_pages ? "yes" : "no"));
  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
    (g_multipage_support.can_use_16M_pages ?
"yes" :"no")); 1474 st->print_cr(" Multipage error: %d", 1475 g_multipage_support.error); 1476 st->cr(); 1477 st->print_cr(" os::vm_page_size: %s", describe_pagesize(os::vm_page_size())); 1478 1479 // print out LDR_CNTRL because it affects the default page sizes 1480 const char* const ldr_cntrl = ::getenv("LDR_CNTRL"); 1481 st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>"); 1482 1483 // Print out EXTSHM because it is an unsupported setting. 1484 const char* const extshm = ::getenv("EXTSHM"); 1485 st->print_cr(" EXTSHM=%s.", extshm ? extshm : "<unset>"); 1486 if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) { 1487 st->print_cr(" *** Unsupported! Please remove EXTSHM from your environment! ***"); 1488 } 1489 1490 // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks. 1491 const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES"); 1492 st->print_cr(" AIXTHREAD_GUARDPAGES=%s.", 1493 aixthread_guardpages ? aixthread_guardpages : "<unset>"); 1494 1495 os::Aix::meminfo_t mi; 1496 if (os::Aix::get_meminfo(&mi)) { 1497 char buffer[256]; 1498 if (os::Aix::on_aix()) { 1499 st->print_cr("physical total : " SIZE_FORMAT, mi.real_total); 1500 st->print_cr("physical free : " SIZE_FORMAT, mi.real_free); 1501 st->print_cr("swap total : " SIZE_FORMAT, mi.pgsp_total); 1502 st->print_cr("swap free : " SIZE_FORMAT, mi.pgsp_free); 1503 } else { 1504 // PASE - Numbers are result of QWCRSSTS; they mean: 1505 // real_total: Sum of all system pools 1506 // real_free: always 0 1507 // pgsp_total: we take the size of the system ASP 1508 // pgsp_free: size of system ASP times percentage of system ASP unused 1509 st->print_cr("physical total : " SIZE_FORMAT, mi.real_total); 1510 st->print_cr("system asp total : " SIZE_FORMAT, mi.pgsp_total); 1511 st->print_cr("%% system asp used : " SIZE_FORMAT, 1512 mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f); 1513 } 1514 st->print_raw(buffer); 1515 } 1516 st->cr(); 1517 1518 // Print segments allocated with os::reserve_memory. 1519 st->print_cr("internal virtual memory regions used by vm:"); 1520 vmembk_print_on(st); 1521 } 1522 1523 // Get a string for the cpuinfo that is a summary of the cpu type 1524 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1525 // This looks good 1526 libperfstat::cpuinfo_t ci; 1527 if (libperfstat::get_cpuinfo(&ci)) { 1528 strncpy(buf, ci.version, buflen); 1529 } else { 1530 strncpy(buf, "AIX", buflen); 1531 } 1532 } 1533 1534 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1535 st->print("CPU:"); 1536 st->print("total %d", os::processor_count()); 1537 // It's not safe to query number of active processors after crash. 
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::features());
  st->cr();
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal support

static volatile jint sigint_count = 0;

static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
// Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky" -
  // secondary SIGSEGV, SIGILL and SIGBUS may and do happen.
  // On AIX/PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait().
// On PASE, we need to use msem_lock() and msem_unlock(), because POSIX semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using the msem_..() APIs for both PASE and AIX is not an option
// either, as on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;
static msemaphore* p_sig_msem = 0;

static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
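    // (Aside: a minimal, illustrative sketch of the msemaphore pattern the
    // local_sem_* wrappers rely on - not VM code, error handling omitted.
    // The msemaphore must live in memory that could be shared with other
    // processes, hence an mmap/reserve_memory allocation rather than a
    // plain static variable:
    //
    //   #include <sys/mman.h>
    //   msemaphore* m = (msemaphore*) ::mmap(NULL, sizeof(msemaphore),
    //                                        PROT_READ|PROT_WRITE,
    //                                        MAP_ANONYMOUS|MAP_SHARED, -1, 0);
    //   ::msem_init(m, 0);      // initialize, as local_sem_init() does below
    //   ::msem_unlock(m, 0);    // "post"
    //   ::msem_lock(m, 0);      // "wait"
    // )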
1692 guarantee0(p_sig_msem == NULL); 1693 p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL); 1694 guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore"); 1695 guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed"); 1696 } 1697 } 1698 1699 static void local_sem_post() { 1700 static bool warn_only_once = false; 1701 if (os::Aix::on_aix()) { 1702 int rc = ::sem_post(&sig_sem); 1703 if (rc == -1 && !warn_only_once) { 1704 trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno)); 1705 warn_only_once = true; 1706 } 1707 } else { 1708 guarantee0(p_sig_msem != NULL); 1709 int rc = ::msem_unlock(p_sig_msem, 0); 1710 if (rc == -1 && !warn_only_once) { 1711 trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno)); 1712 warn_only_once = true; 1713 } 1714 } 1715 } 1716 1717 static void local_sem_wait() { 1718 static bool warn_only_once = false; 1719 if (os::Aix::on_aix()) { 1720 int rc = ::sem_wait(&sig_sem); 1721 if (rc == -1 && !warn_only_once) { 1722 trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno)); 1723 warn_only_once = true; 1724 } 1725 } else { 1726 guarantee0(p_sig_msem != NULL); // must init before use 1727 int rc = ::msem_lock(p_sig_msem, 0); 1728 if (rc == -1 && !warn_only_once) { 1729 trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno)); 1730 warn_only_once = true; 1731 } 1732 } 1733 } 1734 1735 void os::signal_init_pd() { 1736 // Initialize signal structures 1737 ::memset((void*)pending_signals, 0, sizeof(pending_signals)); 1738 1739 // Initialize signal semaphore 1740 local_sem_init(); 1741 } 1742 1743 void os::signal_notify(int sig) { 1744 Atomic::inc(&pending_signals[sig]); 1745 local_sem_post(); 1746 } 1747 1748 static int check_pending_signals(bool wait) { 1749 Atomic::store(0, &sigint_count); 1750 for (;;) { 1751 for (int i = 0; i < NSIG + 1; i++) { 1752 jint n = pending_signals[i]; 1753 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 1754 return i; 1755 } 1756 } 1757 if (!wait) { 1758 return -1; 1759 } 1760 JavaThread *thread = JavaThread::current(); 1761 ThreadBlockInVM tbivm(thread); 1762 1763 bool threadIsSuspended; 1764 do { 1765 thread->set_suspend_equivalent(); 1766 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 1767 1768 local_sem_wait(); 1769 1770 // were we externally suspended while we were waiting? 1771 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 1772 if (threadIsSuspended) { 1773 // 1774 // The semaphore has been incremented, but while we were waiting 1775 // another thread suspended us. We don't want to continue running 1776 // while suspended because that would surprise the thread that 1777 // suspended us. 1778 // 1779 1780 local_sem_post(); 1781 1782 thread->java_suspend_self(); 1783 } 1784 } while (threadIsSuspended); 1785 } 1786 } 1787 1788 int os::signal_lookup() { 1789 return check_pending_signals(false); 1790 } 1791 1792 int os::signal_wait() { 1793 return check_pending_signals(true); 1794 } 1795 1796 //////////////////////////////////////////////////////////////////////////////// 1797 // Virtual Memory 1798 1799 // We need to keep small simple bookkeeping for os::reserve_memory and friends. 1800 1801 #define VMEM_MAPPED 1 1802 #define VMEM_SHMATED 2 1803 1804 struct vmembk_t { 1805 int type; // 1 - mmap, 2 - shmat 1806 char* addr; 1807 size_t size; // Real size, may be larger than usersize. 
size_t pagesize;    // page size of area
  vmembk_t* next;

  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
              " bytes, %d %s pages), %s",
              addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
              (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
                 "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
                 p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
                 " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};

static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;

static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
  assert0(p);
  if (p) {
    MiscUtils::AutoCritSect lck(&vmem.cs);
    p->addr = addr; p->size = size;
    p->pagesize = pagesize;
    p->type = type;
    p->next = vmem.first;
    vmem.first = p;
  }
}

static vmembk_t* vmembk_find(char* addr) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  for (vmembk_t* p = vmem.first; p; p = p->next) {
    if (p->addr <= addr && (p->addr + p->size) > addr) {
      return p;
    }
  }
  return NULL;
}

static void vmembk_remove(vmembk_t* p0) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  assert0(p0);
  assert0(vmem.first); // List should not be empty.
  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
    if (*pp == p0) {
      *pp = p0->next;
      ::free(p0);
      return;
    }
  }
  assert0(false); // Not found?
}

static void vmembk_print_on(outputStream* os) {
  MiscUtils::AutoCritSect lck(&vmem.cs);
  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
    vmi->print_on(os);
    os->cr();
  }
}

// Reserve and attach a section of System V memory.
// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
// address. Failing that, it will attach the memory anywhere.
// If <requested_addr> is NULL, function will attach the memory anywhere.
//
// <alignment_hint> is being ignored by this function. It is very probable however that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
static char* reserve_shmated_memory (
  size_t bytes,
  char* requested_addr,
  size_t alignment_hint) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
             PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
             bytes, requested_addr, alignment_hint);

  // Either give me a wish address or a wish alignment, but not both.
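  // (Overview - the System V shared memory sequence this function performs,
  // reduced to its calls; illustrative only, error handling omitted:
  //
  //   int id = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  //   struct shmid_ds ds;
  //   memset(&ds, 0, sizeof(ds));
  //   ds.shm_pagesize = 64*K;
  //   shmctl(id, SHM_PAGESIZE, &ds);       // AIX-specific: request 64K pages
  //   char* p = (char*) shmat(id, wish_addr, SHM_RND);
  //   shmctl(id, IPC_RMID, NULL);          // remove the id right away - the
  //                                        // attached segment stays usable
  // )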
  assert0(!(requested_addr != NULL && alignment_hint != 0));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
               "Will attach anywhere.", requested_addr);
    // Act like the OS refused to attach there.
    requested_addr = NULL;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_size_up(bytes, 64*K);

  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return NULL;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = 64*K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / (64*K), errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  const int errno_shmat = errno;

  // (A) Right after shmat and before handling shmat errors delete the shm segment.
  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    assert(false, "failed to remove shared memory segment!");
  }

  // Handle shmat error. If we failed to attach, just return.
  if (addr == (char*)-1) {
    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
    return NULL;
  }

  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
  const size_t real_pagesize = os::Aix::query_pagesize(addr);
  if (real_pagesize != shmbuf.shm_pagesize) {
    trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT ".", real_pagesize);
  }

  if (addr) {
    trcVerbose("shm-allocated " PTR_FORMAT " ..
" PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)", 1983 addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize)); 1984 } else { 1985 if (requested_addr != NULL) { 1986 trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr); 1987 } else { 1988 trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size); 1989 } 1990 } 1991 1992 // book-keeping 1993 vmembk_add(addr, size, real_pagesize, VMEM_SHMATED); 1994 assert0(is_aligned_to(addr, os::vm_page_size())); 1995 1996 return addr; 1997 } 1998 1999 static bool release_shmated_memory(char* addr, size_t size) { 2000 2001 trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2002 addr, addr + size - 1); 2003 2004 bool rc = false; 2005 2006 // TODO: is there a way to verify shm size without doing bookkeeping? 2007 if (::shmdt(addr) != 0) { 2008 trcVerbose("error (%d).", errno); 2009 } else { 2010 trcVerbose("ok."); 2011 rc = true; 2012 } 2013 return rc; 2014 } 2015 2016 static bool uncommit_shmated_memory(char* addr, size_t size) { 2017 trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2018 addr, addr + size - 1); 2019 2020 const bool rc = my_disclaim64(addr, size); 2021 2022 if (!rc) { 2023 trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size); 2024 return false; 2025 } 2026 return true; 2027 } 2028 2029 //////////////////////////////// mmap-based routines ///////////////////////////////// 2030 2031 // Reserve memory via mmap. 2032 // If <requested_addr> is given, an attempt is made to attach at the given address. 2033 // Failing that, memory is allocated at any address. 2034 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to 2035 // allocate at an address aligned with the given alignment. Failing that, memory 2036 // is aligned anywhere. 2037 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 2038 trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", " 2039 "alignment_hint " UINTX_FORMAT "...", 2040 bytes, requested_addr, alignment_hint); 2041 2042 // If a wish address is given, but not aligned to 4K page boundary, mmap will fail. 2043 if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) { 2044 trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr); 2045 return NULL; 2046 } 2047 2048 // We must prevent anyone from attaching too close to the 2049 // BRK because that may cause malloc OOM. 2050 if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) { 2051 trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. " 2052 "Will attach anywhere.", requested_addr); 2053 // Act like the OS refused to attach there. 2054 requested_addr = NULL; 2055 } 2056 2057 // Specify one or the other but not both. 2058 assert0(!(requested_addr != NULL && alignment_hint > 0)); 2059 2060 // In 64K mode, we claim the global page size (os::vm_page_size()) 2061 // is 64K. This is one of the few points where that illusion may 2062 // break, because mmap() will always return memory aligned to 4K. So 2063 // we must ensure we only ever return memory aligned to 64k. 2064 if (alignment_hint) { 2065 alignment_hint = lcm(alignment_hint, os::vm_page_size()); 2066 } else { 2067 alignment_hint = os::vm_page_size(); 2068 } 2069 2070 // Size shall always be a multiple of os::vm_page_size (esp. 
in 64K mode). 2071 const size_t size = align_size_up(bytes, os::vm_page_size()); 2072 2073 // alignment: Allocate memory large enough to include an aligned range of the right size and 2074 // cut off the leading and trailing waste pages. 2075 assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above 2076 const size_t extra_size = size + alignment_hint; 2077 2078 // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to 2079 // later use msync(MS_INVALIDATE) (see os::uncommit_memory). 2080 int flags = MAP_ANONYMOUS | MAP_SHARED; 2081 2082 // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what 2083 // it means if wishaddress is given but MAP_FIXED is not set. 2084 // 2085 // Important! Behaviour differs depending on whether SPEC1170 mode is active or not. 2086 // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings. 2087 // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will 2088 // get clobbered. 2089 if (requested_addr != NULL) { 2090 if (!os::Aix::xpg_sus_mode()) { // not SPEC1170 Behaviour 2091 flags |= MAP_FIXED; 2092 } 2093 } 2094 2095 char* addr = (char*)::mmap(requested_addr, extra_size, 2096 PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0); 2097 2098 if (addr == MAP_FAILED) { 2099 trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno); 2100 return NULL; 2101 } 2102 2103 // Handle alignment. 2104 char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint); 2105 const size_t waste_pre = addr_aligned - addr; 2106 char* const addr_aligned_end = addr_aligned + size; 2107 const size_t waste_post = extra_size - waste_pre - size; 2108 if (waste_pre > 0) { 2109 ::munmap(addr, waste_pre); 2110 } 2111 if (waste_post > 0) { 2112 ::munmap(addr_aligned_end, waste_post); 2113 } 2114 addr = addr_aligned; 2115 2116 if (addr) { 2117 trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)", 2118 addr, addr + bytes, bytes); 2119 } else { 2120 if (requested_addr != NULL) { 2121 trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr); 2122 } else { 2123 trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes); 2124 } 2125 } 2126 2127 // bookkeeping 2128 vmembk_add(addr, size, 4*K, VMEM_MAPPED); 2129 2130 // Test alignment, see above. 2131 assert0(is_aligned_to(addr, os::vm_page_size())); 2132 2133 return addr; 2134 } 2135 2136 static bool release_mmaped_memory(char* addr, size_t size) { 2137 assert0(is_aligned_to(addr, os::vm_page_size())); 2138 assert0(is_aligned_to(size, os::vm_page_size())); 2139 2140 trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2141 addr, addr + size - 1); 2142 bool rc = false; 2143 2144 if (::munmap(addr, size) != 0) { 2145 trcVerbose("failed (%d)\n", errno); 2146 rc = false; 2147 } else { 2148 trcVerbose("ok."); 2149 rc = true; 2150 } 2151 2152 return rc; 2153 } 2154 2155 static bool uncommit_mmaped_memory(char* addr, size_t size) { 2156 2157 assert0(is_aligned_to(addr, os::vm_page_size())); 2158 assert0(is_aligned_to(size, os::vm_page_size())); 2159 2160 trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].", 2161 addr, addr + size - 1); 2162 bool rc = false; 2163 2164 // Uncommit mmap memory with msync MS_INVALIDATE. 
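  // (Aside: this is the counterpart to the MAP_SHARED flag chosen in
  // reserve_mmaped_memory() above - msync(MS_INVALIDATE) invalidates the
  // cached pages of a shared mapping, which for an anonymous mapping
  // effectively gives the physical memory back to the OS while keeping the
  // address range mapped. A stand-alone sketch of the idea, illustrative
  // only, error handling omitted:
  //
  //   #include <sys/mman.h>
  //   char* p = (char*) mmap(NULL, 64*1024, PROT_READ|PROT_WRITE,
  //                          MAP_ANONYMOUS|MAP_SHARED, -1, 0);
  //   p[0] = 1;                            // touch: commits a page
  //   msync(p, 64*1024, MS_INVALIDATE);    // discard it; mapping stays valid
  // )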
2165 if (::msync(addr, size, MS_INVALIDATE) != 0) { 2166 trcVerbose("failed (%d)\n", errno); 2167 rc = false; 2168 } else { 2169 trcVerbose("ok."); 2170 rc = true; 2171 } 2172 2173 return rc; 2174 } 2175 2176 int os::vm_page_size() { 2177 // Seems redundant as all get out. 2178 assert(os::Aix::page_size() != -1, "must call os::init"); 2179 return os::Aix::page_size(); 2180 } 2181 2182 // Aix allocates memory by pages. 2183 int os::vm_allocation_granularity() { 2184 assert(os::Aix::page_size() != -1, "must call os::init"); 2185 return os::Aix::page_size(); 2186 } 2187 2188 #ifdef PRODUCT 2189 static void warn_fail_commit_memory(char* addr, size_t size, bool exec, 2190 int err) { 2191 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 2192 ", %d) failed; error='%s' (errno=%d)", addr, size, exec, 2193 os::errno_name(err), err); 2194 } 2195 #endif 2196 2197 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 2198 const char* mesg) { 2199 assert(mesg != NULL, "mesg must be specified"); 2200 if (!pd_commit_memory(addr, size, exec)) { 2201 // Add extra info in product mode for vm_exit_out_of_memory(): 2202 PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);) 2203 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 2204 } 2205 } 2206 2207 bool os::pd_commit_memory(char* addr, size_t size, bool exec) { 2208 2209 assert(is_aligned_to(addr, os::vm_page_size()), 2210 "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2211 p2i(addr), os::vm_page_size()); 2212 assert(is_aligned_to(size, os::vm_page_size()), 2213 "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2214 size, os::vm_page_size()); 2215 2216 vmembk_t* const vmi = vmembk_find(addr); 2217 guarantee0(vmi); 2218 vmi->assert_is_valid_subrange(addr, size); 2219 2220 trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1); 2221 2222 if (UseExplicitCommit) { 2223 // AIX commits memory on touch. So, touch all pages to be committed. 2224 for (char* p = addr; p < (addr + size); p += 4*K) { 2225 *p = '\0'; 2226 } 2227 } 2228 2229 return true; 2230 } 2231 2232 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { 2233 return pd_commit_memory(addr, size, exec); 2234 } 2235 2236 void os::pd_commit_memory_or_exit(char* addr, size_t size, 2237 size_t alignment_hint, bool exec, 2238 const char* mesg) { 2239 // Alignment_hint is ignored on this OS. 2240 pd_commit_memory_or_exit(addr, size, exec, mesg); 2241 } 2242 2243 bool os::pd_uncommit_memory(char* addr, size_t size) { 2244 assert(is_aligned_to(addr, os::vm_page_size()), 2245 "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2246 p2i(addr), os::vm_page_size()); 2247 assert(is_aligned_to(size, os::vm_page_size()), 2248 "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", 2249 size, os::vm_page_size()); 2250 2251 // Dynamically do different things for mmap/shmat. 2252 const vmembk_t* const vmi = vmembk_find(addr); 2253 guarantee0(vmi); 2254 vmi->assert_is_valid_subrange(addr, size); 2255 2256 if (vmi->type == VMEM_SHMATED) { 2257 return uncommit_shmated_memory(addr, size); 2258 } else { 2259 return uncommit_mmaped_memory(addr, size); 2260 } 2261 } 2262 2263 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 2264 // Do not call this; no need to commit stack pages on AIX. 
ShouldNotReachHere();
  return true;
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed() {
  return false;
}

size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

// Reserves memory (mmap- or shmat-based, see below). A wish address is not
// supported; it asserts in debug builds and is ignored in release builds.
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {

  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
  // thereby clobbering old mappings at that place. That is probably
  // not intended, never used and almost certainly an error were it
  // ever to be used this way (to try attaching at a specified address
  // without clobbering old mappings an alternate API exists,
  // os::attempt_reserve_memory_at()).
  // Instead of mimicking the dangerous coding of the other platforms, here I
  // just ignore the requested address (release) or assert (debug).
  assert0(requested_addr == NULL);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  bytes = align_size_up(bytes, os::vm_page_size());
  const size_t alignment_hint0 =
    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;

  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
  if (os::vm_page_size() == 4*K) {
    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
  } else {
    if (bytes >= Use64KPagesThreshold) {
      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
    } else {
      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
    }
  }
}

bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_size_up(size, os::vm_page_size());
  addr = (char *)align_ptr_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use the memory anymore (but it
    //   still costs page table space).
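    // (Example: if [addr, addr+256M) was reserved shmated, then
    //   pd_release_memory(addr, 256M) -> shmdt(), bookkeeping entry removed;
    //   pd_release_memory(addr, 64K)  -> disclaim64() only - the segment and
    //                                    its bookkeeping entry stay alive.)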
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}

static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection won't work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = os::errno_name(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (CanUseSafeFetch32()) {

      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }

      if (!rc) {
        if (os::Aix::on_pase()) {
          // There is an issue on older PASE systems where mprotect() will return success but the
          // memory will not be protected.
          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
          // machines; we only see it rarely, when using mprotect() to protect the guard page of
          // a stack. It is an OS error.
          //
          // A valid strategy is just to try again. This usually works. :-/

          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            // Re-check the protection; this time use the result of the probe.
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
               SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
            rc = (prot & PROT_READ) ? !read_protected_2 : read_protected_2;
          }
        }
      }
    }
  }

  assert(rc == true, "mprotect failed.");

  return rc;
}

// Set protections specified
bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
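  // (Aside on the probe used in checked_mprotect() above: SafeFetch32(p, deflt)
  // returns *p, or deflt if the read faults. Probing twice with two different
  // default values distinguishes "the read really faulted" from "the memory
  // just happens to contain the default value" - only a faulting read returns
  // both sentinels for the same address:
  //
  //   const bool read_protected =
  //     (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
  //      SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
  //   // expected: read_protected iff (prot & PROT_READ) == 0
  // )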
  return checked_mprotect(addr, size, p);
}

bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}

// Large page support

static size_t _large_page_size = 0;

// Enable large page support if OS allows that.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}

char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false;
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}

bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {

  // Always round to os::vm_page_size(), which may be larger than 4K.
  bytes = align_size_up(bytes, os::vm_page_size());

  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
  if (os::vm_page_size() == 4*K) {
    return reserve_mmaped_memory(bytes, requested_addr, 0);
  } else {
    if (bytes >= Use64KPagesThreshold) {
      return reserve_shmated_memory(bytes, requested_addr, 0);
    } else {
      return reserve_mmaped_memory(bytes, requested_addr, 0);
    }
  }
}

size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}

size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

void os::naked_short_sleep(jlong ms) {
  struct timespec req;

  assert(ms < 1000, "Uninterruptible sleep, short time use only");
  req.tv_sec = 0;
  if (ms > 0) {
    req.tv_nsec = (ms % 1000) * 1000000;
  }
  else {
    req.tv_nsec = 1;
  }

  nanosleep(&req, NULL);

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) { // sleep forever ...
    ::sleep(100); // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

void os::naked_yield() {
  sched_yield();
}

////////////////////////////////////////////////////////////////////////////////
// thread priority support

// From AIX manpage to pthread_setschedparam
// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
//   topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
//
// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
// range from 40 to 80, where 40 is the least favored priority and 80
// is the most favored."
//
// (Actually, I doubt this even has an impact on AIX, as we do kernel
// scheduling there; however, this still leaves iSeries.)
//
// We use the same values for AIX and PASE.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};

OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if (!UseThreadPriorities) return OS_OK;
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  param.sched_priority = newpri;
  int ret = pthread_setschedparam(thr, policy, &param);

  if (ret != 0) {
    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
        (int)thr, newpri, ret, os::errno_name(ret));
  }
  return (ret == 0) ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  int ret = pthread_getschedparam(thr, &policy, &param);
  *priority_ptr = param.sched_priority;

  return (ret == 0) ? OS_OK : OS_ERR;
}

// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

////////////////////////////////////////////////////////////////////////////////
// suspend/resume support

// The low-level signal-based suspend/resume support is a remnant from the
// old VM-suspension that used to be for java-suspension, safepoints etc.,
// within hotspot. Now there is a single use-case for this:
// - calling get_thread_pc() on the VMThread by the flat-profiler task
//   that runs in the watcher thread.
// The remaining code is greatly simplified from the more general suspension
// code that used to be used.
2657 // 2658 // The protocol is quite simple: 2659 // - suspend: 2660 // - sends a signal to the target thread 2661 // - polls the suspend state of the osthread using a yield loop 2662 // - target thread signal handler (SR_handler) sets suspend state 2663 // and blocks in sigsuspend until continued 2664 // - resume: 2665 // - sets target osthread state to continue 2666 // - sends signal to end the sigsuspend loop in the SR_handler 2667 // 2668 // Note that the SR_lock plays no role in this suspend/resume protocol, 2669 // but is checked for NULL in SR_handler as a thread termination indicator. 2670 // 2671 2672 static void resume_clear_context(OSThread *osthread) { 2673 osthread->set_ucontext(NULL); 2674 osthread->set_siginfo(NULL); 2675 } 2676 2677 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { 2678 osthread->set_ucontext(context); 2679 osthread->set_siginfo(siginfo); 2680 } 2681 2682 // 2683 // Handler function invoked when a thread's execution is suspended or 2684 // resumed. We have to be careful that only async-safe functions are 2685 // called here (Note: most pthread functions are not async safe and 2686 // should be avoided.) 2687 // 2688 // Note: sigwait() is a more natural fit than sigsuspend() from an 2689 // interface point of view, but sigwait() prevents the signal hander 2690 // from being run. libpthread would get very confused by not having 2691 // its signal handlers run and prevents sigwait()'s use with the 2692 // mutex granting granting signal. 2693 // 2694 // Currently only ever called on the VMThread and JavaThreads (PC sampling). 2695 // 2696 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { 2697 // Save and restore errno to avoid confusing native code with EINTR 2698 // after sigsuspend. 2699 int old_errno = errno; 2700 2701 Thread* thread = Thread::current_or_null_safe(); 2702 assert(thread != NULL, "Missing current thread in SR_handler"); 2703 2704 // On some systems we have seen signal delivery get "stuck" until the signal 2705 // mask is changed as part of thread termination. Check that the current thread 2706 // has not already terminated (via SR_lock()) - else the following assertion 2707 // will fail because the thread is no longer a JavaThread as the ~JavaThread 2708 // destructor has completed. 
2709 2710 if (thread->SR_lock() == NULL) { 2711 return; 2712 } 2713 2714 assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); 2715 2716 OSThread* osthread = thread->osthread(); 2717 2718 os::SuspendResume::State current = osthread->sr.state(); 2719 if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { 2720 suspend_save_context(osthread, siginfo, context); 2721 2722 // attempt to switch the state, we assume we had a SUSPEND_REQUEST 2723 os::SuspendResume::State state = osthread->sr.suspended(); 2724 if (state == os::SuspendResume::SR_SUSPENDED) { 2725 sigset_t suspend_set; // signals for sigsuspend() 2726 2727 // get current set of blocked signals and unblock resume signal 2728 pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); 2729 sigdelset(&suspend_set, SR_signum); 2730 2731 // wait here until we are resumed 2732 while (1) { 2733 sigsuspend(&suspend_set); 2734 2735 os::SuspendResume::State result = osthread->sr.running(); 2736 if (result == os::SuspendResume::SR_RUNNING) { 2737 break; 2738 } 2739 } 2740 2741 } else if (state == os::SuspendResume::SR_RUNNING) { 2742 // request was cancelled, continue 2743 } else { 2744 ShouldNotReachHere(); 2745 } 2746 2747 resume_clear_context(osthread); 2748 } else if (current == os::SuspendResume::SR_RUNNING) { 2749 // request was cancelled, continue 2750 } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { 2751 // ignore 2752 } else { 2753 ShouldNotReachHere(); 2754 } 2755 2756 errno = old_errno; 2757 } 2758 2759 static int SR_initialize() { 2760 struct sigaction act; 2761 char *s; 2762 // Get signal number to use for suspend/resume 2763 if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) { 2764 int sig = ::strtol(s, 0, 10); 2765 if (sig > MAX2(SIGSEGV, SIGBUS) && // See 4355769. 2766 sig < NSIG) { // Must be legal signal and fit into sigflags[]. 2767 SR_signum = sig; 2768 } else { 2769 warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.", 2770 sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum); 2771 } 2772 } 2773 2774 assert(SR_signum > SIGSEGV && SR_signum > SIGBUS, 2775 "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769"); 2776 2777 sigemptyset(&SR_sigset); 2778 sigaddset(&SR_sigset, SR_signum); 2779 2780 // Set up signal handler for suspend/resume. 2781 act.sa_flags = SA_RESTART|SA_SIGINFO; 2782 act.sa_handler = (void (*)(int)) SR_handler; 2783 2784 // SR_signum is blocked by default. 
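  // (Aside: with a NULL 'set' argument, pthread_sigmask() changes nothing and
  // merely copies the current mask into 'oset'; the 'how' value is irrelevant
  // in that case. The call below is therefore a pure query, seeding
  // act.sa_mask with the set of signals this thread currently blocks:
  //
  //   sigset_t current;
  //   pthread_sigmask(SIG_BLOCK, NULL, &current);  // query only, no change
  // )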
2785 pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask); 2786 2787 if (sigaction(SR_signum, &act, 0) == -1) { 2788 return -1; 2789 } 2790 2791 // Save signal flag 2792 os::Aix::set_our_sigflags(SR_signum, act.sa_flags); 2793 return 0; 2794 } 2795 2796 static int SR_finalize() { 2797 return 0; 2798 } 2799 2800 static int sr_notify(OSThread* osthread) { 2801 int status = pthread_kill(osthread->pthread_id(), SR_signum); 2802 assert_status(status == 0, status, "pthread_kill"); 2803 return status; 2804 } 2805 2806 // "Randomly" selected value for how long we want to spin 2807 // before bailing out on suspending a thread, also how often 2808 // we send a signal to a thread we want to resume 2809 static const int RANDOMLY_LARGE_INTEGER = 1000000; 2810 static const int RANDOMLY_LARGE_INTEGER2 = 100; 2811 2812 // returns true on success and false on error - really an error is fatal 2813 // but this seems the normal response to library errors 2814 static bool do_suspend(OSThread* osthread) { 2815 assert(osthread->sr.is_running(), "thread should be running"); 2816 // mark as suspended and send signal 2817 2818 if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { 2819 // failed to switch, state wasn't running? 2820 ShouldNotReachHere(); 2821 return false; 2822 } 2823 2824 if (sr_notify(osthread) != 0) { 2825 // try to cancel, switch to running 2826 2827 os::SuspendResume::State result = osthread->sr.cancel_suspend(); 2828 if (result == os::SuspendResume::SR_RUNNING) { 2829 // cancelled 2830 return false; 2831 } else if (result == os::SuspendResume::SR_SUSPENDED) { 2832 // somehow managed to suspend 2833 return true; 2834 } else { 2835 ShouldNotReachHere(); 2836 return false; 2837 } 2838 } 2839 2840 // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED 2841 2842 for (int n = 0; !osthread->sr.is_suspended(); n++) { 2843 for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) { 2844 os::naked_yield(); 2845 } 2846 2847 // timeout, try to cancel the request 2848 if (n >= RANDOMLY_LARGE_INTEGER) { 2849 os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); 2850 if (cancelled == os::SuspendResume::SR_RUNNING) { 2851 return false; 2852 } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { 2853 return true; 2854 } else { 2855 ShouldNotReachHere(); 2856 return false; 2857 } 2858 } 2859 } 2860 2861 guarantee(osthread->sr.is_suspended(), "Must be suspended"); 2862 return true; 2863 } 2864 2865 static void do_resume(OSThread* osthread) { 2866 //assert(osthread->sr.is_suspended(), "thread should be suspended"); 2867 2868 if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { 2869 // failed to switch to WAKEUP_REQUEST 2870 ShouldNotReachHere(); 2871 return; 2872 } 2873 2874 while (!osthread->sr.is_running()) { 2875 if (sr_notify(osthread) == 0) { 2876 for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) { 2877 for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) { 2878 os::naked_yield(); 2879 } 2880 } 2881 } else { 2882 ShouldNotReachHere(); 2883 } 2884 } 2885 2886 guarantee(osthread->sr.is_running(), "Must be running!"); 2887 } 2888 2889 /////////////////////////////////////////////////////////////////////////////////// 2890 // signal handling (except suspend/resume) 2891 2892 // This routine may be used by user applications as a "hook" to catch signals. 
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);

// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so, pthread_sigmask is more threadsafe for error
  // handling). But success is always 0.
  return rc == 0 ? true : false;
}

// Function to unblock all signals which are, according
// to POSIX, typical program error signals. If they happen while being blocked,
// they typically will bring down the process immediately.
bool unblock_program_error_signals() {
  sigset_t set;
  ::sigemptyset(&set);
  ::sigaddset(&set, SIGILL);
  ::sigaddset(&set, SIGBUS);
  ::sigaddset(&set, SIGFPE);
  ::sigaddset(&set, SIGSEGV);
  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
}

// Renamed from 'signalHandler' to avoid collision with other shared libs.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
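// (For illustration, such a forwarding "hook" handler in application code
// could look like the following sketch - 'app_handler' is a made-up name,
// the forwarding contract is as documented above:
//
//   static void app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* don't abort */)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of the unrecognized signal ...
//   }
//
// installed via sigaction() with SA_SIGINFO|SA_RESTART set, as required.)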
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;

struct sigaction* os::Aix::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if (libjsig_is_loaded) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
  // Creating guard pages is very expensive. Java thread has HotSpot
  // guard pages, so only enable libc guard pages for non-Java threads.
  //
  // Aix can have different page sizes for stack (4K) and heap (64K).
  // As Hotspot knows only one page size, we assume the stack has
  // the same page size as the heap. Returning page_size() here can
  // cause 16 guard pages which we want to avoid. Thus we return 4K
  // which will be rounded to the real page size by the OS.
  return ((thr_type == java_thread || thr_type == os::compiler_thread) ?
          0 : 4*K);
}

struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
  if (sigismember(&sigs, sig)) {
    return &sigact[sig];
  }
  return NULL;
}

void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}

// for diagnostic
int sigflags[NSIG];

int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}

void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}

void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save the flags we set ourselves.
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  void* oldhand2 = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}

// Install signal handlers for the signals that HotSpot needs to
// handle in order to support Java-level exception handling.
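// (Aside: outline of the libjsig handshake used in install_signal_handlers()
// below. libjsig interposes sigaction() and exports three symbols; when they
// can be resolved via dlsym(), the VM brackets its handler installation so
// libjsig can distinguish VM handlers from application handlers:
//
//   JVM_begin_signal_setting();   // from here on, sigaction() calls are the VM's
//   // ... install VM handlers via sigaction() ...
//   JVM_end_signal_setting();
//   struct sigaction* old = JVM_get_signal_action(sig);  // saved user handler
// )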
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig that the jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig that the jvm has finished setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate the signal checker if libjsig is in place, since we
    // trust ourselves; and if a user signal handler is installed, all bets
    // are off anyway.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}

static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
3228 st->print(", sa_mask[0]="); 3229 os::Posix::print_signal_set_short(st, &sa.sa_mask); 3230 3231 address rh = VMError::get_resetted_sighandler(sig); 3232 // Maybe the handler was reset by VMError? 3233 if (rh != NULL) { 3234 handler = rh; 3235 sa.sa_flags = VMError::get_resetted_sigflags(sig); 3236 } 3237 3238 // Print a textual representation of sa_flags. 3239 st->print(", sa_flags="); 3240 os::Posix::print_sa_flags(st, sa.sa_flags); 3241 3242 // Check: is it our handler? 3243 if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) || 3244 handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) { 3245 // It is our signal handler. 3246 // Check the flags: have they been changed from what we set? 3247 if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) { 3248 st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library", 3249 os::Aix::get_our_sigflags(sig)); 3250 } 3251 } 3252 st->cr(); 3253 } 3254 3255 #define DO_SIGNAL_CHECK(sig) \ 3256 if (!sigismember(&check_signal_done, sig)) \ 3257 os::Aix::check_signal_handler(sig) 3258 3259 // This method is a periodic task to check for misbehaving JNI applications 3260 // under CheckJNI; we can add any other periodic checks here. 3261 3262 void os::run_periodic_checks() { 3263 3264 if (!check_signals) return; 3265 3266 // If SIGSEGV or SIGBUS were overridden, that could prevent 3267 // generation of the hs*.log in the event of a crash; debugging 3268 // such a case can be very challenging, so we check the 3269 // following for good measure: 3270 DO_SIGNAL_CHECK(SIGSEGV); 3271 DO_SIGNAL_CHECK(SIGILL); 3272 DO_SIGNAL_CHECK(SIGFPE); 3273 DO_SIGNAL_CHECK(SIGBUS); 3274 DO_SIGNAL_CHECK(SIGPIPE); 3275 DO_SIGNAL_CHECK(SIGXFSZ); 3276 if (UseSIGTRAP) { 3277 DO_SIGNAL_CHECK(SIGTRAP); 3278 } 3279 DO_SIGNAL_CHECK(SIGDANGER); 3280 3281 // ReduceSignalUsage allows the user to override these handlers; 3282 // see the comments at the very top of this file. 3283 if (!ReduceSignalUsage) { 3284 DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL); 3285 DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL); 3286 DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL); 3287 DO_SIGNAL_CHECK(BREAK_SIGNAL); 3288 } 3289 3290 DO_SIGNAL_CHECK(SR_signum); 3291 } 3292 3293 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *); 3294 3295 static os_sigaction_t os_sigaction = NULL; 3296 3297 void os::Aix::check_signal_handler(int sig) { 3298 char buf[O_BUFLEN]; 3299 address jvmHandler = NULL; 3300 3301 struct sigaction act; 3302 if (os_sigaction == NULL) { 3303 // Only trust the default sigaction, in case it has been interposed. 3304 os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction")); 3305 if (os_sigaction == NULL) return; 3306 } 3307 3308 os_sigaction(sig, (struct sigaction*)NULL, &act); 3309 3310 address thisHandler = (act.sa_flags & SA_SIGINFO) 3311 ?
CAST_FROM_FN_PTR(address, act.sa_sigaction) 3312 : CAST_FROM_FN_PTR(address, act.sa_handler); 3313 3314 switch(sig) { 3315 case SIGSEGV: 3316 case SIGBUS: 3317 case SIGFPE: 3318 case SIGPIPE: 3319 case SIGILL: 3320 case SIGXFSZ: 3321 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler); 3322 break; 3323 3324 case SHUTDOWN1_SIGNAL: 3325 case SHUTDOWN2_SIGNAL: 3326 case SHUTDOWN3_SIGNAL: 3327 case BREAK_SIGNAL: 3328 jvmHandler = (address)user_handler(); 3329 break; 3330 3331 default: 3332 if (sig == SR_signum) { 3333 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler); 3334 } else { 3335 return; 3336 } 3337 break; 3338 } 3339 3340 if (thisHandler != jvmHandler) { 3341 tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN)); 3342 tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN)); 3343 tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN)); 3344 // No need to check this sig any longer. 3345 sigaddset(&check_signal_done, sig); 3346 // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN. 3347 if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) { 3348 tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell", 3349 exception_name(sig, buf, O_BUFLEN)); 3350 } 3351 } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) { 3352 tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN)); 3353 tty->print("expected:"); 3354 os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig)); 3355 tty->cr(); 3356 tty->print(" found:"); 3357 os::Posix::print_sa_flags(tty, act.sa_flags); 3358 tty->cr(); 3359 // No need to check this sig any longer. 3360 sigaddset(&check_signal_done, sig); 3361 } 3362 3363 // Dump all the signal handlers. 3364 if (sigismember(&check_signal_done, sig)) { 3365 print_signal_handlers(tty, buf, O_BUFLEN); 3366 } 3367 } 3368 3369 // To install functions for the atexit system call. 3370 extern "C" { 3371 static void perfMemory_exit_helper() { 3372 perfMemory_exit(); 3373 } 3374 } 3375 3376 // This is called _before_ most of the global arguments have been parsed. 3377 void os::init(void) { 3378 // This is basic, we want to know if that ever changes. 3379 // (The shared memory boundary is supposed to be 256M aligned.) 3380 assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected"); 3381 3382 // Record the process break at startup. 3383 g_brk_at_startup = (address) ::sbrk(0); 3384 assert(g_brk_at_startup != (address) -1, "sbrk failed"); 3385 3386 // First off, we need to know whether we run on AIX or PASE, and 3387 // the OS level we run on. 3388 os::Aix::initialize_os_info(); 3389 3390 // Scan environment (SPEC1170 behaviour, etc). 3391 os::Aix::scan_environment(); 3392 3393 // Probe multipage support. 3394 query_multipage_support(); 3395 3396 // Act like we only have one page size by eliminating corner cases which 3397 // we did not support very well anyway. 3398 // We have two input conditions: 3399 // 1) Data segment page size. This is controlled by the linker setting (datapsize) on the 3400 // launcher, and/or by the LDR_CNTRL environment variable. The latter overrules the linker 3401 // setting. 3402 // Data segment page size is important for us because it defines the thread stack page 3403 // size, which is needed for guard page handling, stack banging etc. 3404 // 2) The ability to allocate 64k pages dynamically.
If this is a given, the java heap can 3405 // and should be allocated with 64k pages. 3406 // 3407 // So, we do the following: 3408 // LDR_CNTRL can_use_64K_pages_dynamically what we do remarks 3409 // 4K no 4K old systems (aix 5.2, as/400 v5r4) or new systems with AME activated 3410 // 4k yes 64k (treat 4k stacks as 64k) different loader than java and standard settings 3411 // 64k no --- AIX 5.2 ? --- 3412 // 64k yes 64k new systems and standard java loader (we set datapsize=64k when linking) 3413 3414 // We explicitly leave no option to change the page size, because only upgrading would work, 3415 // not downgrading (if the stack page size is 64k you cannot pretend it's 4k). 3416 3417 if (g_multipage_support.datapsize == 4*K) { 3418 // datapsize = 4K. Data segment, thread stacks are 4K paged. 3419 if (g_multipage_support.can_use_64K_pages) { 3420 // .. but we are able to use 64K pages dynamically. 3421 // This would be typical for java launchers which are not linked 3422 // with datapsize=64K (like, any other launcher but our own). 3423 // 3424 // In this case it would be smart to allocate the java heap with 64K 3425 // pages to get the performance benefit, and to fake 64k pages for the 3426 // data segment (when dealing with thread stacks). 3427 // 3428 // However, leave a possibility to downgrade to 4K, using 3429 // -XX:-Use64KPages. 3430 if (Use64KPages) { 3431 trcVerbose("64K page mode (faked for data segment)"); 3432 Aix::_page_size = 64*K; 3433 } else { 3434 trcVerbose("4K page mode (Use64KPages=off)"); 3435 Aix::_page_size = 4*K; 3436 } 3437 } else { 3438 // .. and not able to allocate 64k pages dynamically. Here, just 3439 // fall back to 4K paged mode and use mmap for everything. 3440 trcVerbose("4K page mode"); 3441 Aix::_page_size = 4*K; 3442 FLAG_SET_ERGO(bool, Use64KPages, false); 3443 } 3444 } else { 3445 // datapsize = 64k. Data segment, thread stacks are 64k paged. 3446 // This normally means that we can allocate 64k pages dynamically. 3447 // (There is one special case where this may be false: EXTSHM=ON, 3448 // but we decided not to support that mode.) 3449 assert0(g_multipage_support.can_use_64K_pages); 3450 Aix::_page_size = 64*K; 3451 trcVerbose("64K page mode"); 3452 FLAG_SET_ERGO(bool, Use64KPages, true); 3453 } 3454 3455 // For now UseLargePages is just ignored. 3456 FLAG_SET_ERGO(bool, UseLargePages, false); 3457 _page_sizes[0] = 0; 3458 3459 // debug trace 3460 trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size())); 3461 3462 // Next, we need to initialize the libo4 and libperfstat libraries. 3463 if (os::Aix::on_pase()) { 3464 os::Aix::initialize_libo4(); 3465 } else { 3466 os::Aix::initialize_libperfstat(); 3467 } 3468 3469 // Reset the perfstat information provided by ODM. 3470 if (os::Aix::on_aix()) { 3471 libperfstat::perfstat_reset(); 3472 } 3473 3474 // Now initialize basic system properties. Note that for some of the values we 3475 // need libperfstat etc. 3476 os::Aix::initialize_system_info(); 3477 3478 clock_tics_per_sec = sysconf(_SC_CLK_TCK); 3479 3480 init_random(1234567); 3481 3482 ThreadCritical::initialize(); 3483 3484 // Main_thread points to the primordial thread. 3485 Aix::_main_thread = pthread_self(); 3486 3487 initial_time_count = os::elapsed_counter(); 3488 } 3489 3490 // This is called _after_ the global arguments have been parsed.
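// Among other things it establishes the safepoint polling page, installs the VM's signal handlers (see install_signal_handlers() above) and, if MaxFDLimit is set, raises the file descriptor limit to the hard maximum.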
3491 jint os::init_2(void) { 3492 3493 if (os::Aix::on_pase()) { 3494 trcVerbose("Running on PASE."); 3495 } else { 3496 trcVerbose("Running on AIX (not PASE)."); 3497 } 3498 3499 trcVerbose("processor count: %d", os::_processor_count); 3500 trcVerbose("physical memory: %lu", Aix::_physical_memory); 3501 3502 // Initially build up the loaded dll map. 3503 LoadedLibraries::reload(); 3504 if (Verbose) { 3505 trcVerbose("Loaded Libraries: "); 3506 LoadedLibraries::print(tty); 3507 } 3508 3509 const int page_size = Aix::page_size(); 3510 const int map_size = page_size; 3511 3512 address map_address = (address) MAP_FAILED; 3513 const int prot = PROT_READ; 3514 const int flags = MAP_PRIVATE|MAP_ANONYMOUS; 3515 3516 // Use optimized addresses for the polling page, 3517 // e.g. map it to a special 32-bit address. 3518 if (OptimizePollingPageLocation) { 3519 // architecture-specific list of address wishes: 3520 address address_wishes[] = { 3521 // AIX: addresses lower than 0x30000000 don't seem to work on AIX. 3522 // PPC64: all address wishes are non-negative 32 bit values where 3523 // the lower 16 bits are all zero. we can load these addresses 3524 // with a single ppc_lis instruction. 3525 (address) 0x30000000, (address) 0x31000000, 3526 (address) 0x32000000, (address) 0x33000000, 3527 (address) 0x40000000, (address) 0x41000000, 3528 (address) 0x42000000, (address) 0x43000000, 3529 (address) 0x50000000, (address) 0x51000000, 3530 (address) 0x52000000, (address) 0x53000000, 3531 (address) 0x60000000, (address) 0x61000000, 3532 (address) 0x62000000, (address) 0x63000000 3533 }; 3534 int address_wishes_length = sizeof(address_wishes)/sizeof(address); 3535 3536 // iterate over the list of address wishes: 3537 for (int i=0; i<address_wishes_length; i++) { 3538 // Try to map with current address wish. 3539 // AIX: AIX needs MAP_FIXED if we provide an address and mmap will 3540 // fail if the address is already mapped. 3541 map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size, 3542 map_size, prot, 3543 flags | MAP_FIXED, 3544 -1, 0); 3545 trcVerbose("SafePoint Polling Page address: %p (wish) => %p", 3546 address_wishes[i], map_address + (ssize_t)page_size); 3547 3548 if (map_address + (ssize_t)page_size == address_wishes[i]) { 3549 // Map succeeded and map_address is at wished address, exit loop. 3550 break; 3551 } 3552 3553 if (map_address != (address) MAP_FAILED) { 3554 // Map succeeded, but polling_page is not at wished address, unmap and continue. 3555 ::munmap(map_address, map_size); 3556 map_address = (address) MAP_FAILED; 3557 } 3558 // Map failed, continue loop. 
3559 } 3560 } // end OptimizePollingPageLocation 3561 3562 if (map_address == (address) MAP_FAILED) { 3563 map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0); 3564 } 3565 guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page"); 3566 os::set_polling_page(map_address); 3567 3568 if (!UseMembar) { 3569 address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 3570 guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page"); 3571 os::set_memory_serialize_page(mem_serialize_page); 3572 3573 trcVerbose("Memory Serialize Page address: %p - %p, size %IX (%IB)", 3574 mem_serialize_page, mem_serialize_page + Aix::page_size(), 3575 Aix::page_size(), Aix::page_size()); 3576 } 3577 3578 // Initialize suspend/resume support - must do this before signal_sets_init(). 3579 if (SR_initialize() != 0) { 3580 perror("SR_initialize failed"); 3581 return JNI_ERR; 3582 } 3583 3584 Aix::signal_sets_init(); 3585 Aix::install_signal_handlers(); 3586 3587 // Check and set minimum stack sizes against command line options. 3588 if (Posix::set_minimum_stack_sizes() == JNI_ERR) { 3589 return JNI_ERR; 3590 } 3591 3592 if (UseNUMA) { 3593 UseNUMA = false; 3594 warning("NUMA optimizations are not available on this OS."); 3595 } 3596 3597 if (MaxFDLimit) { 3598 // Set the number of file descriptors to the maximum. Print an error 3599 // if getrlimit/setrlimit fails, but continue regardless. 3600 struct rlimit nbr_files; 3601 int status = getrlimit(RLIMIT_NOFILE, &nbr_files); 3602 if (status != 0) { 3603 log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno)); 3604 } else { 3605 nbr_files.rlim_cur = nbr_files.rlim_max; 3606 status = setrlimit(RLIMIT_NOFILE, &nbr_files); 3607 if (status != 0) { 3608 log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno)); 3609 } 3610 } 3611 } 3612 3613 if (PerfAllowAtExitRegistration) { 3614 // Only register atexit functions if PerfAllowAtExitRegistration is set. 3615 // Atexit functions can be delayed until process exit time, which 3616 // can be problematic for embedded VM situations. Embedded VMs should 3617 // call DestroyJavaVM() to ensure that VM resources are released. 3618 3619 // Note: the perfMemory_exit_helper atexit function may be removed in 3620 // the future if the appropriate cleanup code can be added to the 3621 // VM_Exit VMOperation's doit method. 3622 if (atexit(perfMemory_exit_helper) != 0) { 3623 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 3624 } 3625 } 3626 3627 return JNI_OK; 3628 } 3629 3630 // Mark the polling page as unreadable. 3631 void os::make_polling_page_unreadable(void) { 3632 if (!guard_memory((char*)_polling_page, Aix::page_size())) { 3633 fatal("Could not disable polling page"); 3634 } 3635 } 3636 3637 // Mark the polling page as readable. 3638 void os::make_polling_page_readable(void) { 3639 // Changed according to os_linux.cpp. 3640 if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) { 3641 fatal("Could not enable polling page at " PTR_FORMAT, _polling_page); 3642 } 3643 } 3644 3645 int os::active_processor_count() { 3646 int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN); 3647 assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check"); 3648 return online_cpus; 3649 } 3650 3651 void os::set_native_thread_name(const char *name) { 3652 // Not yet implemented.
3653 return; 3654 } 3655 3656 bool os::distribute_processes(uint length, uint* distribution) { 3657 // Not yet implemented. 3658 return false; 3659 } 3660 3661 bool os::bind_to_processor(uint processor_id) { 3662 // Not yet implemented. 3663 return false; 3664 } 3665 3666 void os::SuspendedThreadTask::internal_do_task() { 3667 if (do_suspend(_thread->osthread())) { 3668 SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); 3669 do_task(context); 3670 do_resume(_thread->osthread()); 3671 } 3672 } 3673 3674 class PcFetcher : public os::SuspendedThreadTask { 3675 public: 3676 PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} 3677 ExtendedPC result(); 3678 protected: 3679 void do_task(const os::SuspendedThreadTaskContext& context); 3680 private: 3681 ExtendedPC _epc; 3682 }; 3683 3684 ExtendedPC PcFetcher::result() { 3685 guarantee(is_done(), "task is not done yet."); 3686 return _epc; 3687 } 3688 3689 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { 3690 Thread* thread = context.thread(); 3691 OSThread* osthread = thread->osthread(); 3692 if (osthread->ucontext() != NULL) { 3693 _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext()); 3694 } else { 3695 // A NULL context is unexpected; double-check this is the VMThread. 3696 guarantee(thread->is_VM_thread(), "can only be called for VMThread"); 3697 } 3698 } 3699 3700 // Suspends the target using the signal mechanism and then grabs the PC before 3701 // resuming the target. Used by the flat-profiler only. 3702 ExtendedPC os::get_thread_pc(Thread* thread) { 3703 // Make sure that it is called by the watcher thread for the VMThread. 3704 assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); 3705 assert(thread->is_VM_thread(), "Can only be called for VMThread"); 3706 3707 PcFetcher fetcher(thread); 3708 fetcher.run(); 3709 return fetcher.result(); 3710 } 3711 3712 //////////////////////////////////////////////////////////////////////////////// 3713 // debug support 3714 3715 bool os::find(address addr, outputStream* st) { 3716 3717 st->print(PTR_FORMAT ": ", addr); 3718 3719 loaded_module_t lm; 3720 if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL || 3721 LoadedLibraries::find_for_data_address(addr, &lm) != NULL) { 3722 st->print_cr("%s", lm.path); 3723 return true; 3724 } 3725 3726 return false; 3727 } 3728 3729 //////////////////////////////////////////////////////////////////////////////// 3730 // misc 3731 3732 // This does not do anything on AIX. This is basically a hook for being 3733 // able to use structured exception handling (thread-local exception filters) 3734 // on, e.g., Win32.
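// On AIX the wrapper therefore just invokes the target function directly.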
3735 void 3736 os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, 3737 JavaCallArguments* args, Thread* thread) { 3738 f(value, method, args, thread); 3739 } 3740 3741 void os::print_statistics() { 3742 } 3743 3744 bool os::message_box(const char* title, const char* message) { 3745 int i; 3746 fdStream err(defaultStream::error_fd()); 3747 for (i = 0; i < 78; i++) err.print_raw("="); 3748 err.cr(); 3749 err.print_raw_cr(title); 3750 for (i = 0; i < 78; i++) err.print_raw("-"); 3751 err.cr(); 3752 err.print_raw_cr(message); 3753 for (i = 0; i < 78; i++) err.print_raw("="); 3754 err.cr(); 3755 3756 char buf[16]; 3757 // Prevent process from exiting upon "read error" without consuming all CPU 3758 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } 3759 3760 return buf[0] == 'y' || buf[0] == 'Y'; 3761 } 3762 3763 int os::stat(const char *path, struct stat *sbuf) { 3764 char pathbuf[MAX_PATH]; 3765 if (strlen(path) > MAX_PATH - 1) { 3766 errno = ENAMETOOLONG; 3767 return -1; 3768 } 3769 os::native_path(strcpy(pathbuf, path)); 3770 return ::stat(pathbuf, sbuf); 3771 } 3772 3773 // Is a (classpath) directory empty? 3774 bool os::dir_is_empty(const char* path) { 3775 DIR *dir = NULL; 3776 struct dirent *ptr; 3777 3778 dir = opendir(path); 3779 if (dir == NULL) return true; 3780 3781 /* Scan the directory */ 3782 bool result = true; 3783 char buf[sizeof(struct dirent) + MAX_PATH]; 3784 while (result && (ptr = ::readdir(dir)) != NULL) { 3785 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 3786 result = false; 3787 } 3788 } 3789 closedir(dir); 3790 return result; 3791 } 3792 3793 // This code originates from JDK's sysOpen and open64_w 3794 // from src/solaris/hpi/src/system_md.c 3795 3796 int os::open(const char *path, int oflag, int mode) { 3797 3798 if (strlen(path) > MAX_PATH - 1) { 3799 errno = ENAMETOOLONG; 3800 return -1; 3801 } 3802 int fd; 3803 3804 fd = ::open64(path, oflag, mode); 3805 if (fd == -1) return -1; 3806 3807 // If the open succeeded, the file might still be a directory. 3808 { 3809 struct stat64 buf64; 3810 int ret = ::fstat64(fd, &buf64); 3811 int st_mode = buf64.st_mode; 3812 3813 if (ret != -1) { 3814 if ((st_mode & S_IFMT) == S_IFDIR) { 3815 errno = EISDIR; 3816 ::close(fd); 3817 return -1; 3818 } 3819 } else { 3820 ::close(fd); 3821 return -1; 3822 } 3823 } 3824 3825 // All file descriptors that are opened in the JVM and not 3826 // specifically destined for a subprocess should have the 3827 // close-on-exec flag set. If we don't set it, then careless 3rd 3828 // party native code might fork and exec without closing all 3829 // appropriate file descriptors (e.g. as we do in closeDescriptors in 3830 // UNIXProcess.c), and this in turn might: 3831 // 3832 // - cause end-of-file to fail to be detected on some file 3833 // descriptors, resulting in mysterious hangs, or 3834 // 3835 // - might cause an fopen in the subprocess to fail on a system 3836 // suffering from bug 1085341. 3837 // 3838 // (Yes, the default setting of the close-on-exec flag is a Unix 3839 // design flaw.) 
3840 // 3841 // See: 3842 // 1085341: 32-bit stdio routines should support file descriptors >255 3843 // 4843136: (process) pipe file descriptor from Runtime.exec not being closed 3844 // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 3845 #ifdef FD_CLOEXEC 3846 { 3847 int flags = ::fcntl(fd, F_GETFD); 3848 if (flags != -1) 3849 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); 3850 } 3851 #endif 3852 3853 return fd; 3854 } 3855 3856 // create binary file, rewriting existing file if required 3857 int os::create_binary_file(const char* path, bool rewrite_existing) { 3858 int oflags = O_WRONLY | O_CREAT; 3859 if (!rewrite_existing) { 3860 oflags |= O_EXCL; 3861 } 3862 return ::open64(path, oflags, S_IREAD | S_IWRITE); 3863 } 3864 3865 // return current position of file pointer 3866 jlong os::current_file_offset(int fd) { 3867 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); 3868 } 3869 3870 // move file pointer to the specified offset 3871 jlong os::seek_to_file_offset(int fd, jlong offset) { 3872 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); 3873 } 3874 3875 // This code originates from JDK's sysAvailable 3876 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c 3877 3878 int os::available(int fd, jlong *bytes) { 3879 jlong cur, end; 3880 int mode; 3881 struct stat64 buf64; 3882 3883 if (::fstat64(fd, &buf64) >= 0) { 3884 mode = buf64.st_mode; 3885 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { 3886 int n; 3887 if (::ioctl(fd, FIONREAD, &n) >= 0) { 3888 *bytes = n; 3889 return 1; 3890 } 3891 } 3892 } 3893 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) { 3894 return 0; 3895 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) { 3896 return 0; 3897 } else if (::lseek64(fd, cur, SEEK_SET) == -1) { 3898 return 0; 3899 } 3900 *bytes = end - cur; 3901 return 1; 3902 } 3903 3904 // Map a block of memory. 3905 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 3906 char *addr, size_t bytes, bool read_only, 3907 bool allow_exec) { 3908 int prot; 3909 int flags = MAP_PRIVATE; 3910 3911 if (read_only) { 3912 prot = PROT_READ; 3913 flags = MAP_SHARED; 3914 } else { 3915 prot = PROT_READ | PROT_WRITE; 3916 flags = MAP_PRIVATE; 3917 } 3918 3919 if (allow_exec) { 3920 prot |= PROT_EXEC; 3921 } 3922 3923 if (addr != NULL) { 3924 flags |= MAP_FIXED; 3925 } 3926 3927 // Allow anonymous mappings if 'fd' is -1. 3928 if (fd == -1) { 3929 flags |= MAP_ANONYMOUS; 3930 } 3931 3932 char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags, 3933 fd, file_offset); 3934 if (mapped_address == MAP_FAILED) { 3935 return NULL; 3936 } 3937 return mapped_address; 3938 } 3939 3940 // Remap a block of memory. 3941 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 3942 char *addr, size_t bytes, bool read_only, 3943 bool allow_exec) { 3944 // same as map_memory() on this OS 3945 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 3946 allow_exec); 3947 } 3948 3949 // Unmap a block of memory. 3950 bool os::pd_unmap_memory(char* addr, size_t bytes) { 3951 return munmap(addr, bytes) == 0; 3952 } 3953 3954 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 3955 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 3956 // of a thread. 3957 // 3958 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 3959 // the fast estimate available on the platform. 
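// All times are returned in nanoseconds. On AIX they are derived from the kernel thread's rusage as returned by getthrds64() (see thread_cpu_time_unchecked() below), i.e. time_ns = tv_sec * 1000000000 + tv_usec * 1000.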
3960 3961 jlong os::current_thread_cpu_time() { 3962 // Return user + sys since the cost is the same. 3963 const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */); 3964 assert(n >= 0, "negative CPU time"); 3965 return n; 3966 } 3967 3968 jlong os::thread_cpu_time(Thread* thread) { 3969 // Consistent with what current_thread_cpu_time() returns. 3970 const jlong n = os::thread_cpu_time(thread, true /* user + sys */); 3971 assert(n >= 0, "negative CPU time"); 3972 return n; 3973 } 3974 3975 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 3976 const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 3977 assert(n >= 0, "negative CPU time"); 3978 return n; 3979 } 3980 3981 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) { 3982 bool error = false; 3983 3984 jlong sys_time = 0; 3985 jlong user_time = 0; 3986 3987 // Reimplemented using getthrds64(). 3988 // 3989 // Works like this: 3990 // For the thread in question, get the kernel thread id. Then get the 3991 // kernel thread statistics using that id. 3992 // 3993 // Of course this only works when no pthread scheduling is used, 3994 // i.e. there is a 1:1 relationship between pthreads and kernel threads. 3995 // On AIX, see the AIXTHREAD_SCOPE variable. 3996 3997 pthread_t pthtid = thread->osthread()->pthread_id(); 3998 3999 // Retrieve the kernel thread id for the pthread: 4000 tid64_t tid = 0; 4001 struct __pthrdsinfo pinfo; 4002 // I just love those otherworldly IBM APIs which force me to hand down 4003 // dummy buffers for stuff I don't care about... 4004 char dummy[1]; 4005 int dummy_size = sizeof(dummy); 4006 if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo), 4007 dummy, &dummy_size) == 0) { 4008 tid = pinfo.__pi_tid; 4009 } else { 4010 tty->print_cr("pthread_getthrds_np failed."); 4011 error = true; 4012 } 4013 4014 // Retrieve kernel timing info for that kernel thread. 4015 if (!error) { 4016 struct thrdentry64 thrdentry; 4017 if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) { 4018 sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL; 4019 user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL; 4020 } else { 4021 tty->print_cr("getthrds64 failed."); 4022 error = true; 4023 } 4024 } 4025 4026 if (p_sys_time) { 4027 *p_sys_time = sys_time; 4028 } 4029 4030 if (p_user_time) { 4031 *p_user_time = user_time; 4032 } 4033 4034 if (error) { 4035 return false; 4036 } 4037 4038 return true; 4039 } 4040 4041 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { 4042 jlong sys_time; 4043 jlong user_time; 4044 4045 if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) { 4046 return -1; 4047 } 4048 4049 return user_sys_cpu_time ?
sys_time + user_time : user_time; 4050 } 4051 4052 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4053 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 4054 info_ptr->may_skip_backward = false; // elapsed time not wall time 4055 info_ptr->may_skip_forward = false; // elapsed time not wall time 4056 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4057 } 4058 4059 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4060 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits 4061 info_ptr->may_skip_backward = false; // elapsed time not wall time 4062 info_ptr->may_skip_forward = false; // elapsed time not wall time 4063 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4064 } 4065 4066 bool os::is_thread_cpu_time_supported() { 4067 return true; 4068 } 4069 4070 // System loadavg support. Returns -1 if the load average cannot be obtained. 4071 // For now just return the system wide load average (no processor sets). 4072 int os::loadavg(double values[], int nelem) { 4073 4074 guarantee(nelem >= 0 && nelem <= 3, "argument error"); 4075 guarantee(values, "argument error"); 4076 4077 if (os::Aix::on_pase()) { 4078 4079 // AS/400 PASE: use the libo4 porting library. 4080 double v[3] = { 0.0, 0.0, 0.0 }; 4081 4082 if (libo4::get_load_avg(v, v + 1, v + 2)) { 4083 for (int i = 0; i < nelem; i++) { 4084 values[i] = v[i]; 4085 } 4086 return nelem; 4087 } else { 4088 return -1; 4089 } 4090 4091 } else { 4092 4093 // AIX: use libperfstat. 4094 libperfstat::cpuinfo_t ci; 4095 if (libperfstat::get_cpuinfo(&ci)) { 4096 for (int i = 0; i < nelem; i++) { 4097 values[i] = ci.loadavg[i]; 4098 } 4099 } else { 4100 return -1; 4101 } 4102 return nelem; 4103 } 4104 } 4105 4106 void os::pause() { 4107 char filename[MAX_PATH]; 4108 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4109 jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); 4110 } else { 4111 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4112 } 4113 4114 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4115 if (fd != -1) { 4116 struct stat buf; 4117 ::close(fd); 4118 while (::stat(filename, &buf) == 0) { 4119 (void)::poll(NULL, 0, 100); 4120 } 4121 } else { 4122 trcVerbose("Could not open pause file '%s', continuing immediately.", filename); 4123 } 4124 } 4125 4126 bool os::Aix::is_primordial_thread() { 4127 if (pthread_self() == (pthread_t)1) { 4128 return true; 4129 } else { 4130 return false; 4131 } 4132 } 4133 4134 // OS recognition (PASE/AIX, OS level). Call this before calling any 4135 // of the Aix::on_pase(), Aix::os_version() static functions. 4136 void os::Aix::initialize_os_info() { 4137 4138 assert(_on_pase == -1 && _os_version == 0, "already called."); 4139 4140 struct utsname uts; 4141 memset(&uts, 0, sizeof(uts)); 4142 strcpy(uts.sysname, "?"); 4143 if (::uname(&uts) == -1) { 4144 trcVerbose("uname failed (%d)", errno); 4145 guarantee(0, "Could not determine whether we run on AIX or PASE"); 4146 } else { 4147 trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" " 4148 "node \"%s\" machine \"%s\"\n", 4149 uts.sysname, uts.version, uts.release, uts.nodename, uts.machine); 4150 const int major = atoi(uts.version); 4151 assert(major > 0, "invalid OS version"); 4152 const int minor = atoi(uts.release); 4153 assert(minor > 0, "invalid OS release"); 4154 _os_version = (major << 24) | (minor << 16); 4155 char ver_str[20] = {0}; 4156 const char* name_str = "unknown OS"; 4157 if (strcmp(uts.sysname,
"OS400") == 0) { 4158 // We run on AS/400 PASE. We do not support versions older than V5R4M0. 4159 _on_pase = 1; 4160 if (os_version_short() < 0x0504) { 4161 trcVerbose("OS/400 releases older than V5R4M0 not supported."); 4162 assert(false, "OS/400 release too old."); 4163 } 4164 name_str = "OS/400 (pase)"; 4165 jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor); 4166 } else if (strcmp(uts.sysname, "AIX") == 0) { 4167 // We run on AIX. We do not support versions older than AIX 5.3. 4168 _on_pase = 0; 4169 // Determine detailed AIX version: Version, Release, Modification, Fix Level. 4170 odmWrapper::determine_os_kernel_version(&_os_version); 4171 if (os_version_short() < 0x0503) { 4172 trcVerbose("AIX release older than AIX 5.3 not supported."); 4173 assert(false, "AIX release too old."); 4174 } 4175 name_str = "AIX"; 4176 jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u", 4177 major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF); 4178 } else { 4179 assert(false, name_str); 4180 } 4181 trcVerbose("We run on %s %s", name_str, ver_str); 4182 } 4183 4184 guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release"); 4185 } // end: os::Aix::initialize_os_info() 4186 4187 // Scan environment for important settings which might effect the VM. 4188 // Trace out settings. Warn about invalid settings and/or correct them. 4189 // 4190 // Must run after os::Aix::initialue_os_info(). 4191 void os::Aix::scan_environment() { 4192 4193 char* p; 4194 int rc; 4195 4196 // Warn explicity if EXTSHM=ON is used. That switch changes how 4197 // System V shared memory behaves. One effect is that page size of 4198 // shared memory cannot be change dynamically, effectivly preventing 4199 // large pages from working. 4200 // This switch was needed on AIX 32bit, but on AIX 64bit the general 4201 // recommendation is (in OSS notes) to switch it off. 4202 p = ::getenv("EXTSHM"); 4203 trcVerbose("EXTSHM=%s.", p ? p : "<unset>"); 4204 if (p && strcasecmp(p, "ON") == 0) { 4205 _extshm = 1; 4206 trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***"); 4207 if (!AllowExtshm) { 4208 // We allow under certain conditions the user to continue. However, we want this 4209 // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means 4210 // that the VM is not able to allocate 64k pages for the heap. 4211 // We do not want to run with reduced performance. 4212 vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment."); 4213 } 4214 } else { 4215 _extshm = 0; 4216 } 4217 4218 // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs. 4219 // Not tested, not supported. 4220 // 4221 // Note that it might be worth the trouble to test and to require it, if only to 4222 // get useful return codes for mprotect. 4223 // 4224 // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before 4225 // exec() ? before loading the libjvm ? ....) 4226 p = ::getenv("XPG_SUS_ENV"); 4227 trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>"); 4228 if (p && strcmp(p, "ON") == 0) { 4229 _xpg_sus_mode = 1; 4230 trcVerbose("Unsupported setting: XPG_SUS_ENV=ON"); 4231 // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to 4232 // clobber address ranges. If we ever want to support that, we have to do some 4233 // testing first. 
4234 guarantee(false, "XPG_SUS_ENV=ON not supported"); 4235 } else { 4236 _xpg_sus_mode = 0; 4237 } 4238 4239 if (os::Aix::on_pase()) { 4240 p = ::getenv("QIBM_MULTI_THREADED"); 4241 trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>"); 4242 } 4243 4244 p = ::getenv("LDR_CNTRL"); 4245 trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>"); 4246 if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) { 4247 if (p && ::strstr(p, "TEXTPSIZE")) { 4248 trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. " 4249 "You may experience hangs or crashes on OS/400 V7R1."); 4250 } 4251 } 4252 4253 p = ::getenv("AIXTHREAD_GUARDPAGES"); 4254 trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>"); 4255 4256 } // end: os::Aix::scan_environment() 4257 4258 // PASE: initialize the libo4 library (PASE porting library). 4259 void os::Aix::initialize_libo4() { 4260 guarantee(os::Aix::on_pase(), "OS/400 only."); 4261 if (!libo4::init()) { 4262 trcVerbose("libo4 initialization failed."); 4263 assert(false, "libo4 initialization failed"); 4264 } else { 4265 trcVerbose("libo4 initialized."); 4266 } 4267 } 4268 4269 // AIX: initialize the libperfstat library. 4270 void os::Aix::initialize_libperfstat() { 4271 assert(os::Aix::on_aix(), "AIX only"); 4272 if (!libperfstat::init()) { 4273 trcVerbose("libperfstat initialization failed."); 4274 assert(false, "libperfstat initialization failed"); 4275 } else { 4276 trcVerbose("libperfstat initialized."); 4277 } 4278 } 4279 4280 ///////////////////////////////////////////////////////////////////////////// 4281 // thread stack 4282 4283 // Function to query the current stack size using pthread_getthrds_np. 4284 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) { 4285 // This only works when invoked on a pthread. As we agreed not to use 4286 // the primordial thread anyway, we assert here. 4287 guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread"); 4288 4289 // Information about this api can be found (a) in the pthread.h header and 4290 // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm 4291 // 4292 // The use of this API to find out the current stack is not really well defined. 4293 // But after a lot of tries and asking IBM about it, I concluded that it is safe 4294 // enough for cases where I let the pthread library create its stacks. For cases 4295 // where I create my own stack and pass it to pthread_create, it does not seem to 4296 // work (the returned stack size in that case is 0). 4297 4298 pthread_t tid = pthread_self(); 4299 struct __pthrdsinfo pinfo; 4300 char dummy[1]; // Just needed to satisfy pthread_getthrds_np. 4301 int dummy_size = sizeof(dummy); 4302 4303 memset(&pinfo, 0, sizeof(pinfo)); 4304 4305 const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo, 4306 sizeof(pinfo), dummy, &dummy_size); 4307 4308 if (rc != 0) { 4309 assert0(false); 4310 trcVerbose("pthread_getthrds_np failed (%d)", rc); 4311 return false; 4312 } 4313 guarantee0(pinfo.__pi_stackend); 4314 4315 // The following may happen when invoking pthread_getthrds_np on a pthread 4316 // running on a user provided stack (when handing down a stack to 4317 // pthread_create, see pthread_attr_setstackaddr). 4318 // Not sure what to do then.
4319 4320 guarantee0(pinfo.__pi_stacksize); 4321 4322 // Note: we get three values from pthread_getthrds_np: 4323 // __pi_stackaddr, __pi_stacksize, __pi_stackend 4324 // 4325 // high addr --------------------- 4326 // 4327 // | pthread internal data, like ~2K 4328 // | 4329 // | --------------------- __pi_stackend (usually not page aligned, (xxxxF890)) 4330 // | 4331 // | 4332 // | 4333 // | 4334 // | 4335 // | 4336 // | --------------------- (__pi_stackend - __pi_stacksize) 4337 // | 4338 // | padding to align the following AIX guard pages, if enabled. 4339 // | 4340 // V --------------------- __pi_stackaddr 4341 // 4342 // low addr AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0) 4343 // 4344 4345 address stack_base = (address)(pinfo.__pi_stackend); 4346 address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr, 4347 os::vm_page_size()); 4348 size_t stack_size = stack_base - stack_low_addr; 4349 4350 if (p_stack_base) { 4351 *p_stack_base = stack_base; 4352 } 4353 4354 if (p_stack_size) { 4355 *p_stack_size = stack_size; 4356 } 4357 4358 return true; 4359 } 4360 4361 // Get the current stack base from the OS (actually, the pthread library). 4362 address os::current_stack_base() { 4363 address p; 4364 query_stack_dimensions(&p, 0); 4365 return p; 4366 } 4367 4368 // Get the current stack size from the OS (actually, the pthread library). 4369 size_t os::current_stack_size() { 4370 size_t s; 4371 query_stack_dimensions(0, &s); 4372 return s; 4373 } 4374 4375 // Refer to the comments in os_solaris.cpp park-unpark. 4376 4377 // utility to compute the abstime argument to timedwait: 4378 // millis is the relative timeout time 4379 // abstime will be the absolute timeout time 4380 // TODO: replace compute_abstime() with unpackTime() 4381 4382 static struct timespec* compute_abstime(timespec* abstime, jlong millis) { 4383 if (millis < 0) millis = 0; 4384 struct timeval now; 4385 int status = gettimeofday(&now, NULL); 4386 assert(status == 0, "gettimeofday"); 4387 jlong seconds = millis / 1000; 4388 millis %= 1000; 4389 if (seconds > 50000000) { // see man cond_timedwait(3T) 4390 seconds = 50000000; 4391 } 4392 abstime->tv_sec = now.tv_sec + seconds; 4393 long usec = now.tv_usec + millis * 1000; 4394 if (usec >= 1000000) { 4395 abstime->tv_sec += 1; 4396 usec -= 1000000; 4397 } 4398 abstime->tv_nsec = usec * 1000; 4399 return abstime; 4400 } 4401 4402 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately. 4403 // Conceptually TryPark() should be equivalent to park(0). 4404 4405 int os::PlatformEvent::TryPark() { 4406 for (;;) { 4407 const int v = _Event; 4408 guarantee ((v == 0) || (v == 1), "invariant"); 4409 if (Atomic::cmpxchg (0, &_Event, v) == v) return v; 4410 } 4411 } 4412 4413 void os::PlatformEvent::park() { // AKA "down()" 4414 // Invariant: Only the thread associated with the Event/PlatformEvent 4415 // may call park(). 4416 // TODO: assert that _Assoc != NULL or _Assoc == Self 4417 int v; 4418 for (;;) { 4419 v = _Event; 4420 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break; 4421 } 4422 guarantee (v >= 0, "invariant"); 4423 if (v == 0) { 4424 // Do this the hard way by blocking ... 
4425 int status = pthread_mutex_lock(_mutex); 4426 assert_status(status == 0, status, "mutex_lock"); 4427 guarantee (_nParked == 0, "invariant"); 4428 ++ _nParked; 4429 while (_Event < 0) { 4430 status = pthread_cond_wait(_cond, _mutex); 4431 assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait"); 4432 } 4433 -- _nParked; 4434 4435 // In theory we could move the ST of 0 into _Event past the unlock(), 4436 // but then we'd need a MEMBAR after the ST. 4437 _Event = 0; 4438 status = pthread_mutex_unlock(_mutex); 4439 assert_status(status == 0, status, "mutex_unlock"); 4440 } 4441 guarantee (_Event >= 0, "invariant"); 4442 } 4443 4444 int os::PlatformEvent::park(jlong millis) { 4445 guarantee (_nParked == 0, "invariant"); 4446 4447 int v; 4448 for (;;) { 4449 v = _Event; 4450 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break; 4451 } 4452 guarantee (v >= 0, "invariant"); 4453 if (v != 0) return OS_OK; 4454 4455 // We do this the hard way, by blocking the thread. 4456 // Consider enforcing a minimum timeout value. 4457 struct timespec abst; 4458 compute_abstime(&abst, millis); 4459 4460 int ret = OS_TIMEOUT; 4461 int status = pthread_mutex_lock(_mutex); 4462 assert_status(status == 0, status, "mutex_lock"); 4463 guarantee (_nParked == 0, "invariant"); 4464 ++_nParked; 4465 4466 // Object.wait(timo) will return because of 4467 // (a) notification 4468 // (b) timeout 4469 // (c) thread.interrupt 4470 // 4471 // Thread.interrupt and object.notify{All} both call Event::set. 4472 // That is, we treat thread.interrupt as a special case of notification. 4473 // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false. 4474 // We assume all ETIME returns are valid. 4475 // 4476 // TODO: properly differentiate simultaneous notify+interrupt. 4477 // In that case, we should propagate the notify to another waiter. 4478 4479 while (_Event < 0) { 4480 status = pthread_cond_timedwait(_cond, _mutex, &abst); 4481 assert_status(status == 0 || status == ETIMEDOUT, 4482 status, "cond_timedwait"); 4483 if (!FilterSpuriousWakeups) break; // previous semantics 4484 if (status == ETIMEDOUT) break; 4485 // We consume and ignore EINTR and spurious wakeups. 4486 } 4487 --_nParked; 4488 if (_Event >= 0) { 4489 ret = OS_OK; 4490 } 4491 _Event = 0; 4492 status = pthread_mutex_unlock(_mutex); 4493 assert_status(status == 0, status, "mutex_unlock"); 4494 assert (_nParked == 0, "invariant"); 4495 return ret; 4496 } 4497 4498 void os::PlatformEvent::unpark() { 4499 int v, AnyWaiters; 4500 for (;;) { 4501 v = _Event; 4502 if (v > 0) { 4503 // The LD of _Event could have reordered or be satisfied 4504 // by a read-aside from this processor's write buffer. 4505 // To avoid problems execute a barrier and then 4506 // ratify the value. 4507 OrderAccess::fence(); 4508 if (_Event == v) return; 4509 continue; 4510 } 4511 if (Atomic::cmpxchg (v+1, &_Event, v) == v) break; 4512 } 4513 if (v < 0) { 4514 // Wait for the thread associated with the event to vacate. 4515 int status = pthread_mutex_lock(_mutex); 4516 assert_status(status == 0, status, "mutex_lock"); 4517 AnyWaiters = _nParked; 4518 4519 if (AnyWaiters != 0) { 4520 // We signal while still holding the mutex, since on this platform 4521 // the mutex should be locked for pthread_cond_signal() (see below). 4522 status = pthread_cond_signal(_cond); 4523 assert_status(status == 0, status, "cond_signal"); 4524 } 4525 // Mutex should be locked for pthread_cond_signal(_cond).
4526 status = pthread_mutex_unlock(_mutex); 4527 assert_status(status == 0, status, "mutex_unlock"); 4528 } 4529 4530 // Note: for "immortal" Events it would also be safe to signal() _after_ 4531 // dropping the lock; that avoids a common class of futile wakeups. In rare 4532 // circumstances this can cause a thread to return prematurely from 4533 // cond_{timed}wait() but the spurious wakeup is benign and the victim will 4534 // simply re-test the condition and re-park itself. 4535 } 4536 4537 4538 // JSR166 4539 // ------------------------------------------------------- 4540 4541 // 4542 // The solaris and linux implementations of park/unpark are fairly 4543 // conservative for now, but can be improved. They currently use a 4544 // mutex/condvar pair, plus a count. 4545 // Park decrements the count if > 0, else does a condvar wait. Unpark 4546 // sets the count to 1 and signals the condvar. Only one thread ever waits 4547 // on the condvar. Contention seen when trying to park implies that someone 4548 // is unparking you, so don't wait. And spurious returns are fine, so there 4549 // is no need to track notifications. 4550 // 4551 4552 #define MAX_SECS 100000000 4553 // 4554 // This code is common to linux and solaris and will be moved to a 4555 // common place in dolphin. 4556 // 4557 // The passed in time value is either a relative time in nanoseconds 4558 // or an absolute time in milliseconds. Either way it has to be unpacked 4559 // into suitable seconds and nanoseconds components and stored in the 4560 // given timespec structure. 4561 // Given that time is a 64-bit value and the time_t used in the timespec is only 4562 // a signed-32-bit value (except on 64-bit Linux) we have to watch for 4563 // overflow if times way in the future are given. Further, on Solaris versions 4564 // prior to 10 there is a restriction (see cond_timedwait) that the specified 4565 // number of seconds, in abstime, is less than current_time + 100,000,000. 4566 // As it will be 28 years before "now + 100000000" will overflow we can 4567 // ignore overflow and just impose a hard-limit on seconds using the value 4568 // of "now + 100,000,000". This places a limit on the timeout of about 3.17 4569 // years from "now".
4570 // 4571 4572 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) { 4573 assert (time > 0, "unpackTime"); 4574 4575 struct timeval now; 4576 int status = gettimeofday(&now, NULL); 4577 assert(status == 0, "gettimeofday"); 4578 4579 time_t max_secs = now.tv_sec + MAX_SECS; 4580 4581 if (isAbsolute) { 4582 jlong secs = time / 1000; 4583 if (secs > max_secs) { 4584 absTime->tv_sec = max_secs; 4585 } 4586 else { 4587 absTime->tv_sec = secs; 4588 } 4589 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; 4590 } 4591 else { 4592 jlong secs = time / NANOSECS_PER_SEC; 4593 if (secs >= MAX_SECS) { 4594 absTime->tv_sec = max_secs; 4595 absTime->tv_nsec = 0; 4596 } 4597 else { 4598 absTime->tv_sec = now.tv_sec + secs; 4599 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; 4600 if (absTime->tv_nsec >= NANOSECS_PER_SEC) { 4601 absTime->tv_nsec -= NANOSECS_PER_SEC; 4602 ++absTime->tv_sec; // note: this must be <= max_secs 4603 } 4604 } 4605 } 4606 assert(absTime->tv_sec >= 0, "tv_sec < 0"); 4607 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs"); 4608 assert(absTime->tv_nsec >= 0, "tv_nsec < 0"); 4609 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec"); 4610 } 4611 4612 void Parker::park(bool isAbsolute, jlong time) { 4613 // Optional fast-path check: 4614 // Return immediately if a permit is available. 4615 if (_counter > 0) { 4616 _counter = 0; 4617 OrderAccess::fence(); 4618 return; 4619 } 4620 4621 Thread* thread = Thread::current(); 4622 assert(thread->is_Java_thread(), "Must be JavaThread"); 4623 JavaThread *jt = (JavaThread *)thread; 4624 4625 // Optional optimization -- avoid state transitions if there's an interrupt pending. 4626 // Check for an interrupt before trying to wait. 4627 if (Thread::is_interrupted(thread, false)) { 4628 return; 4629 } 4630 4631 // Next, demultiplex/decode time arguments. 4632 timespec absTime; 4633 if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all 4634 return; 4635 } 4636 if (time > 0) { 4637 unpackTime(&absTime, isAbsolute, time); 4638 } 4639 4640 // Enter safepoint region 4641 // Beware of deadlocks such as 6317397. 4642 // The per-thread Parker:: mutex is a classic leaf-lock. 4643 // In particular a thread must never block on the Threads_lock while 4644 // holding the Parker:: mutex. If safepoints are pending, both the 4645 // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. 4646 ThreadBlockInVM tbivm(jt); 4647 4648 // Don't wait if we cannot get the lock, since interference arises from 4649 // unblocking. Also, check for an interrupt before trying to wait. 4650 if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) { 4651 return; 4652 } 4653 4654 int status; 4655 if (_counter > 0) { // no wait needed 4656 _counter = 0; 4657 status = pthread_mutex_unlock(_mutex); 4658 assert (status == 0, "invariant"); 4659 OrderAccess::fence(); 4660 return; 4661 } 4662 4663 #ifdef ASSERT 4664 // Don't catch signals while blocked; let the running threads have the signals. 4665 // (This allows a debugger to break into the running thread.)
4666 sigset_t oldsigs; 4667 sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals(); 4668 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs); 4669 #endif 4670 4671 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 4672 jt->set_suspend_equivalent(); 4673 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 4674 4675 if (time == 0) { 4676 status = pthread_cond_wait (_cond, _mutex); 4677 } else { 4678 status = pthread_cond_timedwait (_cond, _mutex, &absTime); 4679 } 4680 assert_status(status == 0 || status == EINTR || 4681 status == ETIME || status == ETIMEDOUT, 4682 status, "cond_timedwait"); 4683 4684 #ifdef ASSERT 4685 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL); 4686 #endif 4687 4688 _counter = 0; 4689 status = pthread_mutex_unlock(_mutex); 4690 assert_status(status == 0, status, "invariant"); 4691 // If externally suspended while waiting, re-suspend. 4692 if (jt->handle_special_suspend_equivalent_condition()) { 4693 jt->java_suspend_self(); 4694 } 4695 4696 OrderAccess::fence(); 4697 } 4698 4699 void Parker::unpark() { 4700 int s, status; 4701 status = pthread_mutex_lock(_mutex); 4702 assert (status == 0, "invariant"); 4703 s = _counter; 4704 _counter = 1; 4705 if (s < 1) { 4706 status = pthread_mutex_unlock(_mutex); 4707 assert (status == 0, "invariant"); 4708 status = pthread_cond_signal (_cond); 4709 assert (status == 0, "invariant"); 4710 } else { 4711 status = pthread_mutex_unlock(_mutex); 4712 assert (status == 0, "invariant"); 4713 } 4714 } 4715 4716 extern char** environ; 4717 4718 // Run the specified command in a separate process. Return its exit value, 4719 // or -1 on failure (e.g. can't fork a new process). 4720 // Unlike system(), this function can be called from a signal handler. It 4721 // doesn't block SIGINT et al. 4722 int os::fork_and_exec(char* cmd) { 4723 char * argv[4] = {"sh", "-c", cmd, NULL}; 4724 4725 pid_t pid = fork(); 4726 4727 if (pid < 0) { 4728 // fork failed 4729 return -1; 4730 4731 } else if (pid == 0) { 4732 // child process 4733 4734 // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX. 4735 execve("/usr/bin/sh", argv, environ); 4736 4737 // execve failed 4738 _exit(-1); 4739 4740 } else { 4741 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't 4742 // care about the actual exit code, for now. 4743 4744 int status; 4745 4746 // Wait for the child process to exit. This returns immediately if 4747 // the child has already exited. 4748 while (waitpid(pid, &status, 0) < 0) { 4749 switch (errno) { 4750 case ECHILD: return 0; 4751 case EINTR: break; 4752 default: return -1; 4753 } 4754 } 4755 4756 if (WIFEXITED(status)) { 4757 // The child exited normally; get its exit code. 4758 return WEXITSTATUS(status); 4759 } else if (WIFSIGNALED(status)) { 4760 // The child exited because of a signal. 4761 // The best value to return is 0x80 + signal number, 4762 // because that is what all Unix shells do, and because 4763 // it allows callers to distinguish between process exit and 4764 // process death by signal. 4765 return 0x80 + WTERMSIG(status); 4766 } else { 4767 // Unknown exit code; pass it through. 4768 return status; 4769 } 4770 } 4771 return -1; 4772 } 4773 4774 // is_headless_jre() 4775 // 4776 // Test for the existence of xawt/libmawt.so or libawt_xawt.so 4777 // in order to report if we are running in a headless jre.
4778 // 4779 // Since JDK8, xawt/libmawt.so has been moved into the same directory 4780 // as libawt.so and renamed to libawt_xawt.so. 4781 bool os::is_headless_jre() { 4782 struct stat statbuf; 4783 char buf[MAXPATHLEN]; 4784 char libmawtpath[MAXPATHLEN]; 4785 const char *xawtstr = "/xawt/libmawt.so"; 4786 const char *new_xawtstr = "/libawt_xawt.so"; 4787 4788 char *p; 4789 4790 // Get the path to libjvm.so. 4791 os::jvm_path(buf, sizeof(buf)); 4792 4793 // Get rid of libjvm.so. 4794 p = strrchr(buf, '/'); 4795 if (p == NULL) return false; 4796 else *p = '\0'; 4797 4798 // Get rid of client or server. 4799 p = strrchr(buf, '/'); 4800 if (p == NULL) return false; 4801 else *p = '\0'; 4802 4803 // Check xawt/libmawt.so. 4804 strcpy(libmawtpath, buf); 4805 strcat(libmawtpath, xawtstr); 4806 if (::stat(libmawtpath, &statbuf) == 0) return false; 4807 4808 // Check libawt_xawt.so. 4809 strcpy(libmawtpath, buf); 4810 strcat(libmawtpath, new_xawtstr); 4811 if (::stat(libmawtpath, &statbuf) == 0) return false; 4812 4813 return true; 4814 } 4815 4816 // Get the default path to the core file. 4817 // Returns the length of the string. 4818 int os::get_core_path(char* buffer, size_t bufferSize) { 4819 const char* p = get_current_directory(buffer, bufferSize); 4820 4821 if (p == NULL) { 4822 assert(p != NULL, "failed to get current directory"); 4823 return 0; 4824 } 4825 4826 jio_snprintf(buffer, bufferSize, "%s/core or core.%d", 4827 p, current_process_id()); 4828 4829 return strlen(buffer); 4830 } 4831 4832 #ifndef PRODUCT 4833 void TestReserveMemorySpecial_test() { 4834 // No tests available for this platform. 4835 } 4836 #endif 4837 4838 bool os::start_debugging(char *buf, int buflen) { 4839 int len = (int)strlen(buf); 4840 char *p = &buf[len]; 4841 4842 jio_snprintf(p, buflen - len, 4843 "\n\n" 4844 "Do you want to debug the problem?\n\n" 4845 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n" 4846 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n" 4847 "Otherwise, press RETURN to abort...", 4848 os::current_process_id(), 4849 os::current_thread_id(), thread_self()); 4850 4851 bool yes = os::message_box("Unexpected Error", buf); 4852 4853 if (yes) { 4854 // Yes, the user asked the VM to launch the debugger. 4855 jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id()); 4856 4857 os::fork_and_exec(buf); 4858 yes = false; 4859 } 4860 return yes; 4861 } 4862 4863 static inline time_t get_mtime(const char* filename) { 4864 struct stat st; 4865 int ret = os::stat(filename, &st); 4866 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 4867 return st.st_mtime; 4868 } 4869 4870 int os::compare_file_modified_times(const char* file1, const char* file2) { 4871 time_t t1 = get_mtime(file1); 4872 time_t t2 = get_mtime(file2); 4873 return t1 - t2; 4874 }