1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.inline.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "services/attachListener.hpp" 67 #include "services/memTracker.hpp" 68 #include "services/runtimeService.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/semaphore.hpp" 74 #include "utilities/vmError.hpp" 75 76 #ifdef _DEBUG 77 #include <crtdbg.h> 78 #endif 79 80 81 #include <windows.h> 82 #include <sys/types.h> 83 
#include <sys/stat.h> 84 #include <sys/timeb.h> 85 #include <objidl.h> 86 #include <shlobj.h> 87 88 #include <malloc.h> 89 #include <signal.h> 90 #include <direct.h> 91 #include <errno.h> 92 #include <fcntl.h> 93 #include <io.h> 94 #include <process.h> // For _beginthreadex(), _endthreadex() 95 #include <imagehlp.h> // For os::dll_address_to_function_name 96 // for enumerating dll libraries 97 #include <vdmdbg.h> 98 99 // for timer info max values which include all bits 100 #define ALL_64_BITS CONST64(-1) 101 102 // For DLL loading/load error detection 103 // Values of PE COFF 104 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 105 #define IMAGE_FILE_SIGNATURE_LENGTH 4 106 107 static HANDLE main_process; 108 static HANDLE main_thread; 109 static int main_thread_id; 110 111 static FILETIME process_creation_time; 112 static FILETIME process_exit_time; 113 static FILETIME process_user_time; 114 static FILETIME process_kernel_time; 115 116 #ifdef _M_IA64 117 #define __CPU__ ia64 118 #else 119 #ifdef _M_AMD64 120 #define __CPU__ amd64 121 #else 122 #define __CPU__ i486 123 #endif 124 #endif 125 126 // save DLL module handle, used by GetModuleFileName 127 128 HINSTANCE vm_lib_handle; 129 130 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 131 switch (reason) { 132 case DLL_PROCESS_ATTACH: 133 vm_lib_handle = hinst; 134 if (ForceTimeHighResolution) { 135 timeBeginPeriod(1L); 136 } 137 break; 138 case DLL_PROCESS_DETACH: 139 if (ForceTimeHighResolution) { 140 timeEndPeriod(1L); 141 } 142 break; 143 default: 144 break; 145 } 146 return true; 147 } 148 149 static inline double fileTimeAsDouble(FILETIME* time) { 150 const double high = (double) ((unsigned int) ~0); 151 const double split = 10000000.0; 152 double result = (time->dwLowDateTime / split) + 153 time->dwHighDateTime * (high/split); 154 return result; 155 } 156 157 // Implementation of os 158 159 bool os::unsetenv(const char* name) { 160 assert(name != NULL, "Null pointer"); 161 return 
 (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Derive java.home, the dll dir, the boot class path, the library search
// path and the extension dirs, and publish them via Arguments setters.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    // _ALT_JAVA_HOME_DIR, when set, overrides the home derived from jvm.dll.
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

  // library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ?
 strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // chop the executable name off the path
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
//
// Captures up to 'frames' return addresses into 'stack', skipping the
// innermost 'toSkip' frames (plus this frame itself). Returns the number
// of frames actually captured; the remainder of 'stack' is NULL-filled.
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
  toSkip++;
#endif
  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
                                                       (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing a local variable, i.e. the current stack.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // | 1MB
  // | Backing Store (Register Stack)
  // |
  // | / \
  // | |
  // | |
  // | |
  // ------------------------ stack base -----
  // | 1MB
  // | Memory Stack
  // |
  // | |
  // | |
  // | |
  // | \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  // Only half of the reserved region is the memory stack; the other half
  // is the register backing store (see layout diagram above).
  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}

size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Copies localtime()'s result into the caller-supplied buffer;
// returns NULL if the conversion fails.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

// Wraps an already-existing Win32 thread (handle + id) in a freshly
// allocated OSThread. Returns NULL if allocation of the OSThread or of
// its interrupt event fails.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

thread->set_osthread(osthread); 511 return true; 512 } 513 514 bool os::create_main_thread(JavaThread* thread) { 515 #ifdef ASSERT 516 thread->verify_not_published(); 517 #endif 518 if (_starting_thread == NULL) { 519 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 520 if (_starting_thread == NULL) { 521 return false; 522 } 523 } 524 525 // The primordial thread is runnable from the start) 526 _starting_thread->set_state(RUNNABLE); 527 528 thread->set_osthread(_starting_thread); 529 return true; 530 } 531 532 // Allocate and initialize a new OSThread 533 bool os::create_thread(Thread* thread, ThreadType thr_type, 534 size_t stack_size) { 535 unsigned thread_id; 536 537 // Allocate the OSThread object 538 OSThread* osthread = new OSThread(NULL, NULL); 539 if (osthread == NULL) { 540 return false; 541 } 542 543 // Initialize support for Java interrupts 544 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 545 if (interrupt_event == NULL) { 546 delete osthread; 547 return NULL; 548 } 549 osthread->set_interrupt_event(interrupt_event); 550 osthread->set_interrupted(false); 551 552 thread->set_osthread(osthread); 553 554 if (stack_size == 0) { 555 switch (thr_type) { 556 case os::java_thread: 557 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 558 if (JavaThread::stack_size_at_create() > 0) { 559 stack_size = JavaThread::stack_size_at_create(); 560 } 561 break; 562 case os::compiler_thread: 563 if (CompilerThreadStackSize > 0) { 564 stack_size = (size_t)(CompilerThreadStackSize * K); 565 break; 566 } // else fall through: 567 // use VMThreadStackSize if CompilerThreadStackSize is not defined 568 case os::vm_thread: 569 case os::pgc_thread: 570 case os::cgc_thread: 571 case os::watcher_thread: 572 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 573 break; 574 } 575 } 576 577 // Create the Win32 thread 578 // 579 // Contrary to what MSDN document says, "stack_size" in 
 _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
  #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000)
#endif

  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) java_start,
                           thread,
                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
                           &thread_id);
  if (thread_handle == NULL) {
    // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
    // without the flag.
612 thread_handle = 613 (HANDLE)_beginthreadex(NULL, 614 (unsigned)stack_size, 615 (unsigned (__stdcall *)(void*)) java_start, 616 thread, 617 CREATE_SUSPENDED, 618 &thread_id); 619 } 620 if (thread_handle == NULL) { 621 // Need to clean up stuff we've allocated so far 622 CloseHandle(osthread->interrupt_event()); 623 thread->set_osthread(NULL); 624 delete osthread; 625 return NULL; 626 } 627 628 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 629 630 // Store info on the Win32 thread into the OSThread 631 osthread->set_thread_handle(thread_handle); 632 osthread->set_thread_id(thread_id); 633 634 // Initial thread state is INITIALIZED, not SUSPENDED 635 osthread->set_state(INITIALIZED); 636 637 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 638 return true; 639 } 640 641 642 // Free Win32 resources related to the OSThread 643 void os::free_thread(OSThread* osthread) { 644 assert(osthread != NULL, "osthread not set"); 645 CloseHandle(osthread->thread_handle()); 646 CloseHandle(osthread->interrupt_event()); 647 delete osthread; 648 } 649 650 static jlong first_filetime; 651 static jlong initial_performance_count; 652 static jlong performance_frequency; 653 654 655 jlong as_long(LARGE_INTEGER x) { 656 jlong result = 0; // initialization to avoid warning 657 set_high(&result, x.HighPart); 658 set_low(&result, x.LowPart); 659 return result; 660 } 661 662 663 jlong os::elapsed_counter() { 664 LARGE_INTEGER count; 665 if (win32::_has_performance_count) { 666 QueryPerformanceCounter(&count); 667 return as_long(count) - initial_performance_count; 668 } else { 669 FILETIME wt; 670 GetSystemTimeAsFileTime(&wt); 671 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 672 } 673 } 674 675 676 jlong os::elapsed_frequency() { 677 if (win32::_has_performance_count) { 678 return performance_frequency; 679 } else { 680 // the FILETIME time is the number of 100-nanosecond intervals since January 
// 1,1601.
    return 10000000;  // 100ns ticks per second
  }
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif

// Count the set bits in the process affinity mask; fall back to the
// configured processor count if the mask cannot be obtained or is too wide.
int os::active_processor_count() {
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      // clear the lowest set bit on each iteration
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;      // must be 0x1000
    LPCSTR szName;     // pointer to name (in user addr space)
    DWORD dwThreadID;  // thread ID (-1=caller thread)
    DWORD dwFlags;     // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Select the timer source: the high-resolution performance counter when
// available, otherwise the system FILETIME clock. Records the initial
// reading so elapsed_counter() can report deltas.
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    win32::_has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    win32::_has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
// The FILETIME structure is a 64-bit value representing the number of
// 100-nanosecond intervals since January 1, 1601.
// Java format:
// Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0;  // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

// FILETIME (100ns since 1601) -> Java milliseconds since the 1970 epoch.
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in (10th of micro seconds)
jlong windows_to_time_ticks(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset());
}

// Java milliseconds since the 1970 epoch -> FILETIME.
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

double
os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    // fall back to wall-clock time if thread times are unavailable
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

// Split the current wall-clock time into whole seconds and the nanosecond
// remainder (at the clock's 100ns granularity).
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt);  // 10th of micros
  jlong secs = jlong(ticks / 10000000);     // 10000 * 1000
  seconds = secs;
  nanos = jlong(ticks - (secs*10000000)) * 100;
}

jlong os::javaTimeNanos() {
  if (!win32::_has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC;  // the best we can do.
897 } else { 898 LARGE_INTEGER current_count; 899 QueryPerformanceCounter(¤t_count); 900 double current = as_long(current_count); 901 double freq = performance_frequency; 902 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 903 return time; 904 } 905 } 906 907 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 908 if (!win32::_has_performance_count) { 909 // javaTimeMillis() doesn't have much percision, 910 // but it is not going to wrap -- so all 64 bits 911 info_ptr->max_value = ALL_64_BITS; 912 913 // this is a wall clock timer, so may skip 914 info_ptr->may_skip_backward = true; 915 info_ptr->may_skip_forward = true; 916 } else { 917 jlong freq = performance_frequency; 918 if (freq < NANOSECS_PER_SEC) { 919 // the performance counter is 64 bits and we will 920 // be multiplying it -- so no wrap in 64 bits 921 info_ptr->max_value = ALL_64_BITS; 922 } else if (freq > NANOSECS_PER_SEC) { 923 // use the max value the counter can reach to 924 // determine the max value which could be returned 925 julong max_counter = (julong)ALL_64_BITS; 926 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 927 } else { 928 // the performance counter is 64 bits and we will 929 // be using it directly -- so no wrap in 64 bits 930 info_ptr->max_value = ALL_64_BITS; 931 } 932 933 // using a counter, so no skipping 934 info_ptr->may_skip_backward = false; 935 info_ptr->may_skip_forward = false; 936 } 937 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 938 } 939 940 char* os::local_time_string(char *buf, size_t buflen) { 941 SYSTEMTIME st; 942 GetLocalTime(&st); 943 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 944 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 945 return buf; 946 } 947 948 bool os::getTimesSecs(double* process_real_time, 949 double* process_user_time, 950 double* process_system_time) { 951 HANDLE h_process = GetCurrentProcess(); 952 FILETIME create_time, exit_time, kernel_time, user_time; 953 
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Function pointer resolved from DBGHELP.DLL in os::abort() below.
static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

// Minidump file handle, opened by os::check_dump_limit().
static HANDLE dumpFile = NULL;

// Check if dump file can be created.
// Decide whether a minidump can be written on crash and pre-open the dump
// file; the outcome (path or failure reason in 'buffer') is recorded with
// VMError::record_coredump_status().
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  } else {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Open the dump file eagerly so a failing CreateFile is reported now
    // rather than during the crash itself.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Terminate the process, optionally writing a minidump first. Never returns.
void os::abort(bool dump_core, void* siginfo, void* context) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  _MiniDumpWriteDump =
    CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                  PMINIDUMP_EXCEPTION_INFORMATION,
                                  PMINIDUMP_USER_STREAM_INFORMATION,
                                  PMINIDUMP_CALLBACK_INFORMATION),
                   GetProcAddress(dbghelp,
                                  "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h do not contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

void os::abort(bool dump_core) {
  abort(dump_core, NULL, NULL);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
// Terminate the process immediately with status -1; bypasses all hooks.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// POSIX-style opendir emulation on top of FindFirstFile. Returns a heap
// allocated DIR (released by closedir) or 0 with errno set on failure.
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    // _getdrive() is 1-based, so 1 -> 'A'.
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the worst-case "\\*.*" suffix plus the NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  // 0xffffffff is INVALID_FILE_ATTRIBUTES: path does not exist.
  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  // Prime the first entry; an empty directory (ERROR_FILE_NOT_FOUND) is
  // still a successful open — readdir will simply return nothing.
  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// POSIX-style readdir emulation: returns the entry fetched by the previous
// FindFirst/NextFile call and pre-fetches the next one. Returns 0 when the
// directory is exhausted or the handle is invalid.
// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // End of directory: close the search handle so the next call returns 0.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// POSIX-style closedir emulation; releases the search handle and the DIR.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a static buffer; empty string on failure.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// True if 'filename' names an existing file or directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a platform library name ("<dir>\\<fname>.dll") into 'buffer'.
// 'pname' may be empty, a single directory, or a path-separator list that
// is searched for the first existing candidate. Returns false on overflow
// or when no candidate from a search list exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  // Last character of pname decides whether a separator must be inserted.
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: try each element, keep the first that exists.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue;  // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Thin wrapper over _getcwd; clamps buflen to INT_MAX since _getcwd takes
// an int. With buf == NULL, _getcwd allocates the buffer (CRT semantics).
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine
which returns true if address in 1296 // within the NTDLL address space. 1297 // 1298 static bool _addr_in_ntdll(address addr) { 1299 HMODULE hmod; 1300 MODULEINFO minfo; 1301 1302 hmod = GetModuleHandle("NTDLL.DLL"); 1303 if (hmod == NULL) return false; 1304 if (!os::PSApiDll::GetModuleInformation(GetCurrentProcess(), hmod, 1305 &minfo, sizeof(MODULEINFO))) { 1306 return false; 1307 } 1308 1309 if ((addr >= minfo.lpBaseOfDll) && 1310 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) { 1311 return true; 1312 } else { 1313 return false; 1314 } 1315 } 1316 #endif 1317 1318 struct _modinfo { 1319 address addr; 1320 char* full_path; // point to a char buffer 1321 int buflen; // size of the buffer 1322 address base_addr; 1323 }; 1324 1325 static int _locate_module_by_addr(const char * mod_fname, address base_addr, 1326 address top_address, void * param) { 1327 struct _modinfo *pmod = (struct _modinfo *)param; 1328 if (!pmod) return -1; 1329 1330 if (base_addr <= pmod->addr && 1331 top_address > pmod->addr) { 1332 // if a buffer is provided, copy path name to the buffer 1333 if (pmod->full_path) { 1334 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname); 1335 } 1336 pmod->base_addr = base_addr; 1337 return 1; 1338 } 1339 return 0; 1340 } 1341 1342 bool os::dll_address_to_library_name(address addr, char* buf, 1343 int buflen, int* offset) { 1344 // buf is not optional, but offset is optional 1345 assert(buf != NULL, "sanity check"); 1346 1347 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always 1348 // return the full path to the DLL file, sometimes it returns path 1349 // to the corresponding PDB file (debug info); sometimes it only 1350 // returns partial path, which makes life painful. 
1351 1352 struct _modinfo mi; 1353 mi.addr = addr; 1354 mi.full_path = buf; 1355 mi.buflen = buflen; 1356 if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) { 1357 // buf already contains path name 1358 if (offset) *offset = addr - mi.base_addr; 1359 return true; 1360 } 1361 1362 buf[0] = '\0'; 1363 if (offset) *offset = -1; 1364 return false; 1365 } 1366 1367 bool os::dll_address_to_function_name(address addr, char *buf, 1368 int buflen, int *offset) { 1369 // buf is not optional, but offset is optional 1370 assert(buf != NULL, "sanity check"); 1371 1372 if (Decoder::decode(addr, buf, buflen, offset)) { 1373 return true; 1374 } 1375 if (offset != NULL) *offset = -1; 1376 buf[0] = '\0'; 1377 return false; 1378 } 1379 1380 // save the start and end address of jvm.dll into param[0] and param[1] 1381 static int _locate_jvm_dll(const char* mod_fname, address base_addr, 1382 address top_address, void * param) { 1383 if (!param) return -1; 1384 1385 if (base_addr <= (address)_locate_jvm_dll && 1386 top_address > (address)_locate_jvm_dll) { 1387 ((address*)param)[0] = base_addr; 1388 ((address*)param)[1] = top_address; 1389 return 1; 1390 } 1391 return 0; 1392 } 1393 1394 address vm_lib_location[2]; // start and end address of jvm.dll 1395 1396 // check if addr is inside jvm.dll 1397 bool os::address_is_in_vm(address addr) { 1398 if (!vm_lib_location[0] || !vm_lib_location[1]) { 1399 if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) { 1400 assert(false, "Can't find jvm module."); 1401 return false; 1402 } 1403 } 1404 1405 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]); 1406 } 1407 1408 // print module info; param is outputStream* 1409 static int _print_module(const char* fname, address base_address, 1410 address top_address, void* param) { 1411 if (!param) return -1; 1412 1413 outputStream* st = (outputStream*)param; 1414 1415 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname); 1416 
return 0; 1417 } 1418 1419 // Loads .dll/.so and 1420 // in case of error it checks if .dll/.so was built for the 1421 // same architecture as Hotspot is running on 1422 void * os::dll_load(const char *name, char *ebuf, int ebuflen) { 1423 void * result = LoadLibrary(name); 1424 if (result != NULL) { 1425 return result; 1426 } 1427 1428 DWORD errcode = GetLastError(); 1429 if (errcode == ERROR_MOD_NOT_FOUND) { 1430 strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1); 1431 ebuf[ebuflen - 1] = '\0'; 1432 return NULL; 1433 } 1434 1435 // Parsing dll below 1436 // If we can read dll-info and find that dll was built 1437 // for an architecture other than Hotspot is running in 1438 // - then print to buffer "DLL was built for a different architecture" 1439 // else call os::lasterror to obtain system error message 1440 1441 // Read system error message into ebuf 1442 // It may or may not be overwritten below (in the for loop and just above) 1443 lasterror(ebuf, (size_t) ebuflen); 1444 ebuf[ebuflen - 1] = '\0'; 1445 int fd = ::open(name, O_RDONLY | O_BINARY, 0); 1446 if (fd < 0) { 1447 return NULL; 1448 } 1449 1450 uint32_t signature_offset; 1451 uint16_t lib_arch = 0; 1452 bool failed_to_get_lib_arch = 1453 ( // Go to position 3c in the dll 1454 (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0) 1455 || 1456 // Read location of signature 1457 (sizeof(signature_offset) != 1458 (os::read(fd, (void*)&signature_offset, sizeof(signature_offset)))) 1459 || 1460 // Go to COFF File Header in dll 1461 // that is located after "signature" (4 bytes long) 1462 (os::seek_to_file_offset(fd, 1463 signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0) 1464 || 1465 // Read field that contains code of architecture 1466 // that dll was built for 1467 (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch)))) 1468 ); 1469 1470 ::close(fd); 1471 if (failed_to_get_lib_arch) { 1472 // file i/o error - report os::lasterror(...) 
msg 1473 return NULL; 1474 } 1475 1476 typedef struct { 1477 uint16_t arch_code; 1478 char* arch_name; 1479 } arch_t; 1480 1481 static const arch_t arch_array[] = { 1482 {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"}, 1483 {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}, 1484 {IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"} 1485 }; 1486 #if (defined _M_IA64) 1487 static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64; 1488 #elif (defined _M_AMD64) 1489 static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64; 1490 #elif (defined _M_IX86) 1491 static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386; 1492 #else 1493 #error Method os::dll_load requires that one of following \ 1494 is defined :_M_IA64,_M_AMD64 or _M_IX86 1495 #endif 1496 1497 1498 // Obtain a string for printf operation 1499 // lib_arch_str shall contain string what platform this .dll was built for 1500 // running_arch_str shall string contain what platform Hotspot was built for 1501 char *running_arch_str = NULL, *lib_arch_str = NULL; 1502 for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) { 1503 if (lib_arch == arch_array[i].arch_code) { 1504 lib_arch_str = arch_array[i].arch_name; 1505 } 1506 if (running_arch == arch_array[i].arch_code) { 1507 running_arch_str = arch_array[i].arch_name; 1508 } 1509 } 1510 1511 assert(running_arch_str, 1512 "Didn't find running architecture code in arch_array"); 1513 1514 // If the architecture is right 1515 // but some other error took place - report os::lasterror(...) 
msg 1516 if (lib_arch == running_arch) { 1517 return NULL; 1518 } 1519 1520 if (lib_arch_str != NULL) { 1521 ::_snprintf(ebuf, ebuflen - 1, 1522 "Can't load %s-bit .dll on a %s-bit platform", 1523 lib_arch_str, running_arch_str); 1524 } else { 1525 // don't know what architecture this dll was build for 1526 ::_snprintf(ebuf, ebuflen - 1, 1527 "Can't load this .dll (machine code=0x%x) on a %s-bit platform", 1528 lib_arch, running_arch_str); 1529 } 1530 1531 return NULL; 1532 } 1533 1534 void os::print_dll_info(outputStream *st) { 1535 st->print_cr("Dynamic libraries:"); 1536 get_loaded_modules_info(_print_module, (void *)st); 1537 } 1538 1539 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1540 HANDLE hProcess; 1541 1542 # define MAX_NUM_MODULES 128 1543 HMODULE modules[MAX_NUM_MODULES]; 1544 static char filename[MAX_PATH]; 1545 int result = 0; 1546 1547 if (!os::PSApiDll::PSApiAvailable()) { 1548 return 0; 1549 } 1550 1551 int pid = os::current_process_id(); 1552 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 1553 FALSE, pid); 1554 if (hProcess == NULL) return 0; 1555 1556 DWORD size_needed; 1557 if (!os::PSApiDll::EnumProcessModules(hProcess, modules, 1558 sizeof(modules), &size_needed)) { 1559 CloseHandle(hProcess); 1560 return 0; 1561 } 1562 1563 // number of modules that are currently loaded 1564 int num_modules = size_needed / sizeof(HMODULE); 1565 1566 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { 1567 // Get Full pathname: 1568 if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i], 1569 filename, sizeof(filename))) { 1570 filename[0] = '\0'; 1571 } 1572 1573 MODULEINFO modinfo; 1574 if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i], 1575 &modinfo, sizeof(modinfo))) { 1576 modinfo.lpBaseOfDll = NULL; 1577 modinfo.SizeOfImage = 0; 1578 } 1579 1580 // Invoke callback function 1581 result = callback(filename, (address)modinfo.lpBaseOfDll, 1582 
(address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param); 1583 if (result) break; 1584 } 1585 1586 CloseHandle(hProcess); 1587 return result; 1588 } 1589 1590 void os::print_os_info_brief(outputStream* st) { 1591 os::print_os_info(st); 1592 } 1593 1594 void os::print_os_info(outputStream* st) { 1595 #ifdef ASSERT 1596 char buffer[1024]; 1597 DWORD size = sizeof(buffer); 1598 st->print(" HostName: "); 1599 if (GetComputerNameEx(ComputerNameDnsHostname, buffer, &size)) { 1600 st->print("%s", buffer); 1601 } else { 1602 st->print("N/A"); 1603 } 1604 #endif 1605 st->print(" OS:"); 1606 os::win32::print_windows_version(st); 1607 } 1608 1609 void os::win32::print_windows_version(outputStream* st) { 1610 OSVERSIONINFOEX osvi; 1611 VS_FIXEDFILEINFO *file_info; 1612 TCHAR kernel32_path[MAX_PATH]; 1613 UINT len, ret; 1614 1615 // Use the GetVersionEx information to see if we're on a server or 1616 // workstation edition of Windows. Starting with Windows 8.1 we can't 1617 // trust the OS version information returned by this API. 1618 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1619 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1620 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1621 st->print_cr("Call to GetVersionEx failed"); 1622 return; 1623 } 1624 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1625 1626 // Get the full path to \Windows\System32\kernel32.dll and use that for 1627 // determining what version of Windows we're running on. 
1628 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1629 ret = GetSystemDirectory(kernel32_path, len); 1630 if (ret == 0 || ret > len) { 1631 st->print_cr("Call to GetSystemDirectory failed"); 1632 return; 1633 } 1634 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1635 1636 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1637 if (version_size == 0) { 1638 st->print_cr("Call to GetFileVersionInfoSize failed"); 1639 return; 1640 } 1641 1642 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1643 if (version_info == NULL) { 1644 st->print_cr("Failed to allocate version_info"); 1645 return; 1646 } 1647 1648 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1649 os::free(version_info); 1650 st->print_cr("Call to GetFileVersionInfo failed"); 1651 return; 1652 } 1653 1654 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1655 os::free(version_info); 1656 st->print_cr("Call to VerQueryValue failed"); 1657 return; 1658 } 1659 1660 int major_version = HIWORD(file_info->dwProductVersionMS); 1661 int minor_version = LOWORD(file_info->dwProductVersionMS); 1662 int build_number = HIWORD(file_info->dwProductVersionLS); 1663 int build_minor = LOWORD(file_info->dwProductVersionLS); 1664 int os_vers = major_version * 1000 + minor_version; 1665 os::free(version_info); 1666 1667 st->print(" Windows "); 1668 switch (os_vers) { 1669 1670 case 6000: 1671 if (is_workstation) { 1672 st->print("Vista"); 1673 } else { 1674 st->print("Server 2008"); 1675 } 1676 break; 1677 1678 case 6001: 1679 if (is_workstation) { 1680 st->print("7"); 1681 } else { 1682 st->print("Server 2008 R2"); 1683 } 1684 break; 1685 1686 case 6002: 1687 if (is_workstation) { 1688 st->print("8"); 1689 } else { 1690 st->print("Server 2012"); 1691 } 1692 break; 1693 1694 case 6003: 1695 if (is_workstation) { 1696 st->print("8.1"); 1697 } else { 1698 st->print("Server 2012 R2"); 1699 } 1700 break; 1701 1702 case 10000: 
1703 if (is_workstation) { 1704 st->print("10"); 1705 } else { 1706 // The server version name of Windows 10 is not known at this time 1707 st->print("%d.%d", major_version, minor_version); 1708 } 1709 break; 1710 1711 default: 1712 // Unrecognized windows, print out its major and minor versions 1713 st->print("%d.%d", major_version, minor_version); 1714 break; 1715 } 1716 1717 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1718 // find out whether we are running on 64 bit processor or not 1719 SYSTEM_INFO si; 1720 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1721 os::Kernel32Dll::GetNativeSystemInfo(&si); 1722 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1723 st->print(" , 64 bit"); 1724 } 1725 1726 st->print(" Build %d", build_number); 1727 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1728 st->cr(); 1729 } 1730 1731 void os::pd_print_cpu_info(outputStream* st) { 1732 // Nothing to do for now. 1733 } 1734 1735 void os::print_memory_info(outputStream* st) { 1736 st->print("Memory:"); 1737 st->print(" %dk page", os::vm_page_size()>>10); 1738 1739 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1740 // value if total memory is larger than 4GB 1741 MEMORYSTATUSEX ms; 1742 ms.dwLength = sizeof(ms); 1743 GlobalMemoryStatusEx(&ms); 1744 1745 st->print(", physical %uk", os::physical_memory() >> 10); 1746 st->print("(%uk free)", os::available_memory() >> 10); 1747 1748 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1749 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1750 st->cr(); 1751 } 1752 1753 void os::print_siginfo(outputStream *st, void *siginfo) { 1754 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1755 st->print("siginfo:"); 1756 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1757 1758 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1759 er->NumberParameters >= 2) { 1760 switch (er->ExceptionInformation[0]) { 1761 case 0: 
st->print(", reading address"); break; 1762 case 1: st->print(", writing address"); break; 1763 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1764 er->ExceptionInformation[0]); 1765 } 1766 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1767 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1768 er->NumberParameters >= 2 && UseSharedSpaces) { 1769 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1770 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1771 st->print("\n\nError accessing class data sharing archive." \ 1772 " Mapped file inaccessible during execution, " \ 1773 " possible disk/network problem."); 1774 } 1775 } else { 1776 int num = er->NumberParameters; 1777 if (num > 0) { 1778 st->print(", ExceptionInformation="); 1779 for (int i = 0; i < num; i++) { 1780 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1781 } 1782 } 1783 } 1784 st->cr(); 1785 } 1786 1787 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1788 // do nothing 1789 } 1790 1791 static char saved_jvm_path[MAX_PATH] = {0}; 1792 1793 // Find the full path to the current module, jvm.dll 1794 void os::jvm_path(char *buf, jint buflen) { 1795 // Error checking. 1796 if (buflen < MAX_PATH) { 1797 assert(false, "must use a large-enough buffer"); 1798 buf[0] = '\0'; 1799 return; 1800 } 1801 // Lazy resolve the path to current module. 1802 if (saved_jvm_path[0] != 0) { 1803 strcpy(buf, saved_jvm_path); 1804 return; 1805 } 1806 1807 buf[0] = '\0'; 1808 if (Arguments::sun_java_launcher_is_altjvm()) { 1809 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1810 // for a JAVA_HOME environment variable and fix up the path so it 1811 // looks like jvm.dll is installed there (append a fake suffix 1812 // hotspot/jvm.dll). 
1813 char* java_home_var = ::getenv("JAVA_HOME"); 1814 if (java_home_var != NULL && java_home_var[0] != 0 && 1815 strlen(java_home_var) < (size_t)buflen) { 1816 strncpy(buf, java_home_var, buflen); 1817 1818 // determine if this is a legacy image or modules image 1819 // modules image doesn't have "jre" subdirectory 1820 size_t len = strlen(buf); 1821 char* jrebin_p = buf + len; 1822 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); 1823 if (0 != _access(buf, 0)) { 1824 jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); 1825 } 1826 len = strlen(buf); 1827 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); 1828 } 1829 } 1830 1831 if (buf[0] == '\0') { 1832 GetModuleFileName(vm_lib_handle, buf, buflen); 1833 } 1834 strncpy(saved_jvm_path, buf, MAX_PATH); 1835 saved_jvm_path[MAX_PATH - 1] = '\0'; 1836 } 1837 1838 1839 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1840 #ifndef _WIN64 1841 st->print("_"); 1842 #endif 1843 } 1844 1845 1846 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1847 #ifndef _WIN64 1848 st->print("@%d", args_size * sizeof(int)); 1849 #endif 1850 } 1851 1852 // This method is a copy of JDK's sysGetLastErrorString 1853 // from src/windows/hpi/src/system_md.c 1854 1855 size_t os::lasterror(char* buf, size_t len) { 1856 DWORD errval; 1857 1858 if ((errval = GetLastError()) != 0) { 1859 // DOS error 1860 size_t n = (size_t)FormatMessage( 1861 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, 1862 NULL, 1863 errval, 1864 0, 1865 buf, 1866 (DWORD)len, 1867 NULL); 1868 if (n > 3) { 1869 // Drop final '.', CR, LF 1870 if (buf[n - 1] == '\n') n--; 1871 if (buf[n - 1] == '\r') n--; 1872 if (buf[n - 1] == '.') n--; 1873 buf[n] = '\0'; 1874 } 1875 return n; 1876 } 1877 1878 if (errno != 0) { 1879 // C runtime error that has no corresponding DOS error code 1880 const char* s = strerror(errno); 1881 size_t n = strlen(s); 1882 if (n >= len) n = len - 1; 1883 strncpy(buf, s, n); 1884 buf[n] = '\0'; 1885 
return n; 1886 } 1887 1888 return 0; 1889 } 1890 1891 int os::get_last_error() { 1892 DWORD error = GetLastError(); 1893 if (error == 0) { 1894 error = errno; 1895 } 1896 return (int)error; 1897 } 1898 1899 Semaphore::Semaphore(uint value, uint max) { 1900 _semaphore = ::CreateSemaphore(NULL, value, max, NULL); 1901 1902 guarantee(_semaphore != NULL, err_msg("CreateSemaphore failed: %ld", GetLastError())); 1903 } 1904 1905 Semaphore::~Semaphore() { 1906 if (_semaphore != NULL) { 1907 ::CloseHandle(_semaphore); 1908 } 1909 } 1910 1911 void Semaphore::signal(uint count) { 1912 BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL); 1913 1914 guarantee(ret != 0, err_msg("ReleaseSemaphore failed: %d", GetLastError())); 1915 } 1916 1917 void Semaphore::signal() { 1918 signal(1); 1919 } 1920 1921 void Semaphore::wait() { 1922 DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE); 1923 guarantee(ret == WAIT_OBJECT_0, err_msg("WaitForSingleObject failed: %d", GetLastError())); 1924 } 1925 1926 bool Semaphore::trywait() { 1927 Unimplemented(); 1928 return false; 1929 } 1930 1931 bool Semaphore::timedwait(unsigned int sec, int nsec) { 1932 Unimplemented(); 1933 return false; 1934 } 1935 1936 // sun.misc.Signal 1937 // NOTE that this is a workaround for an apparent kernel bug where if 1938 // a signal handler for SIGBREAK is installed then that signal handler 1939 // takes priority over the console control handler for CTRL_CLOSE_EVENT. 1940 // See bug 4416763. 1941 static void (*sigbreakHandler)(int) = NULL; 1942 1943 static void UserHandler(int sig, void *siginfo, void *context) { 1944 os::signal_notify(sig); 1945 // We need to reinstate the signal handler each time... 
1946 os::signal(sig, (void*)UserHandler); 1947 } 1948 1949 void* os::user_handler() { 1950 return (void*) UserHandler; 1951 } 1952 1953 void* os::signal(int signal_number, void* handler) { 1954 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1955 void (*oldHandler)(int) = sigbreakHandler; 1956 sigbreakHandler = (void (*)(int)) handler; 1957 return (void*) oldHandler; 1958 } else { 1959 return (void*)::signal(signal_number, (void (*)(int))handler); 1960 } 1961 } 1962 1963 void os::signal_raise(int signal_number) { 1964 raise(signal_number); 1965 } 1966 1967 // The Win32 C runtime library maps all console control events other than ^C 1968 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1969 // logoff, and shutdown events. We therefore install our own console handler 1970 // that raises SIGTERM for the latter cases. 1971 // 1972 static BOOL WINAPI consoleHandler(DWORD event) { 1973 switch (event) { 1974 case CTRL_C_EVENT: 1975 if (is_error_reported()) { 1976 // Ctrl-C is pressed during error reporting, likely because the error 1977 // handler fails to abort. Let VM die immediately. 1978 os::die(); 1979 } 1980 1981 os::signal_raise(SIGINT); 1982 return TRUE; 1983 break; 1984 case CTRL_BREAK_EVENT: 1985 if (sigbreakHandler != NULL) { 1986 (*sigbreakHandler)(SIGBREAK); 1987 } 1988 return TRUE; 1989 break; 1990 case CTRL_LOGOFF_EVENT: { 1991 // Don't terminate JVM if it is running in a non-interactive session, 1992 // such as a service process. 1993 USEROBJECTFLAGS flags; 1994 HANDLE handle = GetProcessWindowStation(); 1995 if (handle != NULL && 1996 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 1997 sizeof(USEROBJECTFLAGS), NULL)) { 1998 // If it is a non-interactive session, let next handler to deal 1999 // with it. 
      // Non-interactive session (WSF_VISIBLE clear), e.g. a service process:
      // return FALSE so the OS's next handler deals with the logoff event
      // instead of this VM terminating.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // NOTE(review): fall-through from the (interactive) logoff case into
  // close/shutdown appears intentional -- logoff is then treated like
  // shutdown. Confirm against the full consoleHandler switch above.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Counting semaphore released once per posted signal; the signal thread
// (via check_pending_signals) blocks on it.
static HANDLE sig_sem = NULL;

// One-time setup of the signal bookkeeping above and (unless -Xrs) the
// console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initial count 0; maximum count NSIG+1 matches the pending_signals table.
  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of signal_number and wake the signal thread.
// Safe to call before signal_init_pd() -- it is a no-op while sig_sem is NULL.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    // Bump the per-signal counter first, then release the semaphore so the
    // waiter is guaranteed to observe the increment.
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan the pending_signals table and atomically consume one pending signal,
// returning its number.  If wait_for_signal is false and nothing is pending,
// returns -1; otherwise blocks on sig_sem until a signal is posted,
// cooperating with the external-suspension protocol while blocked.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg decrements the counter only if no other consumer raced us;
      // on success we own one occurrence of signal i.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    // Transition to _thread_blocked for the duration of the wait so
    // safepoints are not held up.
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        // Re-release the count we consumed so no signal is lost, then
        // self-suspend; retry the wait afterwards.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking probe for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Blocking wait for the next pending signal.
int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect execution to 'handler': save the faulting pc in the current
// JavaThread (when one exists) and rewrite the context's instruction
// pointer, then tell the OS to resume at the modified context.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
2143 if (thread) { 2144 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2145 } 2146 // Set pc to handler 2147 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2148 #endif 2149 #endif 2150 2151 // Continue the execution 2152 return EXCEPTION_CONTINUE_EXECUTION; 2153 } 2154 2155 2156 // Used for PostMortemDump 2157 extern "C" void safepoints(); 2158 extern "C" void find(int x); 2159 extern "C" void events(); 2160 2161 // According to Windows API documentation, an illegal instruction sequence should generate 2162 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2163 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2164 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2165 2166 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2167 2168 // From "Execution Protection in the Windows Operating System" draft 0.35 2169 // Once a system header becomes available, the "real" define should be 2170 // included or copied here. 2171 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2172 2173 // Handle NAT Bit consumption on IA64. 2174 #ifdef _M_IA64 2175 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2176 #endif 2177 2178 // Windows Vista/2008 heap corruption check 2179 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2180 2181 #define def_excpt(val) #val, val 2182 2183 struct siglabel { 2184 char *name; 2185 int number; 2186 }; 2187 2188 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2189 // C++ compiler contain this error code. Because this is a compiler-generated 2190 // error, the code is not listed in the Win32 API header files. 2191 // The code is actually a cryptic mnemonic device, with the initial "E" 2192 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2193 // ASCII values of "msc". 
2194 2195 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2196 2197 2198 struct siglabel exceptlabels[] = { 2199 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2200 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2201 def_excpt(EXCEPTION_BREAKPOINT), 2202 def_excpt(EXCEPTION_SINGLE_STEP), 2203 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2204 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2205 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2206 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2207 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2208 def_excpt(EXCEPTION_FLT_OVERFLOW), 2209 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2210 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2211 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2212 def_excpt(EXCEPTION_INT_OVERFLOW), 2213 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2214 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2215 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2216 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2217 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2218 def_excpt(EXCEPTION_STACK_OVERFLOW), 2219 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2220 def_excpt(EXCEPTION_GUARD_PAGE), 2221 def_excpt(EXCEPTION_INVALID_HANDLE), 2222 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2223 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2224 #ifdef _M_IA64 2225 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2226 #endif 2227 NULL, 0 2228 }; 2229 2230 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2231 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2232 if (exceptlabels[i].number == exception_code) { 2233 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2234 return buf; 2235 } 2236 } 2237 2238 return NULL; 2239 } 2240 2241 //----------------------------------------------------------------------------- 2242 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2243 // handle exception caused by idiv; should only happen for -MinInt/-1 2244 // (division by zero is handled explicitly) 2245 #ifdef _M_IA64 2246 assert(0, "Fix Handle_IDiv_Exception"); 
2247 #else 2248 #ifdef _M_AMD64 2249 PCONTEXT ctx = exceptionInfo->ContextRecord; 2250 address pc = (address)ctx->Rip; 2251 assert(pc[0] == 0xF7, "not an idiv opcode"); 2252 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2253 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2254 // set correct result values and continue after idiv instruction 2255 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2256 ctx->Rax = (DWORD)min_jint; // result 2257 ctx->Rdx = (DWORD)0; // remainder 2258 // Continue the execution 2259 #else 2260 PCONTEXT ctx = exceptionInfo->ContextRecord; 2261 address pc = (address)ctx->Eip; 2262 assert(pc[0] == 0xF7, "not an idiv opcode"); 2263 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2264 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2265 // set correct result values and continue after idiv instruction 2266 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2267 ctx->Eax = (DWORD)min_jint; // result 2268 ctx->Edx = (DWORD)0; // remainder 2269 // Continue the execution 2270 #endif 2271 #endif 2272 return EXCEPTION_CONTINUE_EXECUTION; 2273 } 2274 2275 //----------------------------------------------------------------------------- 2276 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2277 PCONTEXT ctx = exceptionInfo->ContextRecord; 2278 #ifndef _WIN64 2279 // handle exception caused by native method modifying control word 2280 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2281 2282 switch (exception_code) { 2283 case EXCEPTION_FLT_DENORMAL_OPERAND: 2284 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2285 case EXCEPTION_FLT_INEXACT_RESULT: 2286 case EXCEPTION_FLT_INVALID_OPERATION: 2287 case EXCEPTION_FLT_OVERFLOW: 2288 case EXCEPTION_FLT_STACK_CHECK: 2289 case EXCEPTION_FLT_UNDERFLOW: 2290 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2291 if (fp_control_word != ctx->FloatSave.ControlWord) { 2292 // 
Restore FPCW and mask out FLT exceptions 2293 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2294 // Mask out pending FLT exceptions 2295 ctx->FloatSave.StatusWord &= 0xffffff00; 2296 return EXCEPTION_CONTINUE_EXECUTION; 2297 } 2298 } 2299 2300 if (prev_uef_handler != NULL) { 2301 // We didn't handle this exception so pass it to the previous 2302 // UnhandledExceptionFilter. 2303 return (prev_uef_handler)(exceptionInfo); 2304 } 2305 #else // !_WIN64 2306 // On Windows, the mxcsr control bits are non-volatile across calls 2307 // See also CR 6192333 2308 // 2309 jint MxCsr = INITIAL_MXCSR; 2310 // we can't use StubRoutines::addr_mxcsr_std() 2311 // because in Win64 mxcsr is not saved there 2312 if (MxCsr != ctx->MxCsr) { 2313 ctx->MxCsr = MxCsr; 2314 return EXCEPTION_CONTINUE_EXECUTION; 2315 } 2316 #endif // !_WIN64 2317 2318 return EXCEPTION_CONTINUE_SEARCH; 2319 } 2320 2321 static inline void report_error(Thread* t, DWORD exception_code, 2322 address addr, void* siginfo, void* context) { 2323 VMError err(t, exception_code, addr, siginfo, context); 2324 err.report_and_die(); 2325 2326 // If UseOsErrorReporting, this will return here and save the error file 2327 // somewhere where we can find it in the minidump. 2328 } 2329 2330 //----------------------------------------------------------------------------- 2331 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2332 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2333 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2334 #ifdef _M_IA64 2335 // On Itanium, we need the "precise pc", which has the slot number coded 2336 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). 
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
#endif
  // May legitimately be NULL for exceptions raised on non-VM threads.
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      // XOR of pc and addr differs in the page-number bits iff they are on
      // different pages.
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
2419 // 2420 // Although both cases could be avoided by using locks or thread 2421 // local last_addr, these solutions are unnecessary complication: 2422 // this handler is a best-effort safety net, not a complete solution. 2423 // It is disabled by default and should only be used as a workaround 2424 // in case we missed any no-execute-unsafe VM code. 2425 2426 last_addr = addr; 2427 2428 return EXCEPTION_CONTINUE_EXECUTION; 2429 } 2430 } 2431 2432 // Last unguard failed or not unguarding 2433 tty->print_raw_cr("Execution protection violation"); 2434 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2435 exceptionInfo->ContextRecord); 2436 return EXCEPTION_CONTINUE_SEARCH; 2437 } 2438 } 2439 #endif // _WIN64 2440 2441 // Check to see if we caught the safepoint code in the 2442 // process of write protecting the memory serialization page. 2443 // It write enables the page immediately after protecting it 2444 // so just return. 2445 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2446 JavaThread* thread = (JavaThread*) t; 2447 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2448 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2449 if (os::is_memory_serialize_page(thread, addr)) { 2450 // Block current thread until the memory serialize page permission restored. 2451 os::block_on_serialize_page_trap(); 2452 return EXCEPTION_CONTINUE_EXECUTION; 2453 } 2454 } 2455 2456 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2457 VM_Version::is_cpuinfo_segv_addr(pc)) { 2458 // Verify that OS save/restore AVX registers. 2459 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2460 } 2461 2462 if (t != NULL && t->is_Java_thread()) { 2463 JavaThread* thread = (JavaThread*) t; 2464 bool in_java = thread->thread_state() == _thread_in_Java; 2465 2466 // Handle potential stack overflows up front. 
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java
              ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
              : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        // Fault in the last page of the stack: treat as overflow (only
        // reachable when guard pages are not in use, per the assert).
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
            // Round down to a page boundary, then commit everything from
            // there up to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr(" to addr " INTPTR_FORMAT, addr);
                  tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          // Redirect into the slow-case path of the accessor.
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      // Division by zero in Java code: route to the implicit-exception
      // continuation (throws ArithmeticException).
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      // min_jint / -1 overflow: patch the context and resume after the idiv.
      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    // Anything still unrecognized (except debugger breakpoints) is fatal.
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// SEH filter for the fast JNI accessor wrappers below: an access violation
// inside a recognized fast-accessor pc is redirected to its slow-case path;
// anything else continues the normal handler search.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates jni_fast_Get<Result>Field_wrapper: calls the generated fast
// accessor inside an MSVC __try/__except guarded by the filter above.
// Returns 0 only if the filter somehow declines to redirect (not expected).
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Select the SEH-guarded wrapper for the given primitive type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  // Unreachable; keeps the compiler's all-paths-return check happy.
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
2757 2758 #ifndef MEM_LARGE_PAGES 2759 #define MEM_LARGE_PAGES 0x20000000 2760 #endif 2761 2762 static HANDLE _hProcess; 2763 static HANDLE _hToken; 2764 2765 // Container for NUMA node list info 2766 class NUMANodeListHolder { 2767 private: 2768 int *_numa_used_node_list; // allocated below 2769 int _numa_used_node_count; 2770 2771 void free_node_list() { 2772 if (_numa_used_node_list != NULL) { 2773 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2774 } 2775 } 2776 2777 public: 2778 NUMANodeListHolder() { 2779 _numa_used_node_count = 0; 2780 _numa_used_node_list = NULL; 2781 // do rest of initialization in build routine (after function pointers are set up) 2782 } 2783 2784 ~NUMANodeListHolder() { 2785 free_node_list(); 2786 } 2787 2788 bool build() { 2789 DWORD_PTR proc_aff_mask; 2790 DWORD_PTR sys_aff_mask; 2791 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2792 ULONG highest_node_number; 2793 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false; 2794 free_node_list(); 2795 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2796 for (unsigned int i = 0; i <= highest_node_number; i++) { 2797 ULONGLONG proc_mask_numa_node; 2798 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2799 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2800 _numa_used_node_list[_numa_used_node_count++] = i; 2801 } 2802 } 2803 return (_numa_used_node_count > 1); 2804 } 2805 2806 int get_count() { return _numa_used_node_count; } 2807 int get_node_list_entry(int n) { 2808 // for indexes out of range, returns -1 2809 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2810 } 2811 2812 } numa_node_list_holder; 2813 2814 2815 2816 static size_t _large_page_size = 0; 2817 2818 static bool resolve_functions_for_large_page_init() { 2819 return os::Kernel32Dll::GetLargePageMinimumAvailable() && 2820 os::Advapi32Dll::AdvapiAvailable(); 2821 } 2822 2823 static bool request_lock_memory_privilege() { 2824 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2825 os::current_process_id()); 2826 2827 LUID luid; 2828 if (_hProcess != NULL && 2829 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2830 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2831 2832 TOKEN_PRIVILEGES tp; 2833 tp.PrivilegeCount = 1; 2834 tp.Privileges[0].Luid = luid; 2835 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2836 2837 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2838 // privilege. Check GetLastError() too. See MSDN document. 2839 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2840 (GetLastError() == ERROR_SUCCESS)) { 2841 return true; 2842 } 2843 } 2844 2845 return false; 2846 } 2847 2848 static void cleanup_after_large_page_init() { 2849 if (_hProcess) CloseHandle(_hProcess); 2850 _hProcess = NULL; 2851 if (_hToken) CloseHandle(_hToken); 2852 _hToken = NULL; 2853 } 2854 2855 static bool numa_interleaving_init() { 2856 bool success = false; 2857 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2858 2859 // print a warning if UseNUMAInterleaving flag is specified on command line 2860 bool warn_on_failure = use_numa_interleaving_specified; 2861 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2862 2863 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2864 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2865 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity); 2866 2867 if (os::Kernel32Dll::NumaCallsAvailable()) { 2868 if (numa_node_list_holder.build()) { 2869 if (PrintMiscellaneous && Verbose) { 2870 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2871 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2872 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i)); 2873 } 2874 tty->print("\n"); 2875 } 2876 success = true; 2877 } else { 2878 WARN("Process does not cover multiple NUMA nodes."); 2879 } 2880 } else { 2881 WARN("NUMA Interleaving is not supported by the operating system."); 2882 } 2883 if (!success) { 2884 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2885 } 2886 return success; 2887 #undef WARN 2888 } 2889 2890 // this routine is used whenever we need to reserve a contiguous VA range 2891 // but we need to make separate VirtualAlloc calls for each piece of the range 2892 // Reasons for doing this: 2893 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2894 // * UseNUMAInterleaving requires a separate node for each piece 2895 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2896 DWORD prot, 2897 bool should_inject_error = false) { 2898 char * p_buf; 2899 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2900 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2901 size_t chunk_size = UseNUMAInterleaving ? 
                                      NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Throw the probing reservation away again; the per-chunk VirtualAlloc
  // calls below re-allocate the same range piecewise.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Detect large-page support: resolve the needed OS entry points, acquire
// the lock-memory privilege and compute _large_page_size (honoring
// LargePageSizeInBytes when it is a multiple of the OS minimum).
// UseLargePages is cleared again if any step fails.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  // Over-reserve, compute the aligned base inside the oversized range,
  // release everything and re-reserve exactly [aligned_base, +size).
  // Another thread may grab the gap between release and re-reserve, in
  // which case the re-reserve fails and we simply retry.
  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit a large-page region.  Returns NULL (caller falls
// back to small pages) if bytes/alignment don't fit the large page size.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Print a warning describing a failed commit, including the OS error text
// fetched via os::lasterror().
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails.  We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      // Commit at most one underlying region per iteration.
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

// Release the whole reservation starting at addr; with MEM_RELEASE the
// size argument to VirtualFree must be 0, so 'bytes' is not passed on.
bool os::pd_release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA placement/re-alignment advice is a no-op on this platform.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  // Page-placement querying is not implemented on Windows.
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  // Not implemented on Windows; report the whole range as scanned.
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper that temporarily raises the Windows timer resolution for
// the duration of a timed wait (see comments below for the trade-offs).
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  // Raise the timer resolution to 1 ms, but only when the requested sleep
  // is not a multiple of the default 10 ms resolution.
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // WaitForMultipleObjects/Sleep take a DWORD timeout; loop for requests
  // longer than MAXDWORD milliseconds.
  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    SwitchToThread();
  } else {
    Sleep(0);
  }
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternative mapping used when ThreadPriorityPolicy == 1 (wider spread,
// uses TIME_CRITICAL at the top).
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}

// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Mark the thread interrupted and wake up every waiter: the interrupt
// event (used by os::sleep), the JSR-166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Get's a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

// Returns the cached pid when _initial_pid was set (presumably during
// startup — TODO confirm where), otherwise queries the CRT.
int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
bool   os::win32::_has_performance_count     = 0;

// Populate the os::win32 statics above from GetSystemInfo /
// GlobalMemoryStatusEx / GetVersionEx, and validate the default stack size.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      // 5.2 == Windows Server 2003 / XP x64
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Load a system DLL by bare name, looking only in the System and Windows
// directories (never the application directory or PATH).  On failure an
// explanation is written to ebuf and NULL is returned.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define EXIT_TIMEOUT 300000 /* 5 minutes */

// One-time callback for InitOnceExecuteOnce: initializes the critical
// section whose address is passed as the context parameter.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_WAIT_OBJECTS];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile jint process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_WAIT_OBJECTS - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);
        } else {
          ++handle_count;
        }

        // The current exiting thread has stored its handle in the array, and now
        // should leave the critical section before calling _endthreadex().

      } else if (what != EPT_THREAD) {
        if (handle_count > 0) {
          // Before ending the process, make sure all the threads that had called
          // _endthreadex() completed.

          // Set the priority level of the current thread to the same value as
          // the priority level of exiting threads.
          // This is to ensure it will be given a fair chance to execute if
          // the timeout expires.
3907 hthr = GetCurrentThread(); 3908 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3909 for (i = 0; i < handle_count; ++i) { 3910 SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL); 3911 } 3912 res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT); 3913 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3914 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3915 (res == WAIT_FAILED ? "failed" : "timed out"), 3916 GetLastError(), __FILE__, __LINE__); 3917 } 3918 for (i = 0; i < handle_count; ++i) { 3919 CloseHandle(handles[i]); 3920 } 3921 handle_count = 0; 3922 } 3923 3924 OrderAccess::release_store(&process_exiting, 1); 3925 } 3926 3927 LeaveCriticalSection(&crit_sect); 3928 } 3929 3930 if (what == EPT_THREAD) { 3931 while (OrderAccess::load_acquire(&process_exiting) != 0) { 3932 // Some other thread is about to call exit(), so we 3933 // don't let the current thread proceed to _endthreadex() 3934 SuspendThread(GetCurrentThread()); 3935 // Avoid busy-wait loop, if SuspendThread() failed. 3936 Sleep(EXIT_TIMEOUT); 3937 } 3938 } 3939 } 3940 3941 // We are here if either 3942 // - there's no 'race at exit' bug on this OS release; 3943 // - initialization of the critical section failed (unlikely); 3944 // - the current thread has stored its handle and left the critical section; 3945 // - the process-exiting thread has raised the flag and left the critical section. 3946 if (what == EPT_THREAD) { 3947 _endthreadex((unsigned)exit_code); 3948 } else if (what == EPT_PROCESS) { 3949 ::exit(exit_code); 3950 } else { 3951 _exit(exit_code); 3952 } 3953 3954 // Should not reach here 3955 return exit_code; 3956 } 3957 3958 #undef EXIT_TIMEOUT 3959 3960 void os::win32::setmode_streams() { 3961 _setmode(_fileno(stdin), _O_BINARY); 3962 _setmode(_fileno(stdout), _O_BINARY); 3963 _setmode(_fileno(stderr), _O_BINARY); 3964 } 3965 3966 3967 bool os::is_debugger_attached() { 3968 return IsDebuggerPresent() ? 
true : false; 3969 } 3970 3971 3972 void os::wait_for_keypress_at_exit(void) { 3973 if (PauseAtExit) { 3974 fprintf(stderr, "Press any key to continue...\n"); 3975 fgetc(stdin); 3976 } 3977 } 3978 3979 3980 int os::message_box(const char* title, const char* message) { 3981 int result = MessageBox(NULL, message, title, 3982 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3983 return result == IDYES; 3984 } 3985 3986 int os::allocate_thread_local_storage() { 3987 return TlsAlloc(); 3988 } 3989 3990 3991 void os::free_thread_local_storage(int index) { 3992 TlsFree(index); 3993 } 3994 3995 3996 void os::thread_local_storage_at_put(int index, void* value) { 3997 TlsSetValue(index, value); 3998 assert(thread_local_storage_at(index) == value, "Just checking"); 3999 } 4000 4001 4002 void* os::thread_local_storage_at(int index) { 4003 return TlsGetValue(index); 4004 } 4005 4006 4007 #ifndef PRODUCT 4008 #ifndef _WIN64 4009 // Helpers to check whether NX protection is enabled 4010 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 4011 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 4012 pex->ExceptionRecord->NumberParameters > 0 && 4013 pex->ExceptionRecord->ExceptionInformation[0] == 4014 EXCEPTION_INFO_EXEC_VIOLATION) { 4015 return EXCEPTION_EXECUTE_HANDLER; 4016 } 4017 return EXCEPTION_CONTINUE_SEARCH; 4018 } 4019 4020 void nx_check_protection() { 4021 // If NX is enabled we'll get an exception calling into code on the stack 4022 char code[] = { (char)0xC3 }; // ret 4023 void *code_ptr = (void *)code; 4024 __try { 4025 __asm call code_ptr 4026 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 4027 tty->print_raw_cr("NX protection detected."); 4028 } 4029 } 4030 #endif // _WIN64 4031 #endif // PRODUCT 4032 4033 // this is called _before_ the global arguments have been parsed 4034 void os::init(void) { 4035 _initial_pid = _getpid(); 4036 4037 init_random(1234567); 4038 4039 
win32::initialize_system_info(); 4040 win32::setmode_streams(); 4041 init_page_sizes((size_t) win32::vm_page_size()); 4042 4043 // This may be overridden later when argument processing is done. 4044 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, 4045 os::win32::is_windows_2003()); 4046 4047 // Initialize main_process and main_thread 4048 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4049 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4050 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4051 fatal("DuplicateHandle failed\n"); 4052 } 4053 main_thread_id = (int) GetCurrentThreadId(); 4054 } 4055 4056 // To install functions for atexit processing 4057 extern "C" { 4058 static void perfMemory_exit_helper() { 4059 perfMemory_exit(); 4060 } 4061 } 4062 4063 static jint initSock(); 4064 4065 // this is called _after_ the global arguments have been parsed 4066 jint os::init_2(void) { 4067 // Allocate a single page and mark it as readable for safepoint polling 4068 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4069 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 4070 4071 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4072 guarantee(return_page != NULL, "Commit Failed for polling page"); 4073 4074 os::set_polling_page(polling_page); 4075 4076 #ifndef PRODUCT 4077 if (Verbose && PrintMiscellaneous) { 4078 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", 4079 (intptr_t)polling_page); 4080 } 4081 #endif 4082 4083 if (!UseMembar) { 4084 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4085 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4086 4087 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4088 guarantee(return_page != 
NULL, "Commit Failed for memory serialize page"); 4089 4090 os::set_memory_serialize_page(mem_serialize_page); 4091 4092 #ifndef PRODUCT 4093 if (Verbose && PrintMiscellaneous) { 4094 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", 4095 (intptr_t)mem_serialize_page); 4096 } 4097 #endif 4098 } 4099 4100 // Setup Windows Exceptions 4101 4102 // for debugging float code generation bugs 4103 if (ForceFloatExceptions) { 4104 #ifndef _WIN64 4105 static long fp_control_word = 0; 4106 __asm { fstcw fp_control_word } 4107 // see Intel PPro Manual, Vol. 2, p 7-16 4108 const long precision = 0x20; 4109 const long underflow = 0x10; 4110 const long overflow = 0x08; 4111 const long zero_div = 0x04; 4112 const long denorm = 0x02; 4113 const long invalid = 0x01; 4114 fp_control_word |= invalid; 4115 __asm { fldcw fp_control_word } 4116 #endif 4117 } 4118 4119 // If stack_commit_size is 0, windows will reserve the default size, 4120 // but only commit a small portion of it. 4121 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 4122 size_t default_reserve_size = os::win32::default_stack_size(); 4123 size_t actual_reserve_size = stack_commit_size; 4124 if (stack_commit_size < default_reserve_size) { 4125 // If stack_commit_size == 0, we want this too 4126 actual_reserve_size = default_reserve_size; 4127 } 4128 4129 // Check minimum allowable stack size for thread creation and to initialize 4130 // the java system classes, including StackOverflowError - depends on page 4131 // size. Add a page for compiler2 recursion in main thread. 4132 // Add in 2*BytesPerWord times page size to account for VM stack during 4133 // class initialization depending on 32 or 64 bit VM. 
4134 size_t min_stack_allowed = 4135 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 4136 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 4137 if (actual_reserve_size < min_stack_allowed) { 4138 tty->print_cr("\nThe stack size specified is too small, " 4139 "Specify at least %dk", 4140 min_stack_allowed / K); 4141 return JNI_ERR; 4142 } 4143 4144 JavaThread::set_stack_size_at_create(stack_commit_size); 4145 4146 // Calculate theoretical max. size of Threads to guard gainst artifical 4147 // out-of-memory situations, where all available address-space has been 4148 // reserved by thread stacks. 4149 assert(actual_reserve_size != 0, "Must have a stack"); 4150 4151 // Calculate the thread limit when we should start doing Virtual Memory 4152 // banging. Currently when the threads will have used all but 200Mb of space. 4153 // 4154 // TODO: consider performing a similar calculation for commit size instead 4155 // as reserve size, since on a 64-bit platform we'll run into that more 4156 // often than running out of virtual memory space. We can use the 4157 // lower value of the two calculations as the os_thread_limit. 4158 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4159 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4160 4161 // at exit methods are called in the reverse order of their registration. 4162 // there is no limit to the number of functions registered. atexit does 4163 // not set errno. 4164 4165 if (PerfAllowAtExitRegistration) { 4166 // only register atexit functions if PerfAllowAtExitRegistration is set. 4167 // atexit functions can be delayed until process exit time, which 4168 // can be problematic for embedded VM situations. Embedded VMs should 4169 // call DestroyJavaVM() to assure that VM resources are released. 
4170 4171 // note: perfMemory_exit_helper atexit function may be removed in 4172 // the future if the appropriate cleanup code can be added to the 4173 // VM_Exit VMOperation's doit method. 4174 if (atexit(perfMemory_exit_helper) != 0) { 4175 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4176 } 4177 } 4178 4179 #ifndef _WIN64 4180 // Print something if NX is enabled (win32 on AMD64) 4181 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4182 #endif 4183 4184 // initialize thread priority policy 4185 prio_init(); 4186 4187 if (UseNUMA && !ForceNUMA) { 4188 UseNUMA = false; // We don't fully support this yet 4189 } 4190 4191 if (UseNUMAInterleaving) { 4192 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4193 bool success = numa_interleaving_init(); 4194 if (!success) UseNUMAInterleaving = false; 4195 } 4196 4197 if (initSock() != JNI_OK) { 4198 return JNI_ERR; 4199 } 4200 4201 return JNI_OK; 4202 } 4203 4204 // Mark the polling page as unreadable 4205 void os::make_polling_page_unreadable(void) { 4206 DWORD old_status; 4207 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4208 PAGE_NOACCESS, &old_status)) { 4209 fatal("Could not disable polling page"); 4210 } 4211 } 4212 4213 // Mark the polling page as readable 4214 void os::make_polling_page_readable(void) { 4215 DWORD old_status; 4216 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4217 PAGE_READONLY, &old_status)) { 4218 fatal("Could not enable polling page"); 4219 } 4220 } 4221 4222 4223 int os::stat(const char *path, struct stat *sbuf) { 4224 char pathbuf[MAX_PATH]; 4225 if (strlen(path) > MAX_PATH - 1) { 4226 errno = ENAMETOOLONG; 4227 return -1; 4228 } 4229 os::native_path(strcpy(pathbuf, path)); 4230 int ret = ::stat(pathbuf, sbuf); 4231 if (sbuf != NULL && UseUTCFileTimestamp) { 4232 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 4233 // the system timezone and so can return different values for the 4234 // same file if/when daylight savings time changes. This adjustment 4235 // makes sure the same timestamp is returned regardless of the TZ. 4236 // 4237 // See: 4238 // http://msdn.microsoft.com/library/ 4239 // default.asp?url=/library/en-us/sysinfo/base/ 4240 // time_zone_information_str.asp 4241 // and 4242 // http://msdn.microsoft.com/library/default.asp?url= 4243 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4244 // 4245 // NOTE: there is a insidious bug here: If the timezone is changed 4246 // after the call to stat() but before 'GetTimeZoneInformation()', then 4247 // the adjustment we do here will be wrong and we'll return the wrong 4248 // value (which will likely end up creating an invalid class data 4249 // archive). Absent a better API for this, or some time zone locking 4250 // mechanism, we'll have to live with this risk. 4251 TIME_ZONE_INFORMATION tz; 4252 DWORD tzid = GetTimeZoneInformation(&tz); 4253 int daylightBias = 4254 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4255 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4256 } 4257 return ret; 4258 } 4259 4260 4261 #define FT2INT64(ft) \ 4262 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4263 4264 4265 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4266 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4267 // of a thread. 4268 // 4269 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4270 // the fast estimate available on the platform. 
4271 4272 // current_thread_cpu_time() is not optimized for Windows yet 4273 jlong os::current_thread_cpu_time() { 4274 // return user + sys since the cost is the same 4275 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4276 } 4277 4278 jlong os::thread_cpu_time(Thread* thread) { 4279 // consistent with what current_thread_cpu_time() returns. 4280 return os::thread_cpu_time(thread, true /* user+sys */); 4281 } 4282 4283 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4284 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4285 } 4286 4287 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4288 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4289 // If this function changes, os::is_thread_cpu_time_supported() should too 4290 if (os::win32::is_nt()) { 4291 FILETIME CreationTime; 4292 FILETIME ExitTime; 4293 FILETIME KernelTime; 4294 FILETIME UserTime; 4295 4296 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4297 &ExitTime, &KernelTime, &UserTime) == 0) { 4298 return -1; 4299 } else if (user_sys_cpu_time) { 4300 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4301 } else { 4302 return FT2INT64(UserTime) * 100; 4303 } 4304 } else { 4305 return (jlong) timeGetTime() * 1000000; 4306 } 4307 } 4308 4309 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4310 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4311 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4312 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4313 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4314 } 4315 4316 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4317 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4318 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4319 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4320 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4321 } 4322 4323 bool os::is_thread_cpu_time_supported() { 4324 // see os::thread_cpu_time 4325 if (os::win32::is_nt()) { 4326 FILETIME CreationTime; 4327 FILETIME ExitTime; 4328 FILETIME KernelTime; 4329 FILETIME UserTime; 4330 4331 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4332 &KernelTime, &UserTime) == 0) { 4333 return false; 4334 } else { 4335 return true; 4336 } 4337 } else { 4338 return false; 4339 } 4340 } 4341 4342 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4343 // It does have primitives (PDH API) to get CPU usage and run queue length. 4344 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4345 // If we wanted to implement loadavg on Windows, we have a few options: 4346 // 4347 // a) Query CPU usage and run queue length and "fake" an answer by 4348 // returning the CPU usage if it's under 100%, and the run queue 4349 // length otherwise. It turns out that querying is pretty slow 4350 // on Windows, on the order of 200 microseconds on a fast machine. 4351 // Note that on the Windows the CPU usage value is the % usage 4352 // since the last time the API was called (and the first call 4353 // returns 100%), so we'd have to deal with that as well. 4354 // 4355 // b) Sample the "fake" answer using a sampling thread and store 4356 // the answer in a global variable. The call to loadavg would 4357 // just return the value of the global, avoiding the slow query. 4358 // 4359 // c) Sample a better answer using exponential decay to smooth the 4360 // value. This is basically the algorithm used by UNIX kernels. 4361 // 4362 // Note that sampling thread starvation could affect both (b) and (c). 
4363 int os::loadavg(double loadavg[], int nelem) { 4364 return -1; 4365 } 4366 4367 4368 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4369 bool os::dont_yield() { 4370 return DontYieldALot; 4371 } 4372 4373 // This method is a slightly reworked copy of JDK's sysOpen 4374 // from src/windows/hpi/src/sys_api_md.c 4375 4376 int os::open(const char *path, int oflag, int mode) { 4377 char pathbuf[MAX_PATH]; 4378 4379 if (strlen(path) > MAX_PATH - 1) { 4380 errno = ENAMETOOLONG; 4381 return -1; 4382 } 4383 os::native_path(strcpy(pathbuf, path)); 4384 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4385 } 4386 4387 FILE* os::open(int fd, const char* mode) { 4388 return ::_fdopen(fd, mode); 4389 } 4390 4391 // Is a (classpath) directory empty? 4392 bool os::dir_is_empty(const char* path) { 4393 WIN32_FIND_DATA fd; 4394 HANDLE f = FindFirstFile(path, &fd); 4395 if (f == INVALID_HANDLE_VALUE) { 4396 return true; 4397 } 4398 FindClose(f); 4399 return false; 4400 } 4401 4402 // create binary file, rewriting existing file if required 4403 int os::create_binary_file(const char* path, bool rewrite_existing) { 4404 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4405 if (!rewrite_existing) { 4406 oflags |= _O_EXCL; 4407 } 4408 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4409 } 4410 4411 // return current position of file pointer 4412 jlong os::current_file_offset(int fd) { 4413 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4414 } 4415 4416 // move file pointer to the specified offset 4417 jlong os::seek_to_file_offset(int fd, jlong offset) { 4418 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4419 } 4420 4421 4422 jlong os::lseek(int fd, jlong offset, int whence) { 4423 return (jlong) ::_lseeki64(fd, offset, whence); 4424 } 4425 4426 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4427 OVERLAPPED ov; 4428 DWORD nread; 4429 BOOL result; 4430 4431 ZeroMemory(&ov, 
sizeof(ov)); 4432 ov.Offset = (DWORD)offset; 4433 ov.OffsetHigh = (DWORD)(offset >> 32); 4434 4435 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4436 4437 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4438 4439 return result ? nread : 0; 4440 } 4441 4442 4443 // This method is a slightly reworked copy of JDK's sysNativePath 4444 // from src/windows/hpi/src/path_md.c 4445 4446 // Convert a pathname to native format. On win32, this involves forcing all 4447 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4448 // sometimes rejects '/') and removing redundant separators. The input path is 4449 // assumed to have been converted into the character encoding used by the local 4450 // system. Because this might be a double-byte encoding, care is taken to 4451 // treat double-byte lead characters correctly. 4452 // 4453 // This procedure modifies the given path in place, as the result is never 4454 // longer than the original. There is no error return; this operation always 4455 // succeeds. 4456 char * os::native_path(char *path) { 4457 char *src = path, *dst = path, *end = path; 4458 char *colon = NULL; // If a drive specifier is found, this will 4459 // point to the colon following the drive letter 4460 4461 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4462 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4463 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4464 4465 // Check for leading separators 4466 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4467 while (isfilesep(*src)) { 4468 src++; 4469 } 4470 4471 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4472 // Remove leading separators if followed by drive specifier. This 4473 // hack is necessary to support file URLs containing drive 4474 // specifiers (e.g., "file://c:/path"). As a side effect, 4475 // "/c:/path" can be used as an alternative to "c:/path". 
4476 *dst++ = *src++; 4477 colon = dst; 4478 *dst++ = ':'; 4479 src++; 4480 } else { 4481 src = path; 4482 if (isfilesep(src[0]) && isfilesep(src[1])) { 4483 // UNC pathname: Retain first separator; leave src pointed at 4484 // second separator so that further separators will be collapsed 4485 // into the second separator. The result will be a pathname 4486 // beginning with "\\\\" followed (most likely) by a host name. 4487 src = dst = path + 1; 4488 path[0] = '\\'; // Force first separator to '\\' 4489 } 4490 } 4491 4492 end = dst; 4493 4494 // Remove redundant separators from remainder of path, forcing all 4495 // separators to be '\\' rather than '/'. Also, single byte space 4496 // characters are removed from the end of the path because those 4497 // are not legal ending characters on this operating system. 4498 // 4499 while (*src != '\0') { 4500 if (isfilesep(*src)) { 4501 *dst++ = '\\'; src++; 4502 while (isfilesep(*src)) src++; 4503 if (*src == '\0') { 4504 // Check for trailing separator 4505 end = dst; 4506 if (colon == dst - 2) break; // "z:\\" 4507 if (dst == path + 1) break; // "\\" 4508 if (dst == path + 2 && isfilesep(path[0])) { 4509 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4510 // beginning of a UNC pathname. Even though it is not, by 4511 // itself, a valid UNC pathname, we leave it as is in order 4512 // to be consistent with the path canonicalizer as well 4513 // as the win32 APIs, which treat this case as an invalid 4514 // UNC pathname rather than as an alias for the root 4515 // directory of the current drive. 
4516 break; 4517 } 4518 end = --dst; // Path does not denote a root directory, so 4519 // remove trailing separator 4520 break; 4521 } 4522 end = dst; 4523 } else { 4524 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4525 *dst++ = *src++; 4526 if (*src) *dst++ = *src++; 4527 end = dst; 4528 } else { // Copy a single-byte character 4529 char c = *src++; 4530 *dst++ = c; 4531 // Space is not a legal ending character 4532 if (c != ' ') end = dst; 4533 } 4534 } 4535 } 4536 4537 *end = '\0'; 4538 4539 // For "z:", add "." to work around a bug in the C runtime library 4540 if (colon == dst - 1) { 4541 path[2] = '.'; 4542 path[3] = '\0'; 4543 } 4544 4545 return path; 4546 } 4547 4548 // This code is a copy of JDK's sysSetLength 4549 // from src/windows/hpi/src/sys_api_md.c 4550 4551 int os::ftruncate(int fd, jlong length) { 4552 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4553 long high = (long)(length >> 32); 4554 DWORD ret; 4555 4556 if (h == (HANDLE)(-1)) { 4557 return -1; 4558 } 4559 4560 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4561 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4562 return -1; 4563 } 4564 4565 if (::SetEndOfFile(h) == FALSE) { 4566 return -1; 4567 } 4568 4569 return 0; 4570 } 4571 4572 4573 // This code is a copy of JDK's sysSync 4574 // from src/windows/hpi/src/sys_api_md.c 4575 // except for the legacy workaround for a bug in Win 98 4576 4577 int os::fsync(int fd) { 4578 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4579 4580 if ((!::FlushFileBuffers(handle)) && 4581 (GetLastError() != ERROR_ACCESS_DENIED)) { 4582 // from winerror.h 4583 return -1; 4584 } 4585 return 0; 4586 } 4587 4588 static int nonSeekAvailable(int, long *); 4589 static int stdinAvailable(int, long *); 4590 4591 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4592 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4593 4594 // This code is a copy of JDK's sysAvailable 4595 // from src/windows/hpi/src/sys_api_md.c 4596 
4597 int os::available(int fd, jlong *bytes) { 4598 jlong cur, end; 4599 struct _stati64 stbuf64; 4600 4601 if (::_fstati64(fd, &stbuf64) >= 0) { 4602 int mode = stbuf64.st_mode; 4603 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4604 int ret; 4605 long lpbytes; 4606 if (fd == 0) { 4607 ret = stdinAvailable(fd, &lpbytes); 4608 } else { 4609 ret = nonSeekAvailable(fd, &lpbytes); 4610 } 4611 (*bytes) = (jlong)(lpbytes); 4612 return ret; 4613 } 4614 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4615 return FALSE; 4616 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4617 return FALSE; 4618 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4619 return FALSE; 4620 } 4621 *bytes = end - cur; 4622 return TRUE; 4623 } else { 4624 return FALSE; 4625 } 4626 } 4627 4628 // This code is a copy of JDK's nonSeekAvailable 4629 // from src/windows/hpi/src/sys_api_md.c 4630 4631 static int nonSeekAvailable(int fd, long *pbytes) { 4632 // This is used for available on non-seekable devices 4633 // (like both named and anonymous pipes, such as pipes 4634 // connected to an exec'd process). 4635 // Standard Input is a special case. 4636 HANDLE han; 4637 4638 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4639 return FALSE; 4640 } 4641 4642 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4643 // PeekNamedPipe fails when at EOF. In that case we 4644 // simply make *pbytes = 0 which is consistent with the 4645 // behavior we get on Solaris when an fd is at EOF. 4646 // The only alternative is to raise an Exception, 4647 // which isn't really warranted. 
4648 // 4649 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4650 return FALSE; 4651 } 4652 *pbytes = 0; 4653 } 4654 return TRUE; 4655 } 4656 4657 #define MAX_INPUT_EVENTS 2000 4658 4659 // This code is a copy of JDK's stdinAvailable 4660 // from src/windows/hpi/src/sys_api_md.c 4661 4662 static int stdinAvailable(int fd, long *pbytes) { 4663 HANDLE han; 4664 DWORD numEventsRead = 0; // Number of events read from buffer 4665 DWORD numEvents = 0; // Number of events in buffer 4666 DWORD i = 0; // Loop index 4667 DWORD curLength = 0; // Position marker 4668 DWORD actualLength = 0; // Number of bytes readable 4669 BOOL error = FALSE; // Error holder 4670 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4671 4672 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4673 return FALSE; 4674 } 4675 4676 // Construct an array of input records in the console buffer 4677 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4678 if (error == 0) { 4679 return nonSeekAvailable(fd, pbytes); 4680 } 4681 4682 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4683 if (numEvents > MAX_INPUT_EVENTS) { 4684 numEvents = MAX_INPUT_EVENTS; 4685 } 4686 4687 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4688 if (lpBuffer == NULL) { 4689 return FALSE; 4690 } 4691 4692 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4693 if (error == 0) { 4694 os::free(lpBuffer); 4695 return FALSE; 4696 } 4697 4698 // Examine input records for the number of bytes available 4699 for (i=0; i<numEvents; i++) { 4700 if (lpBuffer[i].EventType == KEY_EVENT) { 4701 4702 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4703 &(lpBuffer[i].Event); 4704 if (keyRecord->bKeyDown == TRUE) { 4705 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4706 curLength++; 4707 if (*keyPressed == '\r') { 4708 actualLength = curLength; 4709 } 4710 } 4711 } 4712 } 4713 4714 if (lpBuffer != NULL) { 4715 os::free(lpBuffer); 4716 } 
4717 4718 *pbytes = (long) actualLength; 4719 return TRUE; 4720 } 4721 4722 // Map a block of memory. 4723 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4724 char *addr, size_t bytes, bool read_only, 4725 bool allow_exec) { 4726 HANDLE hFile; 4727 char* base; 4728 4729 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4730 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4731 if (hFile == NULL) { 4732 if (PrintMiscellaneous && Verbose) { 4733 DWORD err = GetLastError(); 4734 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4735 } 4736 return NULL; 4737 } 4738 4739 if (allow_exec) { 4740 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4741 // unless it comes from a PE image (which the shared archive is not.) 4742 // Even VirtualProtect refuses to give execute access to mapped memory 4743 // that was not previously executable. 4744 // 4745 // Instead, stick the executable region in anonymous memory. Yuck. 4746 // Penalty is that ~4 pages will not be shareable - in the future 4747 // we might consider DLLizing the shared archive with a proper PE 4748 // header so that mapping executable + sharing is possible. 4749 4750 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4751 PAGE_READWRITE); 4752 if (base == NULL) { 4753 if (PrintMiscellaneous && Verbose) { 4754 DWORD err = GetLastError(); 4755 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4756 } 4757 CloseHandle(hFile); 4758 return NULL; 4759 } 4760 4761 DWORD bytes_read; 4762 OVERLAPPED overlapped; 4763 overlapped.Offset = (DWORD)file_offset; 4764 overlapped.OffsetHigh = 0; 4765 overlapped.hEvent = NULL; 4766 // ReadFile guarantees that if the return value is true, the requested 4767 // number of bytes were read before returning. 
4768 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4769 if (!res) { 4770 if (PrintMiscellaneous && Verbose) { 4771 DWORD err = GetLastError(); 4772 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4773 } 4774 release_memory(base, bytes); 4775 CloseHandle(hFile); 4776 return NULL; 4777 } 4778 } else { 4779 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4780 NULL /* file_name */); 4781 if (hMap == NULL) { 4782 if (PrintMiscellaneous && Verbose) { 4783 DWORD err = GetLastError(); 4784 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4785 } 4786 CloseHandle(hFile); 4787 return NULL; 4788 } 4789 4790 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4791 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4792 (DWORD)bytes, addr); 4793 if (base == NULL) { 4794 if (PrintMiscellaneous && Verbose) { 4795 DWORD err = GetLastError(); 4796 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4797 } 4798 CloseHandle(hMap); 4799 CloseHandle(hFile); 4800 return NULL; 4801 } 4802 4803 if (CloseHandle(hMap) == 0) { 4804 if (PrintMiscellaneous && Verbose) { 4805 DWORD err = GetLastError(); 4806 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4807 } 4808 CloseHandle(hFile); 4809 return base; 4810 } 4811 } 4812 4813 if (allow_exec) { 4814 DWORD old_protect; 4815 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4816 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4817 4818 if (!res) { 4819 if (PrintMiscellaneous && Verbose) { 4820 DWORD err = GetLastError(); 4821 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4822 } 4823 // Don't consider this a hard error, on IA32 even if the 4824 // VirtualProtect fails, we should still be able to execute 4825 CloseHandle(hFile); 4826 return base; 4827 } 4828 } 4829 4830 if (CloseHandle(hFile) == 0) { 4831 if (PrintMiscellaneous && Verbose) { 4832 DWORD err = GetLastError(); 4833 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4834 } 4835 return base; 4836 } 4837 4838 return base; 4839 } 4840 4841 4842 // Remap a block of memory. 4843 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4844 char *addr, size_t bytes, bool read_only, 4845 bool allow_exec) { 4846 // This OS does not allow existing memory maps to be remapped so we 4847 // have to unmap the memory before we remap it. 4848 if (!os::unmap_memory(addr, bytes)) { 4849 return NULL; 4850 } 4851 4852 // There is a very small theoretical window between the unmap_memory() 4853 // call above and the map_memory() call below where a thread in native 4854 // code may be able to access an address that is no longer mapped. 4855 4856 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4857 read_only, allow_exec); 4858 } 4859 4860 4861 // Unmap a block of memory. 4862 // Returns true=success, otherwise false. 
4863 4864 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4865 BOOL result = UnmapViewOfFile(addr); 4866 if (result == 0) { 4867 if (PrintMiscellaneous && Verbose) { 4868 DWORD err = GetLastError(); 4869 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4870 } 4871 return false; 4872 } 4873 return true; 4874 } 4875 4876 void os::pause() { 4877 char filename[MAX_PATH]; 4878 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4879 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4880 } else { 4881 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4882 } 4883 4884 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4885 if (fd != -1) { 4886 struct stat buf; 4887 ::close(fd); 4888 while (::stat(filename, &buf) == 0) { 4889 Sleep(100); 4890 } 4891 } else { 4892 jio_fprintf(stderr, 4893 "Could not open pause file '%s', continuing immediately.\n", filename); 4894 } 4895 } 4896 4897 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4898 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4899 } 4900 4901 // See the caveats for this class in os_windows.hpp 4902 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4903 // into this method and returns false. If no OS EXCEPTION was raised, returns 4904 // true. 4905 // The callback is supposed to provide the method that should be protected. 
4906 // 4907 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4908 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4909 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4910 "crash_protection already set?"); 4911 4912 bool success = true; 4913 __try { 4914 WatcherThread::watcher_thread()->set_crash_protection(this); 4915 cb.call(); 4916 } __except(EXCEPTION_EXECUTE_HANDLER) { 4917 // only for protection, nothing to do 4918 success = false; 4919 } 4920 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4921 return success; 4922 } 4923 4924 // An Event wraps a win32 "CreateEvent" kernel handle. 4925 // 4926 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4927 // 4928 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4929 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4930 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4931 // In addition, an unpark() operation might fetch the handle field, but the 4932 // event could recycle between the fetch and the SetEvent() operation. 4933 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4934 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4935 // on an stale but recycled handle would be harmless, but in practice this might 4936 // confuse other non-Sun code, so it's not a viable approach. 4937 // 4938 // 2: Once a win32 event handle is associated with an Event, it remains associated 4939 // with the Event. The event handle is never closed. This could be construed 4940 // as handle leakage, but only up to the maximum # of threads that have been extant 4941 // at any one time. This shouldn't be an issue, as windows platforms typically 4942 // permit a process to have hundreds of thousands of open handles. 
4943 // 4944 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4945 // and release unused handles. 4946 // 4947 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4948 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4949 // 4950 // 5. Use an RCU-like mechanism (Read-Copy Update). 4951 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4952 // 4953 // We use (2). 4954 // 4955 // TODO-FIXME: 4956 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4957 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4958 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4959 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4960 // into a single win32 CreateEvent() handle. 4961 // 4962 // Assumption: 4963 // Only one parker can exist on an event, which is why we allocate 4964 // them per-thread. Multiple unparkers can coexist. 4965 // 4966 // _Event transitions in park() 4967 // -1 => -1 : illegal 4968 // 1 => 0 : pass - return immediately 4969 // 0 => -1 : block; then set _Event to 0 before returning 4970 // 4971 // _Event transitions in unpark() 4972 // 0 => 1 : just return 4973 // 1 => 1 : just return 4974 // -1 => either 0 or 1; must signal target thread 4975 // That is, we can safely transition _Event from -1 to either 4976 // 0 or 1. 4977 // 4978 // _Event serves as a restricted-range semaphore. 4979 // -1 : thread is blocked, i.e. there is a waiter 4980 // 0 : neutral: thread is running or ready, 4981 // could have been signaled after a wait started 4982 // 1 : signaled - thread is running or ready 4983 // 4984 // Another possible encoding of _Event would be with 4985 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 
//

// Timed park: block the owning thread for up to Millis milliseconds, or
// until unpark() signals the event. Returns OS_OK if awakened by a signal
// (or near-simultaneous signal/timeout), OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; v holds the pre-decrement value.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  // v == 1 means a signal was already pending: consume it and return.
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    // Wait in chunks of at most MAXTIMEOUT ms (see comment above).
    DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block the owning thread until unpark() signals the event.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  // A pending signal (v == 1) lets us return without blocking.
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop to tolerate spurious wakeups: only exit once unpark() has
  // raised _Event out of the negative (parked) state.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

// Signal the event: set _Event to 1 and, if a thread was parked
// (previous value was -1), wake it via the win32 event handle.
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value; only a parked waiter (< 0) needs SetEvent.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
// from Monitor.
5112 5113 void Parker::park(bool isAbsolute, jlong time) { 5114 guarantee(_ParkEvent != NULL, "invariant"); 5115 // First, demultiplex/decode time arguments 5116 if (time < 0) { // don't wait 5117 return; 5118 } else if (time == 0 && !isAbsolute) { 5119 time = INFINITE; 5120 } else if (isAbsolute) { 5121 time -= os::javaTimeMillis(); // convert to relative time 5122 if (time <= 0) { // already elapsed 5123 return; 5124 } 5125 } else { // relative 5126 time /= 1000000; // Must coarsen from nanos to millis 5127 if (time == 0) { // Wait for the minimal time unit if zero 5128 time = 1; 5129 } 5130 } 5131 5132 JavaThread* thread = (JavaThread*)(Thread::current()); 5133 assert(thread->is_Java_thread(), "Must be JavaThread"); 5134 JavaThread *jt = (JavaThread *)thread; 5135 5136 // Don't wait if interrupted or already triggered 5137 if (Thread::is_interrupted(thread, false) || 5138 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 5139 ResetEvent(_ParkEvent); 5140 return; 5141 } else { 5142 ThreadBlockInVM tbivm(jt); 5143 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 5144 jt->set_suspend_equivalent(); 5145 5146 WaitForSingleObject(_ParkEvent, time); 5147 ResetEvent(_ParkEvent); 5148 5149 // If externally suspended while waiting, re-suspend 5150 if (jt->handle_special_suspend_equivalent_condition()) { 5151 jt->java_suspend_self(); 5152 } 5153 } 5154 } 5155 5156 void Parker::unpark() { 5157 guarantee(_ParkEvent != NULL, "invariant"); 5158 SetEvent(_ParkEvent); 5159 } 5160 5161 // Run the specified command in a separate process. Return its exit value, 5162 // or -1 on failure (e.g. can't create a new process). 
// Run cmd as a child process via CreateProcess, wait for it to finish,
// and return its exit code, or -1 if the process could not be created.
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    // NOTE(review): the return value of GetExitCodeProcess is not checked;
    // on failure exit_code would be read uninitialized - confirm acceptable.
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;

// Walk and validate the C heap every MallocVerifyInterval calls once
// MallocVerifyStart allocations have happened (or always when force is set).
// Calls fatal() if corruption is found; otherwise returns true.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();

    // If we fail to lock the heap, then gflags.exe has been used
    // or some other special heap flag has been set that prevents
    // locking. We don't try to walk a heap we can't lock.
    if (HeapLock(heap) != 0) {
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          // NOTE(review): %#x with a pointer truncates on 64-bit Windows -
          // consider PTR_FORMAT; kept as-is here.
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      // HeapWalk ends with ERROR_NO_MORE_ITEMS on a clean full traversal.
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


// Locate addr in loaded code/data and describe it on st - unimplemented.
bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

// Vectored/SEH filter used around memory-serialize page accesses: if the
// access violation hit the serialize page, resume execution; otherwise
// keep searching for another handler.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // ExceptionInformation[1] is the faulting address for access violations.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

// Initialize Winsock 2.2 for this process. Returns JNI_OK on success,
// JNI_ERR if the DLL is unavailable or WSAStartup fails.
static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) { 5279 return (struct hostent*)os::WinSock2Dll::gethostbyname(name); 5280 } 5281 5282 int os::socket_close(int fd) { 5283 return ::closesocket(fd); 5284 } 5285 5286 int os::socket(int domain, int type, int protocol) { 5287 return ::socket(domain, type, protocol); 5288 } 5289 5290 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5291 return ::connect(fd, him, len); 5292 } 5293 5294 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5295 return ::recv(fd, buf, (int)nBytes, flags); 5296 } 5297 5298 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5299 return ::send(fd, buf, (int)nBytes, flags); 5300 } 5301 5302 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5303 return ::send(fd, buf, (int)nBytes, flags); 5304 } 5305 5306 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5307 #if defined(IA32) 5308 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5309 #elif defined (AMD64) 5310 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5311 #endif 5312 5313 // returns true if thread could be suspended, 5314 // false otherwise 5315 static bool do_suspend(HANDLE* h) { 5316 if (h != NULL) { 5317 if (SuspendThread(*h) != ~0) { 5318 return true; 5319 } 5320 } 5321 return false; 5322 } 5323 5324 // resume the thread 5325 // calling resume on an active thread is a no-op 5326 static void do_resume(HANDLE* h) { 5327 if (h != NULL) { 5328 ResumeThread(*h); 5329 } 5330 } 5331 5332 // retrieve a suspend/resume context capable handle 5333 // from the tid. Caller validates handle return value. 
// Open a thread handle with the access rights needed to suspend/resume
// the thread and read its register context. Caller validates *h.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its register context, hand it to
// do_task(), then resume the thread and release the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions; they are resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn)(HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn)(PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn)(UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

// The *Available() functions below perform lazy initialization and report
// whether the corresponding entry point was resolved; the wrappers assert
// that the matching Available() call has been made first.
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr,
                                           SIZE_T bytes, DWORD flags,
                                           DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node,
                                               PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Unlike the wrappers above, this one self-initializes and degrades
// gracefully (returns 0 frames) when the entry point is unavailable.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
                                                 ULONG FrameToCapture,
                                                 PVOID* BackTrace,
                                                 PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
                                     BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the Kernel32 entry points that are looked up on every Windows
// version (large pages, NUMA, stack backtrace).
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// JDK7-and-later build: the APIs below are guaranteed present, so they are
// called directly instead of through GetProcAddress lookups.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                        DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,
                                           LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess,
                                             HMODULE *lpModule, DWORD cb,
                                             LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPTSTR lpFilename,
                                               DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPMODULEINFO lpmodinfo,
                                               DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested,
                                        LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                                   BOOL DisableAllPrivileges,
                                                   PTOKEN_PRIVILEGES NewState,
                                                   DWORD BufferLength,
                                                   PTOKEN_PRIVILEGES PreviousState,
                                                   PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
                                              DWORD DesiredAccess,
                                              PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
                                                  LPCTSTR lpName,
                                                  PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

// Handle used to look up symbols in the VM's own image.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//  sym_name: Symbol in library we are looking for
//  lib_name: Name of library to look in, NULL for shared libs.
//  is_absolute_path == true if lib_name is absolute path to agent
//                      such as "C:/a/b/L.dll"
//                   == false if only the base name of the library is passed in
//                      such as "L"
// Returns a C-heap string "<sym>_<lib>" (preserving any trailing "@XX"
// __stdcall decoration after the lib name), or NULL on failure.
// Caller is responsible for freeing the returned buffer.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // NOTE(review): this length check uses the full (pre-strip) path
      // length, so it is weaker than checking the stripped base name.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      // Drop the platform library prefix and suffix (e.g. ".dll").
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator and one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-or-earlier build: these Kernel32 APIs may be missing at runtime,
// so they are bound lazily through GetProcAddress.
// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;

// Resolve the optional Kernel32 entry points once per process.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
         "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                 DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
         "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

// NOTE(review): `inline` is inconsistent with the sibling out-of-class
// definitions in this #else branch - confirm it is intentional.
inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo)
{ 5725 assert(initialized && _GetNativeSystemInfo != NULL, 5726 "GetNativeSystemInfoAvailable() not yet called"); 5727 5728 _GetNativeSystemInfo(lpSystemInfo); 5729 } 5730 5731 // PSAPI API 5732 5733 5734 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5735 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD); 5736 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5737 5738 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5739 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5740 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5741 BOOL os::PSApiDll::initialized = FALSE; 5742 5743 void os::PSApiDll::initialize() { 5744 if (!initialized) { 5745 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5746 if (handle != NULL) { 5747 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5748 "EnumProcessModules"); 5749 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5750 "GetModuleFileNameExA"); 5751 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5752 "GetModuleInformation"); 5753 } 5754 initialized = TRUE; 5755 } 5756 } 5757 5758 5759 5760 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, 5761 DWORD cb, LPDWORD lpcbNeeded) { 5762 assert(initialized && _EnumProcessModules != NULL, 5763 "PSApiAvailable() not yet called"); 5764 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5765 } 5766 5767 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, 5768 LPTSTR lpFilename, DWORD nSize) { 5769 assert(initialized && _GetModuleFileNameEx != NULL, 5770 "PSApiAvailable() not yet called"); 5771 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5772 } 5773 5774 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, 5775 LPMODULEINFO lpmodinfo, DWORD cb) { 5776 
assert(initialized && _GetModuleInformation != NULL, 5777 "PSApiAvailable() not yet called"); 5778 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5779 } 5780 5781 BOOL os::PSApiDll::PSApiAvailable() { 5782 if (!initialized) { 5783 initialize(); 5784 } 5785 return _EnumProcessModules != NULL && 5786 _GetModuleFileNameEx != NULL && 5787 _GetModuleInformation != NULL; 5788 } 5789 5790 5791 // WinSock2 API 5792 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5793 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5794 5795 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5796 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5797 BOOL os::WinSock2Dll::initialized = FALSE; 5798 5799 void os::WinSock2Dll::initialize() { 5800 if (!initialized) { 5801 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5802 if (handle != NULL) { 5803 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5804 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5805 } 5806 initialized = TRUE; 5807 } 5808 } 5809 5810 5811 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5812 assert(initialized && _WSAStartup != NULL, 5813 "WinSock2Available() not yet called"); 5814 return _WSAStartup(wVersionRequested, lpWSAData); 5815 } 5816 5817 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5818 assert(initialized && _gethostbyname != NULL, 5819 "WinSock2Available() not yet called"); 5820 return _gethostbyname(name); 5821 } 5822 5823 BOOL os::WinSock2Dll::WinSock2Available() { 5824 if (!initialized) { 5825 initialize(); 5826 } 5827 return _WSAStartup != NULL && 5828 _gethostbyname != NULL; 5829 } 5830 5831 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5832 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5833 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, 
LPCTSTR, PLUID); 5834 5835 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5836 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5837 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5838 BOOL os::Advapi32Dll::initialized = FALSE; 5839 5840 void os::Advapi32Dll::initialize() { 5841 if (!initialized) { 5842 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5843 if (handle != NULL) { 5844 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5845 "AdjustTokenPrivileges"); 5846 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5847 "OpenProcessToken"); 5848 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5849 "LookupPrivilegeValueA"); 5850 } 5851 initialized = TRUE; 5852 } 5853 } 5854 5855 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5856 BOOL DisableAllPrivileges, 5857 PTOKEN_PRIVILEGES NewState, 5858 DWORD BufferLength, 5859 PTOKEN_PRIVILEGES PreviousState, 5860 PDWORD ReturnLength) { 5861 assert(initialized && _AdjustTokenPrivileges != NULL, 5862 "AdvapiAvailable() not yet called"); 5863 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5864 BufferLength, PreviousState, ReturnLength); 5865 } 5866 5867 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, 5868 DWORD DesiredAccess, 5869 PHANDLE TokenHandle) { 5870 assert(initialized && _OpenProcessToken != NULL, 5871 "AdvapiAvailable() not yet called"); 5872 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5873 } 5874 5875 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, 5876 LPCTSTR lpName, PLUID lpLuid) { 5877 assert(initialized && _LookupPrivilegeValue != NULL, 5878 "AdvapiAvailable() not yet called"); 5879 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5880 } 5881 5882 BOOL os::Advapi32Dll::AdvapiAvailable() { 5883 if (!initialized) { 5884 initialize(); 5885 } 5886 return 
_AdjustTokenPrivileges != NULL && 5887 _OpenProcessToken != NULL && 5888 _LookupPrivilegeValue != NULL; 5889 } 5890 5891 #endif 5892 5893 #ifndef PRODUCT 5894 5895 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5896 // contiguous memory block at a particular address. 5897 // The test first tries to find a good approximate address to allocate at by using the same 5898 // method to allocate some memory at any address. The test then tries to allocate memory in 5899 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5900 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5901 // the previously allocated memory is available for allocation. The only actual failure 5902 // that is reported is when the test tries to allocate at a particular location but gets a 5903 // different valid one. A NULL return value at this point is not considered an error but may 5904 // be legitimate. 5905 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5906 void TestReserveMemorySpecial_test() { 5907 if (!UseLargePages) { 5908 if (VerboseInternalVMTests) { 5909 gclog_or_tty->print("Skipping test because large pages are disabled"); 5910 } 5911 return; 5912 } 5913 // save current value of globals 5914 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5915 bool old_use_numa_interleaving = UseNUMAInterleaving; 5916 5917 // set globals to make sure we hit the correct code path 5918 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5919 5920 // do an allocation at an address selected by the OS to get a good one. 
5921 const size_t large_allocation_size = os::large_page_size() * 4; 5922 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5923 if (result == NULL) { 5924 if (VerboseInternalVMTests) { 5925 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.", 5926 large_allocation_size); 5927 } 5928 } else { 5929 os::release_memory_special(result, large_allocation_size); 5930 5931 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5932 // we managed to get it once. 5933 const size_t expected_allocation_size = os::large_page_size(); 5934 char* expected_location = result + os::large_page_size(); 5935 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5936 if (actual_location == NULL) { 5937 if (VerboseInternalVMTests) { 5938 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5939 expected_location, large_allocation_size); 5940 } 5941 } else { 5942 // release memory 5943 os::release_memory_special(actual_location, expected_allocation_size); 5944 // only now check, after releasing any memory to avoid any leaks. 5945 assert(actual_location == expected_location, 5946 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5947 expected_location, expected_allocation_size, actual_location)); 5948 } 5949 } 5950 5951 // restore globals 5952 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5953 UseNUMAInterleaving = old_use_numa_interleaving; 5954 } 5955 #endif // PRODUCT