
src/os/aix/vm/os_aix.cpp

rev 7960 : 8075506: aix: improve handling of native memory
   1 /*
   2  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2014 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


  96 #include <sys/resource.h>
  97 #include <sys/select.h>
  98 #include <sys/shm.h>
  99 #include <sys/socket.h>
 100 #include <sys/stat.h>
 101 #include <sys/sysinfo.h>
 102 #include <sys/systemcfg.h>
 103 #include <sys/time.h>
 104 #include <sys/times.h>
 105 #include <sys/types.h>
 106 #include <sys/utsname.h>
 107 #include <sys/vminfo.h>
 108 #include <sys/wait.h>
 109 
 110 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 111 // getrusage() is prepared to handle the associated failure.
 112 #ifndef RUSAGE_THREAD
 113 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 114 #endif
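For illustration, a minimal sketch (not part of this file) of the calling pattern the
comment above refers to - probing RUSAGE_THREAD at runtime and tolerating failure on
OS levels that do not implement it:

    // Hedged sketch: RUSAGE_THREAD may be rejected at runtime even where the
    // constant compiles, so the caller must check the return code.
    #include <sys/resource.h>

    static bool get_thread_cpu_usage(struct rusage* usage) {
      // returns false instead of asserting if the OS rejects RUSAGE_THREAD
      return ::getrusage(RUSAGE_THREAD, usage) == 0;
    }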
 115 
 116 // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
 117 #if !defined(_AIXVERSION_610)
 118 extern "C" {
 119   int getthrds64(pid_t ProcessIdentifier,
 120                  struct thrdentry64* ThreadBuffer,
 121                  int ThreadSize,
 122                  tid64_t* IndexPointer,
 123                  int Count);
 124 }
 125 #endif
 126 
 127 #define MAX_PATH (2 * K)
 128 
 129 // for timer info max values which include all bits
 130 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 131 // for multipage initialization error analysis (in 'g_multipage_error')
 132 #define ERROR_MP_OS_TOO_OLD                          100
 133 #define ERROR_MP_EXTSHM_ACTIVE                       101
 134 #define ERROR_MP_VMGETINFO_FAILED                    102
 135 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103


 151 #define PV_7_Compat 0x208000   /* Power PC 7 */
 152 #endif
 153 #ifndef PV_8
 154 #define PV_8 0x300000          /* Power PC 8 */
 155 #define PV_8_Compat 0x308000   /* Power PC 8 */
 156 #endif
 157 
 158 #define trcVerbose(fmt, ...) { /* PPC port */  \
 159   if (Verbose) { \
 160     fprintf(stderr, fmt, ##__VA_ARGS__); \
 161     fputc('\n', stderr); fflush(stderr); \
 162   } \
 163 }
 164 #define trc(fmt, ...)        /* PPC port */
 165 
 166 #define ERRBYE(s) { \
 167     trcVerbose(s); \
 168     return -1; \
 169 }
 170 
 171 // query dimensions of the stack of the calling thread
 172 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 173 
 174 // function to check a given stack pointer against given stack limits
 175 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 176   if (((uintptr_t)sp) & 0x7) {
 177     return false;
 178   }
 179   if (sp > stack_base) {
 180     return false;
 181   }
 182   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 183     return false;
 184   }
 185   return true;
 186 }
 187 
 188 // returns true if the given pointer is a valid code pointer
 189 inline bool is_valid_codepointer(codeptr_t p) {
 190   if (!p) {
 191     return false;
 192   }


 203 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 204     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 205 }
 206 
 207 // Macro to check the current stack pointer against given stack limits.
 208 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 209   address sp; \
 210   sp = os::current_stack_pointer(); \
 211   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 212 }
 213 
 214 ////////////////////////////////////////////////////////////////////////////////
 215 // global variables (for a description see os_aix.hpp)
 216 
 217 julong    os::Aix::_physical_memory = 0;
 218 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 219 int       os::Aix::_page_size = -1;
 220 int       os::Aix::_on_pase = -1;
 221 int       os::Aix::_os_version = -1;
 222 int       os::Aix::_stack_page_size = -1;
 223 size_t    os::Aix::_shm_default_page_size = -1;
 224 int       os::Aix::_can_use_64K_pages = -1;
 225 int       os::Aix::_can_use_16M_pages = -1;
 226 int       os::Aix::_xpg_sus_mode = -1;
 227 int       os::Aix::_extshm = -1;
 228 int       os::Aix::_logical_cpus = -1;
 229 
 230 ////////////////////////////////////////////////////////////////////////////////
 231 // local variables
 232 
 233 static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 234 static jlong    initial_time_count = 0;
 235 static int      clock_tics_per_sec = 100;
 236 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 237 static bool     check_signals      = true;
 238 static pid_t    _initial_pid       = 0;
 239 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 240 static sigset_t SR_sigset;
 241 static pthread_mutex_t dl_mutex;              // Used to protect dlsym() calls.
 242 
 243 julong os::available_memory() {
 244   return Aix::available_memory();
 245 }
 246 
 247 julong os::Aix::available_memory() {
 248   os::Aix::meminfo_t mi;
 249   if (os::Aix::get_meminfo(&mi)) {
 250     return mi.real_free;
 251   } else {
 252     return 0xFFFFFFFFFFFFFFFFLL;
 253   }
 254 }
 255 
 256 julong os::physical_memory() {
 257   return Aix::physical_memory();
 258 }
 259 
 260 ////////////////////////////////////////////////////////////////////////////////
 261 // environment support
 262 


 274 
 275 bool os::have_special_privileges() {
 276   static bool init = false;
 277   static bool privileges = false;
 278   if (!init) {
 279     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 280     init = true;
 281   }
 282   return privileges;
 283 }
 284 
 285 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 286 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 287 static bool my_disclaim64(char* addr, size_t size) {
 288 
 289   if (size == 0) {
 290     return true;
 291   }
 292 
 293   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 294   const unsigned int maxDisclaimSize = 0x80000000;
 295 
 296   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 297   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
 298 
 299   char* p = addr;
 300 
 301   for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
 302     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 303       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 304       return false;
 305     }
 306     p += maxDisclaimSize;
 307   }
 308 
 309   if (lastDisclaimSize > 0) {
 310     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 311       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 312       return false;
 313     }
 314   }
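A worked example of the split computed above (illustrative numbers only):

    // size            = 5 GB  (0x140000000)
    // maxDisclaimSize = 2 GB  (0x80000000)
    // numFullDisclaimsNeeded = 0x140000000 / 0x80000000 = 2
    // lastDisclaimSize       = 0x140000000 % 0x80000000 = 0x40000000 (1 GB)
    // -> the loop issues two full 2 GB disclaims, the tail handles the last 1 GB.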


 351   // Get the number of online (logical) cpus instead of configured.
 352   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 353   assert(_processor_count > 0, "_processor_count must be > 0");
 354 
 355   // Retrieve total physical storage.
 356   os::Aix::meminfo_t mi;
 357   if (!os::Aix::get_meminfo(&mi)) {
 358     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 359     assert(false, "os::Aix::get_meminfo failed.");
 360   }
 361   _physical_memory = (julong) mi.real_total;
 362 }
 363 
 364 // Helper function for tracing page sizes.
 365 static const char* describe_pagesize(size_t pagesize) {
 366   switch (pagesize) {
 367     case SIZE_4K : return "4K";
 368     case SIZE_64K: return "64K";
 369     case SIZE_16M: return "16M";
 370     case SIZE_16G: return "16G";

 371     default:
 372       assert(false, "surprise");
 373       return "??";
 374   }
 375 }
 376 
 377 // Retrieve information about multipage size support. Will initialize
 378 // Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
 379 // Aix::_can_use_16M_pages.
 380 // Must be called before calling os::large_page_init().
 381 void os::Aix::query_multipage_support() {
 382 
 383   guarantee(_page_size == -1 &&
 384             _stack_page_size == -1 &&
 385             _can_use_64K_pages == -1 &&
 386             _can_use_16M_pages == -1 &&
 387             g_multipage_error == -1,
 388             "do not call twice");
 389 
 390   _page_size = ::sysconf(_SC_PAGESIZE);
 391 
 392   // This really would surprise me.
 393   assert(_page_size == SIZE_4K, "surprise!");
 394 
 395 
 396   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 397   // Default data page size is influenced either by linker options (-bdatapsize)
 398   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 399   // default should be 4K.
 400   size_t data_page_size = SIZE_4K;
 401   {
 402     void* p = os::malloc(SIZE_16M, mtInternal);
 403     guarantee(p != NULL, "malloc failed");
 404     data_page_size = os::Aix::query_pagesize(p);
 405     os::free(p);
 406   }
 407 
 408   // query default shm page size (LDR_CNTRL SHMPSIZE)
 409   {
 410     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 411     guarantee(shmid != -1, "shmget failed");
 412     void* p = ::shmat(shmid, NULL, 0);
 413     ::shmctl(shmid, IPC_RMID, NULL);
 414     guarantee(p != (void*) -1, "shmat failed");
 415     _shm_default_page_size = os::Aix::query_pagesize(p);
 416     ::shmdt(p);
 417   }
 418 
 419   // before querying the stack page size, make sure we are not running as primordial
 420   // thread (because primordial thread's stack may have different page size than
 421   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 422   // number of reasons so we may just as well guarantee it here
 423   guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");
 424 
 425   // query stack page size
 426   {
 427     int dummy = 0;
 428     _stack_page_size = os::Aix::query_pagesize(&dummy);
 429     // everything else would surprise me and should be looked into
 430     guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
 431     // also, just for completeness: pthread stacks are allocated from C heap, so
 432     // stack page size should be the same as data page size
 433     guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
 434   }
 435 
 436   // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
 437   // for system V shm.
 438   if (Aix::extshm()) {
 439     if (Verbose) {
 440       fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
 441                       "Please make sure EXTSHM is OFF for large page support.\n");
 442     }
 443     g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
 444     _can_use_64K_pages = _can_use_16M_pages = 0;
 445     goto query_multipage_support_end;
 446   }
 447 
 448   // now check which page sizes the OS claims it supports, and of those, which actually can be used.
 449   {
 450     const int MAX_PAGE_SIZES = 4;
 451     psize_t sizes[MAX_PAGE_SIZES];
 452     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 453     if (num_psizes == -1) {
 454       if (Verbose) {
 455         fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 456         fprintf(stderr, "disabling multipage support.\n");
 457       }
 458       g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
 459       _can_use_64K_pages = _can_use_16M_pages = 0;
 460       goto query_multipage_support_end;
 461     }
 462     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 463     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 464     if (Verbose) {
 465       fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
 466       for (int i = 0; i < num_psizes; i ++) {
 467         fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
 468       }
 469       fprintf(stderr, " .\n");
 470     }
 471 
 472     // Can we use 64K, 16M pages?
 473     _can_use_64K_pages = 0;
 474     _can_use_16M_pages = 0;
 475     for (int i = 0; i < num_psizes; i ++) {
 476       if (sizes[i] == SIZE_64K) {
 477         _can_use_64K_pages = 1;
 478       } else if (sizes[i] == SIZE_16M) {
 479         _can_use_16M_pages = 1;
 480       }
 481     }
 482 
 483     if (!_can_use_64K_pages) {
 484       g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
 485     }
 486 
 487     // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
 488     // there must be an actual 16M page pool, and we must run with enough rights.
 489     if (_can_use_16M_pages) {
 490       const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
 491       guarantee(shmid != -1, "shmget failed");
 492       struct shmid_ds shm_buf = { 0 };
 493       shm_buf.shm_pagesize = SIZE_16M;
 494       const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
 495       const int en = errno;
 496       ::shmctl(shmid, IPC_RMID, NULL);
 497       if (!can_set_pagesize) {
 498         if (Verbose) {
 499           fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
 500                           "Will deactivate 16M support.\n", en, strerror(en));
 501         }
 502         _can_use_16M_pages = 0;
 503       }
 504     }
 505 
 506   } // end: check which pages can be used for shared memory
 507 
 508 query_multipage_support_end:
 509 
 510   guarantee(_page_size != -1 &&
 511             _stack_page_size != -1 &&
 512             _can_use_64K_pages != -1 &&
 513             _can_use_16M_pages != -1, "Page sizes not properly initialized");
 514 
 515   if (_can_use_64K_pages) {
 516     g_multipage_error = 0;
 517   }
 518 
 519   if (Verbose) {
 520     fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
 521     fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
 522     fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
 523     fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
 524     fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
 525     fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
 526   }
 527 
 528 } // end os::Aix::query_multipage_support()
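For readers who want to experiment outside the VM, the central query above can be
reproduced with a small standalone program - a sketch, assuming an AIX build
environment with the same system headers used by this file:

    // Standalone sketch of the vmgetinfo(VMINFO_GETPSIZES) probe used above.
    #include <stdio.h>
    #include <sys/vminfo.h>

    int main() {
      psize_t sizes[4];
      const int n = vmgetinfo(sizes, VMINFO_GETPSIZES, 4);
      if (n <= 0) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed\n");
        return 1;
      }
      for (int i = 0; i < n; i++) {
        printf("supported page size: 0x%llx\n", (unsigned long long) sizes[i]);
      }
      return 0;
    }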
 529 
 530 void os::init_system_properties_values() {
 531 
 532 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 533 #define EXTENSIONS_DIR  "/lib/ext"
 534 
 535   // Buffer that fits several sprintfs.
 536   // Note that the space for the trailing null is provided
 537   // by the nulls included by the sizeof operator.
 538   const size_t bufsize =
 539     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 540          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 541   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 542 
 543   // sysclasspath, java_home, dll_dir
 544   {
 545     char *pslash;
 546     os::jvm_path(buf, bufsize);


1555   st->print(", DATA ");
1556   getrlimit(RLIMIT_DATA, &rlim);
1557   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1558   else st->print("%uk", rlim.rlim_cur >> 10);
1559   st->cr();
1560 
1561   // load average
1562   st->print("load average:");
1563   double loadavg[3] = {-1.L, -1.L, -1.L};
1564   os::loadavg(loadavg, 3);
1565   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1566   st->cr();
1567 }
1568 
1569 void os::print_memory_info(outputStream* st) {
1570 
1571   st->print_cr("Memory:");
1572 
1573   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1574   st->print_cr("  default stack page size: %s", describe_pagesize(os::Aix::stack_page_size()));
1575   st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
1576   st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
1577   st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
1578   if (g_multipage_error != 0) {
1579     st->print_cr("  multipage error: %d", g_multipage_error);
1580   }
1581 
1582   // print out LDR_CNTRL because it affects the default page sizes
1583   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1584   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1585 
1586   const char* const extshm = ::getenv("EXTSHM");
1587   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1588 
1589   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1590   os::Aix::meminfo_t mi;
1591   if (os::Aix::get_meminfo(&mi)) {
1592     char buffer[256];
1593     if (os::Aix::on_aix()) {
1594       jio_snprintf(buffer, sizeof(buffer),
1595                    "  physical total : %llu\n"
1596                    "  physical free  : %llu\n"
1597                    "  swap total     : %llu\n"
1598                    "  swap free      : %llu\n",
1599                    mi.real_total,
1600                    mi.real_free,
1601                    mi.pgsp_total,
1602                    mi.pgsp_free);
1603     } else {
1604       Unimplemented();
1605     }
1606     st->print_raw(buffer);
1607   } else {


1810         //
1811         ::sem_post(&sig_sem);
1812 
1813         thread->java_suspend_self();
1814       }
1815     } while (threadIsSuspended);
1816   }
1817 }
1818 
1819 int os::signal_lookup() {
1820   return check_pending_signals(false);
1821 }
1822 
1823 int os::signal_wait() {
1824   return check_pending_signals(true);
1825 }
1826 
1827 ////////////////////////////////////////////////////////////////////////////////
1828 // Virtual Memory
1829 
1830 // AddrRange describes an immutable address range
1831 //
1832 // This is a helper class for the 'shared memory bookkeeping' below.
1833 class AddrRange {
1834   friend class ShmBkBlock;
1835 
1836   char* _start;
1837   size_t _size;
1838 
1839 public:
1840 
1841   AddrRange(char* start, size_t size)
1842     : _start(start), _size(size)
1843   {}
1844 
1845   AddrRange(const AddrRange& r)
1846     : _start(r.start()), _size(r.size())
1847   {}
1848 
1849   char* start() const { return _start; }
1850   size_t size() const { return _size; }
1851   char* end() const { return _start + _size; }
1852   bool is_empty() const { return _size == 0 ? true : false; }
1853 
1854   static AddrRange empty_range() { return AddrRange(NULL, 0); }

1855 
1856   bool contains(const char* p) const {
1857     return start() <= p && end() > p;
1858   }
1859 
1860   bool contains(const AddrRange& range) const {
1861     return start() <= range.start() && end() >= range.end();
1862   }

1863 
1864   bool intersects(const AddrRange& range) const {
1865     return (range.start() <= start() && range.end() > start()) ||
1866            (range.start() < end() && range.end() >= end()) ||
1867            contains(range);
1868   }

1869 
1870   bool is_same_range(const AddrRange& range) const {
1871     return start() == range.start() && size() == range.size();
1872   }
1873 
1874   // return the closest inside range consisting of whole pages
1875   AddrRange find_closest_aligned_range(size_t pagesize) const {
1876     if (pagesize == 0 || is_empty()) {
1877       return empty_range();
1878     }
1879     char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1880     char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1881     if (from > to) {
1882       return empty_range();
1883     }
1884     return AddrRange(from, to - from);
1885   }
1886 };
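A small usage sketch (illustrative values; K = 1024 as elsewhere in HotSpot) showing
how find_closest_aligned_range() shrinks an unaligned range to the largest inner
range of whole pages:

    // AddrRange r((char*) (65 * K), 200 * K);            // [65K, 265K), unaligned
    // AddrRange inner = r.find_closest_aligned_range(64 * K);
    // // inner is [128K, 256K): the start is aligned up, the end aligned down,
    // // i.e. exactly the two whole 64K pages inside the original range.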
1887 
1888 ////////////////////////////////////////////////////////////////////////////
1889 // shared memory bookkeeping
1890 //
1891 // the os::reserve_memory() API and friends hand out different kinds of memory, depending
1892 // on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1893 //
1894 // But these memory types have to be treated differently. For example, to uncommit
1895 // mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1896 // disclaim64() is needed.
1897 //
1898 // Therefore we need to keep track of the allocated memory segments and their
1899 // properties.
1900 
1901 // ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1902 class ShmBkBlock : public CHeapObj<mtInternal> {
1903 
1904   ShmBkBlock* _next;
1905 
1906 protected:
1907 
1908   AddrRange _range;
1909   const size_t _pagesize;
1910   const bool _pinned;
1911 
1912 public:
1913 
1914   ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1915     : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1916 
1917     assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1918     assert(!_range.is_empty(), "invalid range");
1919   }
1920 
1921   virtual void print(outputStream* st) const {
1922     st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1923               _range.start(), _range.end(), _range.size(),
1924               _range.size() / _pagesize, describe_pagesize(_pagesize),
1925               _pinned ? "pinned" : "");
1926   }
1927 
1928   enum Type { MMAP, SHMAT };
1929   virtual Type getType() = 0;
1930 
1931   char* base() const { return _range.start(); }
1932   size_t size() const { return _range.size(); }
1933 
1934   void setAddrRange(AddrRange range) {
1935     _range = range;
1936   }
1937 
1938   bool containsAddress(const char* p) const {
1939     return _range.contains(p);
1940   }
1941 
1942   bool containsRange(const char* p, size_t size) const {
1943     return _range.contains(AddrRange((char*)p, size));
1944   }
1945 
1946   bool isSameRange(const char* p, size_t size) const {
1947     return _range.is_same_range(AddrRange((char*)p, size));
1948   }
1949 
1950   virtual bool disclaim(char* p, size_t size) = 0;
1951   virtual bool release() = 0;
1952 
1953   // blocks live in a list.
1954   ShmBkBlock* next() const { return _next; }
1955   void set_next(ShmBkBlock* blk) { _next = blk; }
1956 
1957 }; // end: ShmBkBlock
1958 
1959 
1960 // ShmBkMappedBlock: describes a block allocated with mmap()
1961 class ShmBkMappedBlock : public ShmBkBlock {
1962 public:
1963 
1964   ShmBkMappedBlock(AddrRange range)
1965     : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1966 
1967   void print(outputStream* st) const {
1968     ShmBkBlock::print(st);
1969     st->print_cr(" - mmap'ed");
1970   }
1971 
1972   Type getType() {
1973     return MMAP;
1974   }
1975 
1976   bool disclaim(char* p, size_t size) {
1977 
1978     AddrRange r(p, size);
1979 
1980     guarantee(_range.contains(r), "invalid disclaim");
1981 
1982     // only disclaim whole ranges.
1983     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1984     if (r2.is_empty()) {
1985       return true;
1986     }
1987 
1988     const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1989 
1990     if (rc != 0) {
1991       warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1992     }
1993 
1994     return rc == 0 ? true : false;
1995   }
1996 
1997   bool release() {
1998     // mmap'ed blocks are released using munmap
1999     if (::munmap(_range.start(), _range.size()) != 0) {
2000       warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
2001       return false;
2002     }
2003     return true;
2004   }
2005 }; // end: ShmBkMappedBlock
2006 
2007 // ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
2008 class ShmBkShmatedBlock : public ShmBkBlock {
2009 public:
2010 
2011   ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
2012     : ShmBkBlock(range, pagesize, pinned) {}

2013 
2014   void print(outputStream* st) const {
2015     ShmBkBlock::print(st);
2016     st->print_cr(" - shmat'ed");
2017   }
2018 
2019   Type getType() {
2020     return SHMAT;
2021   }
2022 
2023   bool disclaim(char* p, size_t size) {

2024 
2025     AddrRange r(p, size);
2026 
2027     if (_pinned) {
2028       return true;
2029     }
2030 
2031     // shmat'ed blocks are disclaimed using disclaim64
2032     guarantee(_range.contains(r), "invalid disclaim");
2033 
2034     // only disclaim whole ranges.
2035     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2036     if (r2.is_empty()) {
2037       return true;
2038     }
2039 
2040     const bool rc = my_disclaim64(r2.start(), r2.size());

2041 
2042     if (Verbose && !rc) {
2043       warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2044     }
2045 
2046     return rc;
2047   }
2048 
2049   bool release() {

2050     bool rc = false;
2051     if (::shmdt(_range.start()) != 0) {
2052       warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2053     } else {

2054       rc = true;
2055     }

2056     return rc;
2057   }
2058 
2059 }; // end: ShmBkShmatedBlock
2060 
2061 static ShmBkBlock* g_shmbk_list = NULL;
2062 static volatile jint g_shmbk_table_lock = 0;
2063 
2064 // keep some usage statistics
2065 static struct {
2066   int nodes;    // number of nodes in list
2067   size_t bytes; // reserved - not committed - bytes.
2068   int reserves; // how often reserve was called
2069   int lookups;  // how often a lookup was made
2070 } g_shmbk_stats = { 0, 0, 0, 0 };
2071 
2072 // add information about a shared memory segment to the bookkeeping
2073 static void shmbk_register(ShmBkBlock* p_block) {
2074   guarantee(p_block, "logic error");
2075   p_block->set_next(g_shmbk_list);
2076   g_shmbk_list = p_block;
2077   g_shmbk_stats.reserves ++;
2078   g_shmbk_stats.bytes += p_block->size();
2079   g_shmbk_stats.nodes ++;
2080 }
2081 
2082 // remove the information about a given shared memory segment from the bookkeeping
2083 static void shmbk_unregister(ShmBkBlock* p_block) {
2084   ShmBkBlock* p = g_shmbk_list;
2085   ShmBkBlock* prev = NULL;
2086   while (p) {
2087     if (p == p_block) {
2088       if (prev) {
2089         prev->set_next(p->next());
2090       } else {
2091         g_shmbk_list = p->next();
2092       }
2093       g_shmbk_stats.nodes --;
2094       g_shmbk_stats.bytes -= p->size();
2095       return;
2096     }
2097     prev = p;
2098     p = p->next();
2099   }
2100   assert(false, "should not happen");
2101 }
2102 
2103 // given a pointer, return shared memory bookkeeping record for the segment it points into
2104 // using the returned block info must happen under lock protection
2105 static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2106   g_shmbk_stats.lookups ++;
2107   ShmBkBlock* p = g_shmbk_list;
2108   while (p) {
2109     if (p->containsAddress(addr)) {
2110       return p;
2111     }
2112     p = p->next();
2113   }
2114   return NULL;
2115 }
2116 
2117 // dump all information about all memory segments allocated with os::reserve_memory()
2118 void shmbk_dump_info() {
2119   tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2120     "total reserves: %d total lookups: %d)",
2121     g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2122   const ShmBkBlock* p = g_shmbk_list;
2123   int i = 0;
2124   while (p) {
2125     p->print(tty);
2126     p = p->next();
2127     i ++;
2128   }
2129 }
2130 
2131 #define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
2132 #define UNLOCK_SHMBK   }
2133 
2134 // End: shared memory bookkeeping
2135 ////////////////////////////////////////////////////////////////////////////////////////////////////
2136 
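To illustrate how the pieces above fit together, a hedged sketch of the typical life
cycle of a bookkeeping entry (the real call sites are reserve_mmaped_memory(),
reserve_shmatted_memory() and os::pd_release_memory() below):

    // Sketch only - mirrors what the reserve/release paths in this file do.
    static void example_bookkeeping_lifecycle(char* addr, size_t size) {
      LOCK_SHMBK
        // after a successful mmap: remember the segment and how it was created
        shmbk_register(new ShmBkMappedBlock(AddrRange(addr, size)));
      UNLOCK_SHMBK

      LOCK_SHMBK
        // later: look the segment up by any address inside it, release it,
        // and drop the bookkeeping entry
        ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
        if (block != NULL) {
          block->release();
          shmbk_unregister(block);
          delete block;
        }
      UNLOCK_SHMBK
    }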
2137 int os::vm_page_size() {
2138   // Seems redundant as all get out
2139   assert(os::Aix::page_size() != -1, "must call os::init");
2140   return os::Aix::page_size();
2141 }
2142 
2143 // Aix allocates memory by pages.
2144 int os::vm_allocation_granularity() {
2145   assert(os::Aix::page_size() != -1, "must call os::init");
2146   return os::Aix::page_size();
2147 }
2148 
2149 int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2150 
2151   // Commit is a noop. There is no explicit commit
2152   // needed on AIX. Memory is committed when touched.
2153   //
2154   // Debug: check address range for validity
2155 #ifdef ASSERT
2156   LOCK_SHMBK
2157     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2158     if (!block) {
2159       fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
2160       shmbk_dump_info();
2161       assert(false, "invalid pointer");
2162       return false;
2163     } else if (!block->containsRange(addr, size)) {
2164       fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
2165       shmbk_dump_info();
2166       assert(false, "invalid range");
2167       return false;
2168     }
2169   UNLOCK_SHMBK
2170 #endif // ASSERT
2171 
2172   return 0;
2173 }
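Since commit happens lazily on first touch, a conventional eager commit can be
emulated by touching every page - a sketch for illustration only; the no-op above
is all HotSpot needs here:

    // Hedged sketch: force physical backing by touching one byte per page.
    static void touch_pages(char* addr, size_t size, size_t pagesize) {
      for (char* p = addr; p < addr + size; p += pagesize) {
        *(volatile char*) p = *(volatile char*) p;  // content-preserving touch
      }
    }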
2174 
2175 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2176   return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2177 }

2178 
2179 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2180                                   const char* mesg) {
2181   assert(mesg != NULL, "mesg must be specified");
2182   os::Aix::commit_memory_impl(addr, size, exec);
2183 }
2184 
2185 int os::Aix::commit_memory_impl(char* addr, size_t size,
2186                                 size_t alignment_hint, bool exec) {
2187   return os::Aix::commit_memory_impl(addr, size, exec);
2188 }
2189 
2190 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2191                           bool exec) {
2192   return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2193 }
2194 
2195 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2196                                   size_t alignment_hint, bool exec,
2197                                   const char* mesg) {
2198   os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);

2199 }
2200 
2201 bool os::pd_uncommit_memory(char* addr, size_t size) {
2202 
2203   // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2204 
2205   bool rc = false;
2206   LOCK_SHMBK
2207     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2208     if (!block) {
2209       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2210       shmbk_dump_info();
2211       assert(false, "invalid pointer");
2212       return false;
2213     } else if (!block->containsRange(addr, size)) {
2214       fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2215       shmbk_dump_info();
2216       assert(false, "invalid range");
2217       return false;
2218     }
2219     rc = block->disclaim(addr, size);
2220   UNLOCK_SHMBK
2221 
2222   if (Verbose && !rc) {
2223     warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2224   }
2225   return rc;
2226 }
2227 
2228 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2229   return os::guard_memory(addr, size);
2230 }
2231 
2232 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2233   return os::unguard_memory(addr, size);
2234 }
2235 
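Guarding and unguarding boil down to plain mprotect() calls (see checked_mprotect()
further down); a minimal usage sketch with an assumed, page-aligned example address:

    // Sketch: protect the lowest page of a stack, later make it accessible again.
    static void guard_lowest_stack_page(char* stack_bottom /* page aligned */) {
      const size_t ps = os::vm_page_size();
      if (os::guard_memory(stack_bottom, ps)) {
        // any access to [stack_bottom, stack_bottom + ps) now faults
      }
      // ... later:
      os::unguard_memory(stack_bottom, ps);
    }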
2236 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2237 }
2238 
2239 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2240 }
2241 
2242 void os::numa_make_global(char *addr, size_t bytes) {
2243 }
2244 
2245 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2246 }
2247 
2248 bool os::numa_topology_changed() {
2249   return false;
2250 }
2251 
2252 size_t os::numa_get_groups_num() {
2253   return 1;


2256 int os::numa_get_group_id() {
2257   return 0;
2258 }
2259 
2260 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2261   if (size > 0) {
2262     ids[0] = 0;
2263     return 1;
2264   }
2265   return 0;
2266 }
2267 
2268 bool os::get_page_info(char *start, page_info* info) {
2269   return false;
2270 }
2271 
2272 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2273   return end;
2274 }
2275 
2276 // Flags for reserve_shmatted_memory:
2277 #define RESSHM_WISHADDR_OR_FAIL                     1
2278 #define RESSHM_TRY_16M_PAGES                        2
2279 #define RESSHM_16M_PAGES_OR_FAIL                    4
2280 
2281 // Result of reserve_shmatted_memory:
2282 struct shmatted_memory_info_t {
2283   char* addr;
2284   size_t pagesize;
2285   bool pinned;
2286 };
2287 
2288 // Reserve a section of shmatted memory.
2289 // params:
2290 // bytes [in]: size of memory, in bytes
2291 // requested_addr [in]: wish address.
2292 //                      NULL = no wish.
2293 //                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2294 //                      be obtained, function will fail. Otherwise wish address is treated as hint and
2295 //                      another pointer is returned.
2296 // flags [in]:          some flags. Valid flags are:
2297 //                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2298 //                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2299 //                          (requires UseLargePages and Use16MPages)
2300 //                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2301 //                          Otherwise any other page size will do.
2302 // p_info [out] :       holds information about the created shared memory segment.
2303 static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2304 
2305   assert(p_info, "parameter error");
2306 
2307   // init output struct.
2308   p_info->addr = NULL;
2309 
2310   // We should not be here for EXTSHM=ON, either.
2311   if (os::Aix::extshm()) {
2312     ShouldNotReachHere();
2313   }
2314 
2315   // extract flags. sanity checks.
2316   const bool wishaddr_or_fail =
2317     flags & RESSHM_WISHADDR_OR_FAIL;
2318   const bool try_16M_pages =
2319     flags & RESSHM_TRY_16M_PAGES;
2320   const bool f16M_pages_or_fail =
2321     flags & RESSHM_16M_PAGES_OR_FAIL;
2322 
2323   // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
2324   // shmat will fail anyway, so save some cycles by failing right away
2325   if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2326     if (wishaddr_or_fail) {
2327       return false;
2328     } else {
2329       requested_addr = NULL;
2330     }
2331   }
2332 
2333   char* addr = NULL;
2334 
2335   // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2336   // pagesize dynamically.
2337   const size_t size = align_size_up(bytes, SIZE_16M);
2338 
2339   // reserve the shared segment
2340   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2341   if (shmid == -1) {
2342     warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2343     return false;
2344   }
2345 
2346   // Important note:
2347   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2348 // We must remove it from the system right after attaching it. System V shm segments are global and
2349   // survive the process.
2350   // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2351 
2352   // try forcing the page size
2353   size_t pagesize = -1; // unknown so far
2354 
2355   if (UseLargePages) {
2356 
2357     struct shmid_ds shmbuf;
2358     memset(&shmbuf, 0, sizeof(shmbuf));
2359 
2360     // First, try to take from 16M page pool if...
2361     if (os::Aix::can_use_16M_pages()  // we can ...
2362         && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2363         && try_16M_pages) {           // caller wants us to.
2364       shmbuf.shm_pagesize = SIZE_16M;
2365       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2366         pagesize = SIZE_16M;
2367       } else {
2368         warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2369                 size / SIZE_16M, errno);
2370         if (f16M_pages_or_fail) {
2371           goto cleanup_shm;
2372         }
2373       }
2374     }
2375 
2376     // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2377     // because the 64K page pool may also be exhausted.
2378     if (pagesize == -1) {
2379       shmbuf.shm_pagesize = SIZE_64K;
2380       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2381         pagesize = SIZE_64K;
2382       } else {
2383         warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2384                 size / SIZE_64K, errno);
2385         // Here I give up. Leave pagesize at -1 - later, after attaching, we will query the
2386         // real page size of the attached memory. (In theory, it may be something different
2387         // from 4K if LDR_CNTRL SHMPSIZE is set.)
2388       }
2389     }
2390   }
2391 
2392   // sanity point
2393   assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2394 
2395   // Now attach the shared segment.
2396   addr = (char*) shmat(shmid, requested_addr, 0);
2397   if (addr == (char*)-1) {
2398     // How to handle attach failure:
2399     // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2400     // mandatory, fail, if not, retry anywhere.
2401     // If it failed for any other reason, treat that as fatal error.
2402     addr = NULL;
2403     if (requested_addr) {
2404       if (wishaddr_or_fail) {
2405         goto cleanup_shm;
2406       } else {
2407         addr = (char*) shmat(shmid, NULL, 0);
2408         if (addr == (char*)-1) { // fatal
2409           addr = NULL;
2410           warning("shmat failed (errno: %d)", errno);
2411           goto cleanup_shm;
2412         }
2413       }
2414     } else { // fatal
2415       addr = NULL;
2416       warning("shmat failed (errno: %d)", errno);
2417       goto cleanup_shm;
2418     }
2419   }
2420 
2421   // sanity point
2422   assert(addr && addr != (char*) -1, "wrong address");
2423 
2424   // after successful Attach remove the segment - right away.
2425   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2426     warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2427     guarantee(false, "failed to remove shared memory segment!");
2428   }
2429   shmid = -1;
2430 
2431   // query the real page size. In case setting the page size did not work (see above), the system
2432   // may have given us something other than 4K (LDR_CNTRL)
2433   {
2434     const size_t real_pagesize = os::Aix::query_pagesize(addr);
2435     if (pagesize != -1) {
2436       assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2437     } else {
2438       pagesize = real_pagesize;
2439     }
2440   }
2441 
2442   // Now register the reserved block with internal book keeping.
2443   LOCK_SHMBK
2444     const bool pinned = pagesize >= SIZE_16M ? true : false;
2445     ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2446     assert(p_block, "");
2447     shmbk_register(p_block);
2448   UNLOCK_SHMBK
2449 
2450 cleanup_shm:
2451 
2452   // if we have not done so yet, remove the shared memory segment. This is very important.
2453   if (shmid != -1) {
2454     if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2455       warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2456       guarantee(false, "failed to remove shared memory segment!");
2457     }
2458     shmid = -1;
2459   }
2460 
2461   // trace
2462   if (Verbose && !addr) {
2463     if (requested_addr != NULL) {
2464       warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
2465     } else {
2466       warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2467     }
2468   }
2469 
2470   // hand info to caller
2471   if (addr) {
2472     p_info->addr = addr;
2473     p_info->pagesize = pagesize;
2474     p_info->pinned = pagesize == SIZE_16M ? true : false;
2475   }
2476 
2477   // sanity test:
2478   if (requested_addr && addr && wishaddr_or_fail) {
2479     guarantee(addr == requested_addr, "shmat error");
2480   }
2481 
2482   // just one more test to really make sure we have no dangling shm segments.
2483   guarantee(shmid == -1, "dangling shm segments");
2484 
2485   return addr ? true : false;
2486 
2487 } // end: reserve_shmatted_memory
2488 
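The essential System V pattern used above, reduced to a standalone sketch (AIX-specific,
error handling trimmed; SIZE_64K and the shm_pagesize field are the AIX extensions
already used in this file): create the segment, try to force the page size, attach,
and immediately mark the id for removal so nothing outlives the process:

    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <sys/stat.h>

    static void* reserve_64K_paged_segment(size_t size) {
      const int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
      if (shmid == -1) return NULL;
      struct shmid_ds shmbuf;
      memset(&shmbuf, 0, sizeof(shmbuf));
      shmbuf.shm_pagesize = SIZE_64K;               // AIX extension
      (void) shmctl(shmid, SHM_PAGESIZE, &shmbuf);  // best effort - may fail
      void* const p = shmat(shmid, NULL, 0);
      (void) shmctl(shmid, IPC_RMID, NULL);         // segment dies with last detach
      return (p == (void*) -1) ? NULL : p;
    }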
2489 // Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
2490 // will return NULL in case of an error.
2491 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2492 
2493   // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2494   if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2495     warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2496     return NULL;
2497   }
2498 
2499   const size_t size = align_size_up(bytes, SIZE_4K);
2500 
2501   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2502   // msync(MS_INVALIDATE) (see os::uncommit_memory)
2503   int flags = MAP_ANONYMOUS | MAP_SHARED;
2504 
2505   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2506   // it means if a wish address is given but MAP_FIXED is not set.
2507   //
2508   // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2509   // clobbers the address range, which is probably not what the caller wants. That's
2510   // why I assert here (again) that the SPEC1170 compat mode is off.
2511   // If we want to be able to run under SPEC1170, we have to do some porting and
2512   // testing.
2513   if (requested_addr != NULL) {
2514     assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2515     flags |= MAP_FIXED;
2516   }
2517 
2518   char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2519 
2520   if (addr == MAP_FAILED) {
2521     // attach failed: tolerate for specific wish addresses. Not being able to attach
2522     // anywhere is a fatal error.
2523     if (requested_addr == NULL) {
2524       // It's ok to fail here if the machine does not have enough memory.
2525       warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
2526     }
2527     addr = NULL;
2528     goto cleanup_mmap;
2529   }
2530 
2531   // If we did request a specific address and that address was not available, fail.
2532   if (addr && requested_addr) {
2533     guarantee(addr == requested_addr, "unexpected");
2534   }
2535 
2536   // register this mmap'ed segment with book keeping
2537   LOCK_SHMBK
2538     ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2539     assert(p_block, "");
2540     shmbk_register(p_block);
2541   UNLOCK_SHMBK
2542 
2543 cleanup_mmap:
2544 
2545   // trace
2546   if (Verbose) {
2547     if (addr) {
2548       fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2549     }
2550     else {
2551       if (requested_addr != NULL) {
2552         warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
2553       } else {
2554         warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
2555       }
2556     }
2557   }
2558 
2559   return addr;
2560 
2561 } // end: reserve_mmaped_memory
2562 
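Note the MAP_SHARED choice above: it is what later allows "uncommitting" through
msync(MS_INVALIDATE) (see ShmBkMappedBlock::disclaim()). The pair of operations,
condensed into a sketch:

    #include <sys/mman.h>

    // Sketch: reserve with mmap, later give the physical pages back without
    // unmapping (the address range itself stays reserved).
    static char* reserve(size_t size) {
      void* const p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                           MAP_ANONYMOUS | MAP_SHARED, -1, 0);
      return (p == MAP_FAILED) ? NULL : (char*) p;
    }

    static bool uncommit(char* addr, size_t size) {
      return msync(addr, size, MS_INVALIDATE) == 0;
    }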
2563 // Reserves and attaches a shared memory segment.
2564 // Will assert if a wish address is given and could not be obtained.
2565 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2566   return os::attempt_reserve_memory_at(bytes, requested_addr);
2567 }
2568 
2569 bool os::pd_release_memory(char* addr, size_t size) {
2570 
2571   // delegate to ShmBkBlock class which knows how to uncommit its memory.
2572 
2573   bool rc = false;
2574   LOCK_SHMBK
2575     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2576     if (!block) {
2577       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2578       shmbk_dump_info();
2579       assert(false, "invalid pointer");
2580       return false;
2581     }
2582     else if (!block->isSameRange(addr, size)) {
2583       if (block->getType() == ShmBkBlock::MMAP) {
2584         // Release either the same range, or a part at the beginning or at the end of a range.
2585         if (block->base() == addr && size < block->size()) {
2586           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2587           assert(b, "");
2588           shmbk_register(b);
2589           block->setAddrRange(AddrRange(addr, size));
2590         }
2591         else if (addr > block->base() && addr + size == block->base() + block->size()) {
2592           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2593           assert(b, "");
2594           shmbk_register(b);
2595           block->setAddrRange(AddrRange(addr, size));
2596         }
2597         else {
2598           fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2599           shmbk_dump_info();
2600           assert(false, "invalid mmap range");
2601           return false;
2602         }
2603       }
2604       else {
2605         // Release only the same range. No partial release allowed.
2606         // Soften the requirement a bit, because the user may think they own a smaller size
2607         // than the block actually has, due to alignment etc.
2608         if (block->base() != addr || block->size() < size) {
2609           fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2610           shmbk_dump_info();
2611           assert(false, "invalid shmget range");
2612           return false;
2613         }
2614       }
2615     }
2616     rc = block->release();
2617     assert(rc, "release failed");
2618     // remove block from bookkeeping
2619     shmbk_unregister(block);
2620     delete block;
2621   UNLOCK_SHMBK
2622 
2623   if (!rc) {
2624     warning("failed to release %lu bytes at 0x%p", size, addr);

2625   }
2626 
2627   return rc;
2628 }
2629 
2630 static bool checked_mprotect(char* addr, size_t size, int prot) {
2631 
2632   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2633   // not tell me if protection failed when trying to protect an un-protectable range.
2634   //
2635   // This means if the memory was allocated using shmget/shmat, protection won't work
2636   // but mprotect will still return 0:
2637   //
2638   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2639 
2640   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2641 
2642   if (!rc) {
2643     const char* const s_errno = strerror(errno);
2644     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);


2685     ShouldNotReachHere();
2686   }
2687   // is_committed is unused.
2688   return checked_mprotect(addr, size, p);
2689 }
2690 
2691 bool os::guard_memory(char* addr, size_t size) {
2692   return checked_mprotect(addr, size, PROT_NONE);
2693 }
2694 
2695 bool os::unguard_memory(char* addr, size_t size) {
2696   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2697 }
2698 
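Given the caveat in checked_mprotect() above (mprotect() may report success on
shmat'ed memory without protecting anything), one conceivable cross-check - a sketch
only, assuming HotSpot's fault-tolerant SafeFetch32() stub is available - is to
probe the page after protecting it:

    // Hedged sketch: test whether PROT_NONE actually took effect. Note the
    // false positive: a readable page whose first word happens to equal the
    // sentinel would also look "protected", so this is a heuristic at best.
    static bool seems_protected(char* addr) {
      const int sentinel = 0x76543210;
      return SafeFetch32((int*) addr, sentinel) == sentinel;
    }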
2699 // Large page support
2700 
2701 static size_t _large_page_size = 0;
2702 
2703 // Enable large page support if OS allows that.
2704 void os::large_page_init() {
2705 
2706   // Note: os::Aix::query_multipage_support must run first.
2707 
2708   if (!UseLargePages) {
2709     return;
2710   }
2711 
2712   if (!Aix::can_use_64K_pages()) {
2713     assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2714     UseLargePages = false;
2715     return;
2716   }
2717 
2718   if (!Aix::can_use_16M_pages() && Use16MPages) {
2719     fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2720             "and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2721   }
2722 
2723   // Do not report 16M page alignment as part of os::_page_sizes if we are
2724   // explicitly forbidden from using 16M pages. Doing so would increase the
2725   // alignment the garbage collector calculates with, slightly increasing
2726   // heap usage. We should only pay for 16M alignment if we really want to
2727   // use 16M pages.
2728   if (Use16MPages && Aix::can_use_16M_pages()) {
2729     _large_page_size = SIZE_16M;
2730     _page_sizes[0] = SIZE_16M;
2731     _page_sizes[1] = SIZE_64K;
2732     _page_sizes[2] = SIZE_4K;
2733     _page_sizes[3] = 0;
2734   } else if (Aix::can_use_64K_pages()) {
2735     _large_page_size = SIZE_64K;
2736     _page_sizes[0] = SIZE_64K;
2737     _page_sizes[1] = SIZE_4K;
2738     _page_sizes[2] = 0;
2739   }
2740 
2741   if (Verbose) {
2742     fprintf(stderr, "Default large page size is 0x%llX.\n", _large_page_size);
2743   }
2744 } // end: os::large_page_init()
2745 
2746 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2747   // "exec" is passed in but not used. Creating the shared image for
2748   // the code cache doesn't have an SHM_X executable permission to check.
2749   Unimplemented();
2750   return 0;
2751 }
2752 
2753 bool os::release_memory_special(char* base, size_t bytes) {
2754   // detaching the SHM segment will also delete it, see reserve_memory_special()
2755   Unimplemented();
2756   return false;
2757 }
2758 
2759 size_t os::large_page_size() {
2760   return _large_page_size;
2761 }
2762 
2763 bool os::can_commit_large_page_memory() {
2764   // Well, sadly we cannot commit anything at all (see comment in
2765   // os::commit_memory), but we claim we can so that we can make use of large pages.
2766   return true;
2767 }
2768 
2769 bool os::can_execute_large_page_memory() {
2770   // We can do that
2771   return true;
2772 }
2773 
2774 // Reserve memory at an arbitrary address, only if that area is
2775 // available (and not reserved for something else).
2776 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {

2777 
2778   bool use_mmap = false;
2779 
2780   // mmap: smaller graining, no large page support
2781   // shm: large graining (256M), large page support, limited number of shm segments
2782   //
2783   // Prefer mmap wherever we either do not need large page support or have OS limits
2784 
2785   if (!UseLargePages || bytes < SIZE_16M) {
2786     use_mmap = true;
2787   }
2788 
2789   char* addr = NULL;
2790   if (use_mmap) {
2791     addr = reserve_mmaped_memory(bytes, requested_addr);
2792   } else {
2793     // shmat: wish address is mandatory, and do not try 16M pages here.
2794     shmatted_memory_info_t info;
2795     const int flags = RESSHM_WISHADDR_OR_FAIL;
2796     if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2797       addr = info.addr;
2798     }
2799   }
2800 
2801   return addr;
2802 }
2803 
2804 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2805   return ::read(fd, buf, nBytes);
2806 }
2807 
2808 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2809   return ::pread(fd, buf, nBytes, offset);
2810 }
2811 
2812 void os::naked_short_sleep(jlong ms) {
2813   struct timespec req;
2814 
2815   assert(ms < 1000, "Un-interruptible sleep, short time use only");
2816   req.tv_sec = 0;
2817   if (ms > 0) {


3612     // signal
3613     if (!signal_name(exception_code, buf, size)) {
3614       jio_snprintf(buf, size, "SIG%d", exception_code);
3615     }
3616     return buf;
3617   } else {
3618     return NULL;
3619   }
3620 }
3621 
3622 // To install functions for atexit system call
3623 extern "C" {
3624   static void perfMemory_exit_helper() {
3625     perfMemory_exit();
3626   }
3627 }
3628 
3629 // This is called _before_ most of the global arguments have been parsed.
3630 void os::init(void) {
3631   // This is basic, we want to know if that ever changes.
3632   // (the shared memory boundary is supposed to be 256M aligned)
3633   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3634 
3635   // First off, we need to know whether we run on AIX or PASE, and
3636   // the OS level we run on.
3637   os::Aix::initialize_os_info();
3638 
3639   // Scan environment (SPEC1170 behaviour, etc)
3640   os::Aix::scan_environment();
3641 
3642   // Check which pages are supported by AIX.
3643   os::Aix::query_multipage_support();
3644 
3645   // Next, we need to initialize libo4 and libperfstat libraries.
3646   if (os::Aix::on_pase()) {
3647     os::Aix::initialize_libo4();
3648   } else {
3649     os::Aix::initialize_libperfstat();
3650   }
3651 
3652   // Reset the perfstat information provided by ODM.
3653   if (os::Aix::on_aix()) {
3654     libperfstat::perfstat_reset();
3655   }
3656 
3657   // Now initialize basic system properties. Note that for some of the values we
3658   // need libperfstat etc.
3659   os::Aix::initialize_system_info();
3660 
3661   // Initialize large page support.
3662   if (UseLargePages) {
3663     os::large_page_init();
3664     if (!UseLargePages) {
3665       // initialize os::_page_sizes
3666       _page_sizes[0] = Aix::page_size();
3667       _page_sizes[1] = 0;
3668       if (Verbose) {
3669         fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3670       }
3671     }
3672   } else {
3673     // initialize os::_page_sizes
3674     _page_sizes[0] = Aix::page_size();
3675     _page_sizes[1] = 0;
3676   }
3677 
3678   // debug trace
3679   if (Verbose) {
3680     fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3681     fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3682     fprintf(stderr, "os::_page_sizes = ( ");
3683     for (int i = 0; _page_sizes[i]; i ++) {
3684       fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3685     }
3686     fprintf(stderr, ")\n");
3687   }
3688 
3689   _initial_pid = getpid();
3690 
3691   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3692 
3693   init_random(1234567);
3694 
3695   ThreadCritical::initialize();
3696 
3697   // Main_thread points to the aboriginal thread.
3698   Aix::_main_thread = pthread_self();
3699 
3700   initial_time_count = os::elapsed_counter();
3701   pthread_mutex_init(&dl_mutex, NULL);
3702 }
3703 
3704 // This is called _after_ the global arguments have been parsed.
3705 jint os::init_2(void) {
3706 
3707   trcVerbose("processor count: %d", os::_processor_count);
3708   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3709 
3710   // Initially build up the loaded dll map.
3711   LoadedLibraries::reload();
3712 
3713   const int page_size = Aix::page_size();
3714   const int map_size = page_size;
3715 
3716   address map_address = (address) MAP_FAILED;
3717   const int prot  = PROT_READ;
3718   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3719 
3720   // use optimized addresses for the polling page,
3721   // e.g. map it to a special 32-bit address.
3722   if (OptimizePollingPageLocation) {
3723     // architecture-specific list of address wishes:
3724     address address_wishes[] = {
3725       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3726       // PPC64: all address wishes are non-negative 32 bit values where
3727       // the lower 16 bits are all zero. we can load these addresses
3728       // with a single ppc_lis instruction.
3729       (address) 0x30000000, (address) 0x31000000,
3730       (address) 0x32000000, (address) 0x33000000,
3731       (address) 0x40000000, (address) 0x41000000,
3732       (address) 0x42000000, (address) 0x43000000,
3733       (address) 0x50000000, (address) 0x51000000,
3734       (address) 0x52000000, (address) 0x53000000,
3735       (address) 0x60000000, (address) 0x61000000,
3736       (address) 0x62000000, (address) 0x63000000
3737     };
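    // (Illustration: a wish address such as 0x30000000 can be materialized
    // with a single "lis rX, 0x3000", since lis places its 16-bit immediate
    // into the upper halfword of the register.)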
3738     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3739 
3740     // iterate over the list of address wishes:
3741     for (int i=0; i<address_wishes_length; i++) {
3742       // try to map with current address wish.
3743       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3744       // fail if the address is already mapped.
3745       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3746                                      map_size, prot,
3747                                      flags | MAP_FIXED,
3748                                      -1, 0);
3749       if (Verbose) {
3750         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3751                 address_wishes[i], map_address + (ssize_t)page_size);
3752       }
3753 
3754       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3755         // map succeeded and map_address is at wished address, exit loop.
3756         break;
3757       }
3758 
3759       if (map_address != (address) MAP_FAILED) {
3760         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3761         ::munmap(map_address, map_size);
3762         map_address = (address) MAP_FAILED;
3763       }
3764       // map failed, continue loop.
3765     }
3766   } // end OptimizePollingPageLocation
3767 
3768   if (map_address == (address) MAP_FAILED) {
3769     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3770   }
3771   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3772   os::set_polling_page(map_address);
3773 
3774   if (!UseMembar) {
3775     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3776     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3777     os::set_memory_serialize_page(mem_serialize_page);
3778 
3779 #ifndef PRODUCT
3780     if (Verbose && PrintMiscellaneous)
3781       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);

3782 #endif
3783   }
3784 
3785   // initialize suspend/resume support - must do this before signal_sets_init()
3786   if (SR_initialize() != 0) {
3787     perror("SR_initialize failed");
3788     return JNI_ERR;
3789   }
3790 
3791   Aix::signal_sets_init();
3792   Aix::install_signal_handlers();
3793 
3794   // Check minimum allowable stack size for thread creation and to initialize
3795   // the java system classes, including StackOverflowError - depends on page
3796   // size. Add a page for compiler2 recursion in main thread.
3797   // Add in 2*BytesPerWord times page size to account for VM stack during
3798   // class initialization depending on 32 or 64 bit VM.
3799   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3800             (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3801                      2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());


3802 
3803   size_t threadStackSizeInBytes = ThreadStackSize * K;
3804   if (threadStackSizeInBytes != 0 &&
3805       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3806         tty->print_cr("\nThe stack size specified is too small. "
3807                       "Specify at least %dk",
3808                       os::Aix::min_stack_allowed / K);
3809         return JNI_ERR;
3810   }
3811 
3812   // Make the stack size a multiple of the page size so that
3813   // the yellow/red zones can be guarded.
3814   // Note that this can be 0, if no default stacksize was set.
3815   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3816 
3817   Aix::libpthread_init();
3818 
3819   if (MaxFDLimit) {
3820     // set the number of file descriptors to max. print out error
3821     // if getrlimit/setrlimit fails but continue regardless.
3822     struct rlimit nbr_files;
3823     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3824     if (status != 0) {
3825       if (PrintMiscellaneous && (Verbose || WizardMode))
3826         perror("os::init_2 getrlimit failed");
3827     } else {
3828       nbr_files.rlim_cur = nbr_files.rlim_max;
3829       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3830       if (status != 0) {
3831         if (PrintMiscellaneous && (Verbose || WizardMode))
3832           perror("os::init_2 setrlimit failed");
3833       }
3834     }
3835   }
3836 
3837   if (PerfAllowAtExitRegistration) {
3838     // only register atexit functions if PerfAllowAtExitRegistration is set.
3839     // atexit functions can be delayed until process exit time, which
3840     // can be problematic for embedded VM situations. Embedded VMs should
3841     // call DestroyJavaVM() to assure that VM resources are released.
3842 
3843     // note: perfMemory_exit_helper atexit function may be removed in
3844     // the future if the appropriate cleanup code can be added to the
3845     // VM_Exit VMOperation's doit method.
3846     if (atexit(perfMemory_exit_helper) != 0) {
3847       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3848     }
3849   }
3850 
3851   return JNI_OK;
3852 }
3853 
3854 // Mark the polling page as unreadable
3855 void os::make_polling_page_unreadable(void) {
3856   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3857     fatal("Could not disable polling page");
3858   }
3859 };
3860 
3861 // Mark the polling page as readable
3862 void os::make_polling_page_readable(void) {
3863   // Changed according to os_linux.cpp.


4145   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4146     return 0;
4147   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4148     return 0;
4149   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4150     return 0;
4151   }
4152   *bytes = end - cur;
4153   return 1;
4154 }
4155 
4156 // Map a block of memory.
4157 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4158                         char *addr, size_t bytes, bool read_only,
4159                         bool allow_exec) {
4160   int prot;
4161   int flags = MAP_PRIVATE;
4162 
4163   if (read_only) {
4164     prot = PROT_READ;

4165   } else {
4166     prot = PROT_READ | PROT_WRITE;

4167   }
4168 
4169   if (allow_exec) {
4170     prot |= PROT_EXEC;
4171   }
4172 
4173   if (addr != NULL) {
4174     flags |= MAP_FIXED;
4175   }
4176 
4177   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,





4178                                      fd, file_offset);
4179   if (mapped_address == MAP_FAILED) {
4180     return NULL;
4181   }
4182   return mapped_address;
4183 }
4184 
4185 // Remap a block of memory.
4186 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4187                           char *addr, size_t bytes, bool read_only,
4188                           bool allow_exec) {
4189   // same as map_memory() on this OS
4190   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4191                         allow_exec);
4192 }
4193 
4194 // Unmap a block of memory.
4195 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4196   return munmap(addr, bytes) == 0;
4197 }


4415 
4416 // Scan environment for important settings which might affect the VM.
4417 // Trace out settings. Warn about invalid settings and/or correct them.
4418 //
4419 // Must run after os::Aix::initialize_os_info().
4420 void os::Aix::scan_environment() {
4421 
4422   char* p;
4423   int rc;
4424 
4425   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4426   // System V shared memory behaves. One effect is that the page size of
4427   // shared memory cannot be changed dynamically, effectively preventing
4428   // large pages from working.
4429   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4430   // recommendation is (in OSS notes) to switch it off.
4431   p = ::getenv("EXTSHM");
4432   if (Verbose) {
4433     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4434   }
4435   if (p && strcmp(p, "ON") == 0) {
4436     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4437     _extshm = 1;
4438   } else {
4439     _extshm = 0;
4440   }
4441 
4442   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4443   // Not tested, not supported.
4444   //
4445   // Note that it might be worth the trouble to test and to require it, if only to
4446   // get useful return codes for mprotect.
4447   //
4448   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4449   // exec() ? before loading the libjvm ? ....)
4450   p = ::getenv("XPG_SUS_ENV");
4451   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4452   if (p && strcmp(p, "ON") == 0) {
4453     _xpg_sus_mode = 1;
4454     trc("Unsupported setting: XPG_SUS_ENV=ON");
4455     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to


4476 
4477 // AIX: initialize the libperfstat library (we load this dynamically
4478 // because it is only available on AIX).
4479 void os::Aix::initialize_libperfstat() {
4480 
4481   assert(os::Aix::on_aix(), "AIX only");
4482 
4483   if (!libperfstat::init()) {
4484     trc("libperfstat initialization failed.");
4485     assert(false, "libperfstat initialization failed");
4486   } else {
4487     if (Verbose) {
4488       fprintf(stderr, "libperfstat initialized.\n");
4489     }
4490   }
4491 } // end: os::Aix::initialize_libperfstat
4492 
4493 /////////////////////////////////////////////////////////////////////////////
4494 // thread stack
4495 
4496 // function to query the current stack size using pthread_getthrds_np
4497 //
4498 // ! do not change anything here unless you know what you are doing !
4499 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4500 
4501   // This only works when invoked on a pthread. As we agreed not to use
4502   // primordial threads anyway, I assert this here.
4503   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4504 
4505   // information about this api can be found (a) in the pthread.h header and
4506   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4507   //
4508   // The use of this API to find out the current stack is kind of undefined.
4509   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4510   // enough for cases where I let the pthread library create its stacks. For cases
4511   // where I create my own stack and pass it to pthread_create, it does not
4512   // seem to work (the returned stack size in that case is 0).
4513 
4514   pthread_t tid = pthread_self();
4515   struct __pthrdsinfo pinfo;
4516   char dummy[1]; // we only need this to satisfy the api and to not get E
4517   int dummy_size = sizeof(dummy);
4518 
4519   memset(&pinfo, 0, sizeof(pinfo));
4520 
4521   const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4522                                       sizeof(pinfo), dummy, &dummy_size);
4523 
4524   if (rc != 0) {
4525     fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4526     guarantee(0, "pthread_getthrds_np failed");

4527   }

4528 
4529   guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4530 
4531   // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
4532   // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
4533   // Not sure what to do here - I feel inclined to forbid this use case completely.
4534   guarantee(pinfo.__pi_stacksize, "returned stack size invalid");


























4535 
4536   // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4537   if (p_stack_base) {
4538     (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4539   }
4540 
4541   if (p_stack_size) {
4542     (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4543   }
4544 
4545 #ifndef PRODUCT
4546   if (Verbose) {
4547     fprintf(stderr,
4548             "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4549             ", real stack_size=" INTPTR_FORMAT
4550             ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4551             (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4552             (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4553             pinfo.__pi_stacksize - os::Aix::stack_page_size());
4554   }
4555 #endif
4556 
4557 } // end query_stack_dimensions

4558 
4559 // get the current stack base from the OS (actually, the pthread library)
4560 address os::current_stack_base() {
4561   address p;
4562   query_stack_dimensions(&p, 0);
4563   return p;
4564 }
4565 
4566 // get the current stack size from the OS (actually, the pthread library)
4567 size_t os::current_stack_size() {
4568   size_t s;
4569   query_stack_dimensions(0, &s);
4570   return s;
4571 }
4572 
4573 // Refer to the comments in os_solaris.cpp park-unpark.
4574 //
4575 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4576 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4577 // For specifics regarding the bug see GLIBC BUGID 261237 :
4578 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4579 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4580 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4581 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
4582 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4583 // and monitorenter when we're using 1-0 locking. All those operations may result in
4584 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4585 // of libpthread avoids the problem, but isn't practical.
4586 //


   1 /*
   2  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *


  96 #include <sys/resource.h>
  97 #include <sys/select.h>
  98 #include <sys/shm.h>
  99 #include <sys/socket.h>
 100 #include <sys/stat.h>
 101 #include <sys/sysinfo.h>
 102 #include <sys/systemcfg.h>
 103 #include <sys/time.h>
 104 #include <sys/times.h>
 105 #include <sys/types.h>
 106 #include <sys/utsname.h>
 107 #include <sys/vminfo.h>
 108 #include <sys/wait.h>
 109 
 110 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 111 // getrusage() is prepared to handle the associated failure.
 112 #ifndef RUSAGE_THREAD
 113 #define RUSAGE_THREAD   (1)               /* only the calling thread */
 114 #endif
 115 
 116 // PPC port
 117 static const uintx Use64KPagesThreshold       = 1*M;
 118 static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
 119 
 120 // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
 121 #if !defined(_AIXVERSION_610)
 122 extern "C" {
 123   int getthrds64(pid_t ProcessIdentifier,
 124                  struct thrdentry64* ThreadBuffer,
 125                  int ThreadSize,
 126                  tid64_t* IndexPointer,
 127                  int Count);
 128 }
 129 #endif
 130 
 131 #define MAX_PATH (2 * K)
 132 
 133 // for timer info max values which include all bits
 134 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 135 // for multipage initialization error analysis (in 'g_multipage_error')
 136 #define ERROR_MP_OS_TOO_OLD                          100
 137 #define ERROR_MP_EXTSHM_ACTIVE                       101
 138 #define ERROR_MP_VMGETINFO_FAILED                    102
 139 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103


 155 #define PV_7_Compat 0x208000   /* Power PC 7 */
 156 #endif
 157 #ifndef PV_8
 158 #define PV_8 0x300000          /* Power PC 8 */
 159 #define PV_8_Compat 0x308000   /* Power PC 8 */
 160 #endif
 161 
 162 #define trcVerbose(fmt, ...) { /* PPC port */  \
 163   if (Verbose) { \
 164     fprintf(stderr, fmt, ##__VA_ARGS__); \
 165     fputc('\n', stderr); fflush(stderr); \
 166   } \
 167 }
 168 #define trc(fmt, ...)        /* PPC port */
 169 
 170 #define ERRBYE(s) { \
 171     trcVerbose(s); \
 172     return -1; \
 173 }
 174 
 175 // Query dimensions of the stack of the calling thread.
 176 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 177 
 178 // function to check a given stack pointer against given stack limits
 179 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
 180   if (((uintptr_t)sp) & 0x7) {
 181     return false;
 182   }
 183   if (sp > stack_base) {
 184     return false;
 185   }
 186   if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
 187     return false;
 188   }
 189   return true;
 190 }
 191 
 192 // returns true if function is a valid codepointer
 193 inline bool is_valid_codepointer(codeptr_t p) {
 194   if (!p) {
 195     return false;
 196   }


 207 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
 208     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 209 }
 210 
 211 // Macro to check the current stack pointer against given stacklimits.
 212 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
 213   address sp; \
 214   sp = os::current_stack_pointer(); \
 215   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 216 }
 217 
 218 ////////////////////////////////////////////////////////////////////////////////
 219 // global variables (for a description see os_aix.hpp)
 220 
 221 julong    os::Aix::_physical_memory = 0;
 222 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 223 int       os::Aix::_page_size = -1;
 224 int       os::Aix::_on_pase = -1;
 225 int       os::Aix::_os_version = -1;
 226 int       os::Aix::_stack_page_size = -1;



 227 int       os::Aix::_xpg_sus_mode = -1;
 228 int       os::Aix::_extshm = -1;
 229 int       os::Aix::_logical_cpus = -1;
 230 
 231 ////////////////////////////////////////////////////////////////////////////////
 232 // local variables
 233 
 234 static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 235 static jlong    initial_time_count = 0;
 236 static int      clock_tics_per_sec = 100;
 237 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 238 static bool     check_signals      = true;
 239 static pid_t    _initial_pid       = 0;
 240 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 241 static sigset_t SR_sigset;
 242 static pthread_mutex_t dl_mutex;              // Used to protect dlsym() calls.
 243 
 244 // This describes the state of multipage support of the underlying
 245 // OS. Note that this is of no interest to the outside world and
 246 // therefore should not be defined in the AIX class.
 247 //
 248 // AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
 249 // latter two (16M "large" and 16G "huge" pages) require special
 250 // setup and are normally not available.
 251 //
 252 // AIX supports multiple page sizes per process, for:
 253 //  - Stack (of the primordial thread, so not relevant for us)
 254 //  - Data - data, bss, heap, for us also pthread stacks
 255 //  - Text - text code
 256 //  - shared memory
 257 //
 258 // Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
 259 // and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
 260 //
 261 // For shared memory, page size can be set dynamically via
 262 // shmctl(). Different shared memory regions can have different page
 263 // sizes.
 264 //
 265 // More information can be found at the IBM info center:
 266 //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
 267 //
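// (Usage sketch, hypothetical invocation: default page sizes can be requested
// at launch time via the loader control variable, e.g.
//   LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@SHMPSIZE=64K <java launcher>
// where suboptions are separated by '@'.)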
 268 static struct {
 269   size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
 270   size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
 271   size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
 272   size_t pthr_stack_pagesize; // stack page size of pthread threads
 273   size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
 274   bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
 275   bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
 276   int error;                  // Error describing if something went wrong at multipage init.
 277 } g_multipage_support = {
 278   (size_t) -1,
 279   (size_t) -1,
 280   (size_t) -1,
 281   (size_t) -1,
 282   (size_t) -1,
 283   false, false,
 284   0
 285 };
 286 
 287 // We must not accidentally allocate memory close to the BRK - even if
 288 // that would work - because then we prevent the BRK segment from
 289 // growing which may result in a malloc OOM even though there is
 290 // enough memory. The problem only arises if we shmat() or mmap() at
 291 // a specific wish address, e.g. to place the heap in a
 292 // compressed-oops-friendly way.
 293 static bool is_close_to_brk(address a) {
 294   address a1 = (address) sbrk(0);
 295   if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
 296     return true;
 297   }
 298   return false;
 299 }
 300 
 301 julong os::available_memory() {
 302   return Aix::available_memory();
 303 }
 304 
 305 julong os::Aix::available_memory() {
 306   os::Aix::meminfo_t mi;
 307   if (os::Aix::get_meminfo(&mi)) {
 308     return mi.real_free;
 309   } else {
 310     return 0xFFFFFFFFFFFFFFFFLL;
 311   }
 312 }
 313 
 314 julong os::physical_memory() {
 315   return Aix::physical_memory();
 316 }
 317 
 318 ////////////////////////////////////////////////////////////////////////////////
 319 // environment support
 320 


 332 
 333 bool os::have_special_privileges() {
 334   static bool init = false;
 335   static bool privileges = false;
 336   if (!init) {
 337     privileges = (getuid() != geteuid()) || (getgid() != getegid());
 338     init = true;
 339   }
 340   return privileges;
 341 }
 342 
 343 // Helper function, emulates disclaim64 using multiple 32bit disclaims
 344 // because we cannot use disclaim64() on AS/400 and old AIX releases.
 345 static bool my_disclaim64(char* addr, size_t size) {
 346 
 347   if (size == 0) {
 348     return true;
 349   }
 350 
 351   // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
 352   const unsigned int maxDisclaimSize = 0x40000000;
 353 
 354   const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
 355   const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
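  // (Worked example: for size = 2.5G and maxDisclaimSize = 1G this yields two
  //  full 1G disclaims plus a last disclaim of 0.5G.)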
 356 
 357   char* p = addr;
 358 
 359   for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
 360     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 361       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
 362       return false;
 363     }
 364     p += maxDisclaimSize;
 365   }
 366 
 367   if (lastDisclaimSize > 0) {
 368     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
 369       trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
 370       return false;
 371     }
 372   }


 409   // Get the number of online(logical) cpus instead of configured.
 410   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
 411   assert(_processor_count > 0, "_processor_count must be > 0");
 412 
 413   // Retrieve total physical storage.
 414   os::Aix::meminfo_t mi;
 415   if (!os::Aix::get_meminfo(&mi)) {
 416     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
 417     assert(false, "os::Aix::get_meminfo failed.");
 418   }
 419   _physical_memory = (julong) mi.real_total;
 420 }
 421 
 422 // Helper function for tracing page sizes.
 423 static const char* describe_pagesize(size_t pagesize) {
 424   switch (pagesize) {
 425     case SIZE_4K : return "4K";
 426     case SIZE_64K: return "64K";
 427     case SIZE_16M: return "16M";
 428     case SIZE_16G: return "16G";
 429     case -1:       return "not set";
 430     default:
 431       assert(false, "surprise");
 432       return "??";
 433   }
 434 }
 435 
 436 // Probe OS for multipage support.
 437 // Will fill the global g_multipage_support structure.

 438 // Must be called before calling os::large_page_init().
 439 static void query_multipage_support() {
 440 
 441   guarantee(g_multipage_support.pagesize == -1,




 442             "do not call twice");
 443 
 444   g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
 445 
 446   // This really would surprise me.
 447   assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

 448 
 449   // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
 450   // Default data page size is defined either by linker options (-bdatapsize)
 451   // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
 452   // default should be 4K.

 453   {
 454     void* p = ::malloc(SIZE_16M);
 455     g_multipage_support.datapsize = os::Aix::query_pagesize(p);
 456     ::free(p);

 457   }
 458 
 459   // Query default shm page size (LDR_CNTRL SHMPSIZE).
 460   {
 461     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
 462     guarantee(shmid != -1, "shmget failed");
 463     void* p = ::shmat(shmid, NULL, 0);
 464     ::shmctl(shmid, IPC_RMID, NULL);
 465     guarantee(p != (void*) -1, "shmat failed");
 466     g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
 467     ::shmdt(p);
 468   }
 469 
 470   // Before querying the stack page size, make sure we are not running as primordial
 471   // thread (because primordial thread's stack may have different page size than
 472   // pthread thread stacks). Running a VM on the primordial thread won't work for a
 473   // number of reasons so we may just as well guarantee it here.
 474   guarantee0(!os::Aix::is_primordial_thread());
 475 
 476   // Query pthread stack page size.
 477   {
 478     int dummy = 0;
 479     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);





 480   }
 481 
 482   // Query default text page size (LDR_CNTRL TEXTPSIZE).
 483   /* PPC port: so far unused.
 484   {
 485     address any_function =
 486       (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
 487     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
 488   }
 489   */
 490 
 491   // Now probe for support of 64K pages and 16M pages.
 492 
 493   // Before OS/400 V6R1, there is no support for pages other than 4K.
 494   if (os::Aix::on_pase_V5R4_or_older()) {
 495     Unimplemented();
 496     goto query_multipage_support_end;
 497   }
 498 
 499   // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
 500   {
 501     const int MAX_PAGE_SIZES = 4;
 502     psize_t sizes[MAX_PAGE_SIZES];
 503     const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
 504     if (num_psizes == -1) {
 505       trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
 506       trc("disabling multipage support.\n");
 507       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;



 508       goto query_multipage_support_end;
 509     }
 510     guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
 511     assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
 512     trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);

 513     for (int i = 0; i < num_psizes; i ++) {
 514       trcVerbose(" %s ", describe_pagesize(sizes[i]));


 515     }
 516 
 517     // Can we use 64K, 16M pages?


 518     for (int i = 0; i < num_psizes; i ++) {
 519       const size_t pagesize = sizes[i];
 520       if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
 521         continue;
 522       }
 523       bool can_use = false;
 524       trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
 525       const int shmid = ::shmget(IPC_PRIVATE, pagesize,
 526         IPC_CREAT | S_IRUSR | S_IWUSR);
 527       guarantee0(shmid != -1); // Should always work.
 528       // Try to set pagesize.






 529       struct shmid_ds shm_buf = { 0 };
 530       shm_buf.shm_pagesize = pagesize;
 531       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
 532         const int en = errno;
 533         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 534         // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
 535         // PPC port  MiscUtils::describe_errno(en));
 536       } else {
 537         // Attach and double check pagesize.
 538         void* p = ::shmat(shmid, NULL, 0);
 539         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
 540         guarantee0(p != (void*) -1); // Should always work.
 541         const size_t real_pagesize = os::Aix::query_pagesize(p);
 542         if (real_pagesize != pagesize) {
 543           trcVerbose("real page size (0x%llX) differs.", real_pagesize);
 544         } else {
 545           can_use = true;
 546         }
 547         ::shmdt(p);
 548       }
 549       trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
 550       if (pagesize == SIZE_64K) {
 551         g_multipage_support.can_use_64K_pages = can_use;
 552       } else if (pagesize == SIZE_16M) {
 553         g_multipage_support.can_use_16M_pages = can_use;
 554       }
 555     }
 556 
 557   } // end: check which pages can be used for shared memory
 558 
 559 query_multipage_support_end:
 560 
 561   trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
 562       describe_pagesize(g_multipage_support.pagesize));
 563   trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
 564       describe_pagesize(g_multipage_support.datapsize));
 565   trcVerbose("Text page size: %s\n",
 566       describe_pagesize(g_multipage_support.textpsize));
 567   trcVerbose("Thread stack page size (pthread): %s\n",
 568       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
 569   trcVerbose("Default shared memory page size: %s\n",
 570       describe_pagesize(g_multipage_support.shmpsize));
 571   trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
 572       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
 573   trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
 574       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
 575   trcVerbose("Multipage error details: %d\n",
 576       g_multipage_support.error);
 577 
 578   // sanity checks
 579   assert0(g_multipage_support.pagesize == SIZE_4K);
 580   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
 581   // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
 582   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
 583   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 584 
 585 } // end os::Aix::query_multipage_support()
 586 
 587 void os::init_system_properties_values() {
 588 
 589 #define DEFAULT_LIBPATH "/usr/lib:/lib"
 590 #define EXTENSIONS_DIR  "/lib/ext"
 591 
 592   // Buffer that fits several sprintfs.
 593   // Note that the space for the trailing null is provided
 594   // by the nulls included by the sizeof operator.
 595   const size_t bufsize =
 596     MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
 597          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
 598   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
 599 
 600   // sysclasspath, java_home, dll_dir
 601   {
 602     char *pslash;
 603     os::jvm_path(buf, bufsize);


1612   st->print(", DATA ");
1613   getrlimit(RLIMIT_DATA, &rlim);
1614   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1615   else st->print("%uk", rlim.rlim_cur >> 10);
1616   st->cr();
1617 
1618   // load average
1619   st->print("load average:");
1620   double loadavg[3] = {-1.L, -1.L, -1.L};
1621   os::loadavg(loadavg, 3);
1622   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1623   st->cr();
1624 }
1625 
1626 void os::print_memory_info(outputStream* st) {
1627 
1628   st->print_cr("Memory:");
1629 
1630   st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1631   st->print_cr("  default stack page size: %s", describe_pagesize(os::Aix::stack_page_size()));
1632   st->print_cr("  Default shared memory page size:        %s",
1633     describe_pagesize(g_multipage_support.shmpsize));
1634   st->print_cr("  Can use 64K pages dynamically with shared memory:  %s",
1635     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1636   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1637     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1638   if (g_multipage_error != 0) {
1639     st->print_cr("  multipage error: %d", g_multipage_error);
1640   }
1641 
1642   // print out LDR_CNTRL because it affects the default page sizes
1643   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1644   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1645 
1646   const char* const extshm = ::getenv("EXTSHM");
1647   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1648   if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1649     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1650   }
1651 
1652   // Call os::Aix::get_meminfo() to retrieve memory statistics.
1653   os::Aix::meminfo_t mi;
1654   if (os::Aix::get_meminfo(&mi)) {
1655     char buffer[256];
1656     if (os::Aix::on_aix()) {
1657       jio_snprintf(buffer, sizeof(buffer),
1658                    "  physical total : %llu\n"
1659                    "  physical free  : %llu\n"
1660                    "  swap total     : %llu\n"
1661                    "  swap free      : %llu\n",
1662                    mi.real_total,
1663                    mi.real_free,
1664                    mi.pgsp_total,
1665                    mi.pgsp_free);
1666     } else {
1667       Unimplemented();
1668     }
1669     st->print_raw(buffer);
1670   } else {


1873         //
1874         ::sem_post(&sig_sem);
1875 
1876         thread->java_suspend_self();
1877       }
1878     } while (threadIsSuspended);
1879   }
1880 }
1881 
1882 int os::signal_lookup() {
1883   return check_pending_signals(false);
1884 }
1885 
1886 int os::signal_wait() {
1887   return check_pending_signals(true);
1888 }
1889 
1890 ////////////////////////////////////////////////////////////////////////////////
1891 // Virtual Memory
1892 
1893 // We need to keep small simple bookkeeping for os::reserve_memory and friends.






















1894 
1895 #define VMEM_MAPPED  1
1896 #define VMEM_SHMATED 2
1897 
1898 struct vmembk_t {
1899   int type;         // 1 - mmap, 2 - shmat
1900   char* addr;
1901   size_t size;      // Real size, may be larger than usersize.
1902   size_t pagesize;  // page size of area
1903   vmembk_t* next;
1904 
1905   bool contains_addr(char* p) const {
1906     return p >= addr && p < (addr + size);
1907   }
1908 
1909   bool contains_range(char* p, size_t s) const {
1910     return contains_addr(p) && contains_addr(p + s - 1);
1911   }
1912 
1913   void print_on(outputStream* os) const {
1914     os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1915       " bytes, %d %s pages), %s",
1916       addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1917       (type == VMEM_SHMATED ? "shmat" : "mmap")
1918     );
1919   }
1920 
1921   // Check that range is a sub range of memory block (or equal to memory block);
1922   // also check that range is fully page aligned to the page size of the block.
1923   void assert_is_valid_subrange(char* p, size_t s) const {
1924     if (!contains_range(p, s)) {
1925       fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1926               "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1927               p, p + s - 1, addr, addr + size - 1);
1928       guarantee0(false);
1929     }
1930     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1931       fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1932               " aligned to pagesize (%s)\n", p, p + s);
1933       guarantee0(false);
1934     }



1935   }
1936 };
1937 
1938 static struct {
1939   vmembk_t* first;
1940   MiscUtils::CritSect cs;
1941 } vmem;
1942 
1943 static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1944   vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1945   assert0(p);
1946   if (p) {
1947     MiscUtils::AutoCritSect lck(&vmem.cs);
1948     p->addr = addr; p->size = size;
1949     p->pagesize = pagesize;
1950     p->type = type;
1951     p->next = vmem.first;
1952     vmem.first = p;
1953   }
1954 }
1955 
1956 static vmembk_t* vmembk_find(char* addr) {
1957   MiscUtils::AutoCritSect lck(&vmem.cs);
1958   for (vmembk_t* p = vmem.first; p; p = p->next) {
1959     if (p->addr <= addr && (p->addr + p->size) > addr) {
1960       return p;
1961     }
1962   }
1963   return NULL;
1964 }
1965 
1966 static void vmembk_remove(vmembk_t* p0) {
1967   MiscUtils::AutoCritSect lck(&vmem.cs);
1968   assert0(p0);
1969   assert0(vmem.first); // List should not be empty.
1970   for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1971     if (*pp == p0) {
1972       *pp = p0->next;
1973       ::free(p0);
1974       return;
1975     }




1976   }
1977   assert0(false); // Not found?
1978 }
1979 
1980 static void vmembk_print_on(outputStream* os) {
1981   MiscUtils::AutoCritSect lck(&vmem.cs);
1982   for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1983     vmi->print_on(os);
1984     os->cr();
1985   }
1986 }
1987 
1988 // Reserve and attach a section of System V memory.
1989 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1990 // address. Failing that, it will attach the memory anywhere.
1991 // If <requested_addr> is NULL, function will attach the memory anywhere.

1992 //
1993 // <alignment_hint> is being ignored by this function. It is very probable, however, that the
1994 // alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1995 // Should this not be enough, we can put more work into it.
1996 static char* reserve_shmated_memory (
1997   size_t bytes,
1998   char* requested_addr,
1999   size_t alignment_hint) {
2000 
2001   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
2002     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
2003     bytes, requested_addr, alignment_hint);
2004 
2005   // Either give me wish address or wish alignment but not both.
2006   assert0(!(requested_addr != NULL && alignment_hint != 0));
2007 
2008   // We must prevent anyone from attaching too close to the
2009   // BRK because that may cause malloc OOM.
2010   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2011     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2012       "Will attach anywhere.", requested_addr);
2013     // Act like the OS refused to attach there.
2014     requested_addr = NULL;
2015   }
2016 
2017   // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
2018   // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
2019   if (os::Aix::on_pase_V5R4_or_older()) {
2020     ShouldNotReachHere();
2021   }
2022 
2023   // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2024   const size_t size = align_size_up(bytes, SIZE_64K);
2025 
2026   // Reserve the shared segment.
2027   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2028   if (shmid == -1) {
2029     trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2030     return NULL;
2031   }
2032 
2033   // Important note:
2034   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2035   // We must right after attaching it remove it from the system. System V shm segments are global and
2036   // survive the process.
2037   // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

2038 
2039   struct shmid_ds shmbuf;
2040   memset(&shmbuf, 0, sizeof(shmbuf));
2041   shmbuf.shm_pagesize = SIZE_64K;
2042   if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2043     trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2044                size / SIZE_64K, errno);
2045     // I want to know if this ever happens.
2046     assert(false, "failed to set page size for shmat");
2047   }
2048 
2049   // Now attach the shared segment.
2050   // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2051   // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2052   // were not a segment boundary.
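2053 // (E.g. with SHMLBA = 256M, a requested_addr of 0x30001000 would be rounded
2053 //  down to the segment boundary 0x30000000.)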
2053   char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2054   const int errno_shmat = errno;
2055 
2056   // (A) Right after shmat and before handing shmat errors delete the shm segment.
2057   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2058     trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2059     assert(false, "failed to remove shared memory segment!");
2060   }
2061 
2062   // Handle shmat error. If we failed to attach, just return.
2063   if (addr == (char*)-1) {
2064     trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2065     return NULL;
2066   }
2067 
2068   // Just for info: query the real page size. In case setting the page size did not
2069   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2070   const size_t real_pagesize = os::Aix::query_pagesize(addr);
2071   if (real_pagesize != shmbuf.shm_pagesize) {
2072     trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2073   }
2074 
2075   if (addr) {
2076     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2077       addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2078   } else {
2079     if (requested_addr != NULL) {
2080       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2081     } else {
2082       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2083     }
2084   }
2085 
2086   // book-keeping
2087   vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2088   assert0(is_aligned_to(addr, os::vm_page_size()));





2089 
2090   return addr;
2091 }
2092 
2093 static bool release_shmated_memory(char* addr, size_t size) {


2094 
2095   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2096     addr, addr + size - 1);
2097 
2098   bool rc = false;



2099 
2100   // TODO: is there a way to verify shm size without doing bookkeeping?
2101   if (::shmdt(addr) != 0) {
2102     trcVerbose("error (%d).", errno);
2103   } else {
2104     trcVerbose("ok.");
2105     rc = true;
2106   }
2107   return rc;
2108 }
2109 
2110 static bool uncommit_shmated_memory(char* addr, size_t size) {
2111   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2112     addr, addr + size - 1);
2113 
2114   const bool rc = my_disclaim64(addr, size);
2115 
2116   if (!rc) {
2117     trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2118     return false;

2119   }
2120   return true;
2121 }
2122 
2123 // Reserve memory via mmap.
2124 // If <requested_addr> is given, an attempt is made to attach at the given address.
2125 // Failing that, memory is allocated at any address.
2126 // If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2127 // allocate at an address aligned with the given alignment. Failing that, memory
2128 // is aligned anywhere.
2129 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2130   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2131     "alignment_hint " UINTX_FORMAT "...",
2132     bytes, requested_addr, alignment_hint);
2133 
2134   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2135   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2136     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2137     return NULL;
2138   }
2139 
2140   // We must prevent anyone from attaching too close to the
2141   // BRK because that may cause malloc OOM.
2142   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2143     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2144       "Will attach anywhere.", requested_addr);
2145     // Act like the OS refused to attach there.
2146     requested_addr = NULL;
2147   }
2148 
2149   // Specify one or the other but not both.
2150   assert0(!(requested_addr != NULL && alignment_hint > 0));







2151 
2152   // In 64K mode, we claim the global page size (os::vm_page_size())
2153   // is 64K. This is one of the few points where that illusion may
2154   // break, because mmap() will always return memory aligned to 4K. So
2155   // we must ensure we only ever return memory aligned to 64k.
2156   if (alignment_hint) {
2157     alignment_hint = lcm(alignment_hint, os::vm_page_size());
2158   } else {
2159     alignment_hint = os::vm_page_size();
2160   }
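  // (E.g. in 64K mode, a caller-provided 4K alignment_hint is raised to
  //  lcm(4K, 64K) = 64K here.)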
2161 
2162   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2163   const size_t size = align_size_up(bytes, os::vm_page_size());
2164 
2165   // alignment: Allocate memory large enough to include an aligned range of the right size and
2166   // cut off the leading and trailing waste pages.
2167   assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2168   const size_t extra_size = size + alignment_hint;
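  // (Example: for size = 1M and alignment_hint = 64K we map 1M + 64K and
  //  afterwards unmap the unaligned leading and trailing waste.)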
2169 
2170   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2171   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2172   int flags = MAP_ANONYMOUS | MAP_SHARED;
2173 
2174   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2175   // it means if wishaddress is given but MAP_FIXED is not set.
2176   //
2177   // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2178   // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2179   // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2180   // get clobbered.
2181   if (requested_addr != NULL) {
2182     if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2183       flags |= MAP_FIXED;
2184     }



2185   }
2186 
2187   char* addr = (char*)::mmap(requested_addr, extra_size,
2188       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2189 
2190   if (addr == MAP_FAILED) {
2191     trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2192     return NULL;

2193   }
2194 
2195   // Handle alignment.
2196   char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2197   const size_t waste_pre = addr_aligned - addr;
2198   char* const addr_aligned_end = addr_aligned + size;
2199   const size_t waste_post = extra_size - waste_pre - size;
2200   if (waste_pre > 0) {
2201     ::munmap(addr, waste_pre);
2202   }
2203   if (waste_post > 0) {
2204     ::munmap(addr_aligned_end, waste_post);
2205   }
2206   addr = addr_aligned;
2207 
2208   if (addr) {
2209     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2210       addr, addr + bytes, bytes);
2211   } else {
2212     if (requested_addr != NULL) {
2213       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2214     } else {
2215       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2216     }
2217   }
2218 
2219   // bookkeeping
2220   vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2221 
2222   // Test alignment, see above.
2223   assert0(is_aligned_to(addr, os::vm_page_size()));

2224 
2225   return addr;
2226 }
2227 
2228 static bool release_mmaped_memory(char* addr, size_t size) {
2229   assert0(is_aligned_to(addr, os::vm_page_size()));
2230   assert0(is_aligned_to(size, os::vm_page_size()));
2231 
2232   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2233     addr, addr + size - 1);
2234   bool rc = false;
2235 
2236   if (::munmap(addr, size) != 0) {
2237     trcVerbose("failed (%d)\n", errno);
2238     rc = false;
2239   } else {
2240     trcVerbose("ok.");
2241     rc = true;
2242   }
2243 
2244   return rc;
2245 }
2246 
2247 static bool uncommit_mmaped_memory(char* addr, size_t size) {
2248 
2249   assert0(is_aligned_to(addr, os::vm_page_size()));
2250   assert0(is_aligned_to(size, os::vm_page_size()));
2251 
2252   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2253     addr, addr + size - 1);
2254   bool rc = false;



































2255 
2256   // Uncommit mmap memory with msync MS_INVALIDATE.
2257   if (::msync(addr, size, MS_INVALIDATE) != 0) {
2258     trcVerbose("failed (%d)\n", errno);
2259     rc = false;
2260   } else {
2261     trcVerbose("ok.");
2262     rc = true;



2263   }


2264 
2265   return rc;











2266 }
2267 



2268 // End: shared memory bookkeeping
2269 ////////////////////////////////////////////////////////////////////////////////////////////////////
2270 
2271 int os::vm_page_size() {
2272   // Seems redundant as all get out.
2273   assert(os::Aix::page_size() != -1, "must call os::init");
2274   return os::Aix::page_size();
2275 }
2276 
2277 // Aix allocates memory by pages.
2278 int os::vm_allocation_granularity() {
2279   assert(os::Aix::page_size() != -1, "must call os::init");
2280   return os::Aix::page_size();
2281 }
2282 
2283 #ifdef PRODUCT
2284 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2285                                     int err) {
2286   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2287           ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2288           strerror(err), err);






















2289 }
2290 #endif
2291 
2292 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2293                                   const char* mesg) {
2294   assert(mesg != NULL, "mesg must be specified");
2295   if (!pd_commit_memory(addr, size, exec)) {
2296     // Add extra info in product mode for vm_exit_out_of_memory():
2297     PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2298     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2299   }
2300 }
2301 
2302 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2303 
2304   assert0(is_aligned_to(addr, os::vm_page_size()));
2305   assert0(is_aligned_to(size, os::vm_page_size()));
2306 
2307   vmembk_t* const vmi = vmembk_find(addr);
2308   assert0(vmi);
2309   vmi->assert_is_valid_subrange(addr, size);
2310 
2311   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2312 
2313   return true;
2314 }
2315 
2316 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2317   return pd_commit_memory(addr, size, exec);

2318 }
2319 
2320 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2321                                   size_t alignment_hint, bool exec,
2322                                   const char* mesg) {
2323   // Alignment_hint is ignored on this OS.
2324   pd_commit_memory_or_exit(addr, size, exec, mesg);
2325 }
2326 
2327 bool os::pd_uncommit_memory(char* addr, size_t size) {
2328   assert0(is_aligned_to(addr, os::vm_page_size()));
2329   assert0(is_aligned_to(size, os::vm_page_size()));
2330 
2331   // Dynamically do different things for mmap/shmat.
2332   const vmembk_t* const vmi = vmembk_find(addr);
2333   assert0(vmi);
2334   vmi->assert_is_valid_subrange(addr, size);
2335 
2336   if (vmi->type == VMEM_SHMATED) {
2337     return uncommit_shmated_memory(addr, size);
2338   } else {
2339     return uncommit_mmaped_memory(addr, size);















2340   }

2341 }
2342 
2343 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2344   // Do not call this; no need to commit stack pages on AIX.
2345   ShouldNotReachHere();
2346   return true;
2347 }
2348 
2349 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2350   // Do not call this; no need to commit stack pages on AIX.
2351   ShouldNotReachHere();
2352   return true;
2353 }
2354 
2355 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2356 }
2357 
2358 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2359 }
2360 
2361 void os::numa_make_global(char *addr, size_t bytes) {
2362 }
2363 
2364 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2365 }
2366 
2367 bool os::numa_topology_changed() {
2368   return false;
2369 }
2370 
2371 size_t os::numa_get_groups_num() {
2372   return 1;


2375 int os::numa_get_group_id() {
2376   return 0;
2377 }
2378 
2379 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2380   if (size > 0) {
2381     ids[0] = 0;
2382     return 1;
2383   }
2384   return 0;
2385 }
2386 
2387 bool os::get_page_info(char *start, page_info* info) {
2388   return false;
2389 }
2390 
2391 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2392   return end;
2393 }
2394 
2395 // Reserves and attaches a shared memory segment.
2396 // Will assert if a wish address is given and could not be obtained.
2397 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2398 
2399   // Other Unix platforms do an mmap(MAP_FIXED) if an address is given,
2400   // thereby clobbering any old mappings at that place. That is probably
2401   // not intended, is never used here, and would almost certainly be an
2402   // error were it ever used this way (to attach at a specified address
2403   // without clobbering old mappings, an alternate API exists:
2404   // os::attempt_reserve_memory_at()).
2405   // Instead of mimicking the dangerous coding of the other platforms, we
2406   // just ignore the requested address (release) or assert (debug).
2407   assert0(requested_addr == NULL);
2408 
2409   // Always round to os::vm_page_size(), which may be larger than 4K.
2410   bytes = align_size_up(bytes, os::vm_page_size());
2411   const size_t alignment_hint0 =
2412     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2413 
2414   // In 4K mode always use mmap.
2415   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2416   if (os::vm_page_size() == SIZE_4K) {
2417     return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2418   } else {
2419     if (bytes >= Use64KPagesThreshold) {
2420       return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2421     } else {
2422       return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2423     }
2424   }
2425 }
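// For illustration only, the reservation strategy above reduced to a single
// predicate (a sketch, not shipped code; SIZE_4K and Use64KPagesThreshold as
// used in this file, function name hypothetical):
#if 0
static bool sketch_would_use_shmat(size_t bytes) {
  // shmat with 64K pages is only chosen in 64K mode and for large requests;
  // everything else goes through mmap.
  return os::vm_page_size() != SIZE_4K && bytes >= Use64KPagesThreshold;
}
#endif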
2426 
2427 bool os::pd_release_memory(char* addr, size_t size) {
2428 
2429   // Dynamically do different things for mmap/shmat.
2430   vmembk_t* const vmi = vmembk_find(addr);
2431   assert0(vmi);
2432 
2433   // Always round to os::vm_page_size(), which may be larger than 4K.
2434   size = align_size_up(size, os::vm_page_size());
2435   addr = (char *)align_ptr_up(addr, os::vm_page_size());
2436 
2437   bool rc = false;
2438   bool remove_bookkeeping = false;
2439   if (vmi->type == VMEM_SHMATED) {
2440     // For shmatted memory, we do:
2441     // - If the user wants to release the whole range, release the memory (shmdt).
2442     // - If the user only wants to release a partial range, uncommit (disclaim)
2443     //   that range. That way we at least no longer use the memory (but we still
2444     //   occupy page table space).
2445     vmi->assert_is_valid_subrange(addr, size);
2446     if (addr == vmi->addr && size == vmi->size) {
2447       rc = release_shmated_memory(addr, size);
2448       remove_bookkeeping = true;
2449     } else {
2450       rc = uncommit_shmated_memory(addr, size);
2451     }
2452   } else {
2453     // The user may unmap partial regions, but the region has to be fully contained.
2454 #ifdef ASSERT
2455     vmi->assert_is_valid_subrange(addr, size);
2456 #endif
2457     rc = release_mmaped_memory(addr, size);
2458     remove_bookkeeping = true;
2459   }
2460 
2461   // update bookkeeping
2462   if (rc && remove_bookkeeping) {
2463     vmembk_remove(vmi);
2464   }
2465 
2466   return rc;
2467 }
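// For illustration only, the full-vs-partial release decision above as a
// sketch (not shipped code; vmembk_t fields as used in this file):
#if 0
static bool sketch_releases_whole_range(const vmembk_t* vmi, char* addr, size_t size) {
  // A full-range release detaches the segment (shmdt) and drops bookkeeping;
  // a partial release merely disclaims the pages but keeps the segment attached.
  return addr == vmi->addr && size == vmi->size;
}
#endif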
2468 
2469 static bool checked_mprotect(char* addr, size_t size, int prot) {
2470 
2471   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2472   // not tell us whether protection failed when trying to protect an un-protectable range.
2473   //
2474   // This means that if the memory was allocated using shmget/shmat, protection
2475   // won't work but mprotect will still return 0:
2476   //
2477   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2478 
2479   bool rc = ::mprotect(addr, size, prot) == 0;
2480 
2481   if (!rc) {
2482     const char* const s_errno = strerror(errno);
2483     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);


2524     ShouldNotReachHere();
2525   }
2526   // is_committed is unused.
2527   return checked_mprotect(addr, size, p);
2528 }
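// Since mprotect() may silently fail on shmated memory (see checked_mprotect
// above), one conceivable read-back check is sketched here. SafeFetch32 is the
// fault-safe load used elsewhere in HotSpot; this is an illustration, not a
// fix made by this change, and the function name is hypothetical.
#if 0
static bool sketch_none_protection_seems_active(char* addr) {
  const int canary = 0x12345678;
  // If the probe faults, SafeFetch32 returns the canary, suggesting PROT_NONE
  // is in effect. (A page that happens to contain the canary value would
  // produce a false positive, so this is a heuristic only.)
  return SafeFetch32((int*)addr, canary) == canary;
}
#endif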
2529 
2530 bool os::guard_memory(char* addr, size_t size) {
2531   return checked_mprotect(addr, size, PROT_NONE);
2532 }
2533 
2534 bool os::unguard_memory(char* addr, size_t size) {
2535   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2536 }
2537 
2538 // Large page support
2539 
2540 static size_t _large_page_size = 0;
2541 
2542 // Enable large page support if OS allows that.
2543 void os::large_page_init() {
2544   return; // Nothing to do. See query_multipage_support and friends.
2545 }
2546 
2547 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2548   // "exec" is passed in but not used. Creating the shared image for
2549   // the code cache doesn't have an SHM_X executable permission to check.
2550   Unimplemented();
2551   return 0;
2552 }
2553 
2554 bool os::release_memory_special(char* base, size_t bytes) {
2555   // Detaching the SHM segment will also delete it, see reserve_memory_special().
2556   Unimplemented();
2557   return false;
2558 }
2559 
2560 size_t os::large_page_size() {
2561   return _large_page_size;
2562 }
2563 
2564 bool os::can_commit_large_page_memory() {
2565   // Does not matter, we do not support huge pages.
2566   return false;
2567 }
2568 
2569 bool os::can_execute_large_page_memory() {
2570   // Does not matter, we do not support huge pages.
2571   return false;
2572 }
2573 
2574 // Reserve memory at an arbitrary address, only if that area is
2575 // available (and not reserved for something else).
2576 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2577   char* addr = NULL;
2578 
2579   // Always round to os::vm_page_size(), which may be larger than 4K.
2580   bytes = align_size_up(bytes, os::vm_page_size());
2581 
2582   // In 4K mode always use mmap.
2583   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2584   if (os::vm_page_size() == SIZE_4K) {
2585     return reserve_mmaped_memory(bytes, requested_addr, 0);
2586   } else {
2587     if (bytes >= Use64KPagesThreshold) {
2588       return reserve_shmated_memory(bytes, requested_addr, 0);
2589     } else {
2590       return reserve_mmaped_memory(bytes, requested_addr, 0);
2591     }
2592   }
2593 
2594   return addr;
2595 }
2596 
2597 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2598   return ::read(fd, buf, nBytes);
2599 }
2600 
2601 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2602   return ::pread(fd, buf, nBytes, offset);
2603 }
2604 
2605 void os::naked_short_sleep(jlong ms) {
2606   struct timespec req;
2607 
2608   assert(ms < 1000, "Un-interruptable sleep, short time use only");
2609   req.tv_sec = 0;
2610   if (ms > 0) {


3405     // signal
3406     if (!signal_name(exception_code, buf, size)) {
3407       jio_snprintf(buf, size, "SIG%d", exception_code);
3408     }
3409     return buf;
3410   } else {
3411     return NULL;
3412   }
3413 }
3414 
3415 // To install functions for atexit system call
3416 extern "C" {
3417   static void perfMemory_exit_helper() {
3418     perfMemory_exit();
3419   }
3420 }
3421 
3422 // This is called _before_ most of the global arguments have been parsed.
3423 void os::init(void) {
3424   // This is basic, we want to know if that ever changes.
3425   // (Shared memory boundary is supposed to be 256M aligned.)
3426   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3427 
3428   // First off, we need to know whether we run on AIX or PASE, and
3429   // the OS level we run on.
3430   os::Aix::initialize_os_info();
3431 
3432   // Scan environment (SPEC1170 behaviour, etc).
3433   os::Aix::scan_environment();
3434 
3435   // Check which pages are supported by AIX.
3436   query_multipage_support();
3437 
3438   // Act like we only have one page size by eliminating corner cases which
3439   // we did not support very well anyway.
3440   // We have two input conditions:
3441   // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3442   //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3443   //    setting.
3444   //    Data segment page size is important for us because it defines the thread stack page
3445   //    size, which is needed for guard page handling, stack banging etc.
3446   // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3447   //    and should be allocated with 64k pages.
3448   //
3449   // So, we do the following:
3450   // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3451   // 4K           no                                  4K                              old systems (AIX 5.2, AS/400 V5R4) or new systems with AME activated
3452   // 4K           yes                                 64K (treat 4K stacks as 64K)    different loader than java and standard settings
3453   // 64K          no                                  --- AIX 5.2 ? ---
3454   // 64K          yes                                 64K                             new systems and standard java loader (we set datapsize=64k when linking)
3455 
3456   // We explicitly leave no option to change the page size, because only upgrading
3457   // would work, not downgrading (if the stack page size is 64K you cannot pretend it's 4K).
3458 
3459   if (g_multipage_support.datapsize == SIZE_4K) {
3460     // datapsize = 4K. Data segment, thread stacks are 4K paged.
3461     if (g_multipage_support.can_use_64K_pages) {
3462       // .. but we are able to use 64K pages dynamically.
3463       // This would be typical for java launchers which are not linked
3464       // with datapsize=64K (like, any other launcher but our own).
3465       //
3466       // In this case it would be smart to allocate the java heap with 64K
3467       // to get the performance benefit, and to fake 64k pages for the
3468       // data segment (when dealing with thread stacks).
3469       //
3470       // However, leave a possibility to downgrade to 4K, using
3471       // -XX:-Use64KPages.
3472       if (Use64KPages) {
3473         trcVerbose("64K page mode (faked for data segment)");
3474         Aix::_page_size = SIZE_64K;
3475       } else {
3476         trcVerbose("4K page mode (Use64KPages=off)");
3477         Aix::_page_size = SIZE_4K;
3478       }
3479     } else {
3480       // .. and not able to allocate 64k pages dynamically. Here, just
3481       // fall back to 4K paged mode and use mmap for everything.
3482       trcVerbose("4K page mode");
3483       Aix::_page_size = SIZE_4K;
3484       FLAG_SET_ERGO(bool, Use64KPages, false);
3485     }
3486   } else {
3487     // datapsize = 64k. Data segment, thread stacks are 64k paged.
3488   //   This normally means that we can allocate 64K pages dynamically.
3489   //   (There is one special case where this may be false: EXTSHM=ON,
3490   //    but we decided to not support that mode.)
3491     assert0(g_multipage_support.can_use_64K_pages);
3492     Aix::_page_size = SIZE_64K;
3493     trcVerbose("64K page mode");
3494     FLAG_SET_ERGO(bool, Use64KPages, true);
3495   }
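// For illustration only, the page size decision above condensed into one
// sketch function (not shipped code; SIZE_4K/SIZE_64K as used in this file):
#if 0
static size_t sketch_choose_page_size(size_t datapsize, bool can_use_64K, bool use_64K_flag) {
  if (datapsize == SIZE_4K) {
    // 4K data segment: upgrade to 64K only if dynamic 64K paging works and
    // the user did not pass -XX:-Use64KPages.
    return (can_use_64K && use_64K_flag) ? SIZE_64K : SIZE_4K;
  }
  // A 64K data segment implies dynamic 64K paging (EXTSHM=ON is unsupported).
  return SIZE_64K;
}
#endif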
3496 
3497   // Hard-wire the stack page size to the base page size; if that works out, we
3498   // can remove the separate stack page size altogether.
3499   Aix::_stack_page_size = Aix::_page_size;
3500 
3501   // For now UseLargePages is just ignored.
3502   FLAG_SET_ERGO(bool, UseLargePages, false);
3503   _page_sizes[0] = 0;
3504   _large_page_size = -1;
3505 
3506   // debug trace
3507   trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3508 
3509   // Next, we need to initialize libo4 and libperfstat libraries.
3510   if (os::Aix::on_pase()) {
3511     os::Aix::initialize_libo4();
3512   } else {
3513     os::Aix::initialize_libperfstat();
3514   }
3515 
3516   // Reset the perfstat information provided by ODM.
3517   if (os::Aix::on_aix()) {
3518     libperfstat::perfstat_reset();
3519   }
3520 
3521   // Now initialize basic system properties. Note that for some of the values we
3522   // need libperfstat etc.
3523   os::Aix::initialize_system_info();
3524 




























3525   _initial_pid = getpid();
3526 
3527   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3528 
3529   init_random(1234567);
3530 
3531   ThreadCritical::initialize();
3532 
3533   // Main_thread points to the aboriginal thread.
3534   Aix::_main_thread = pthread_self();
3535 
3536   initial_time_count = os::elapsed_counter();
3537   pthread_mutex_init(&dl_mutex, NULL);
3538 
3539   // If the page size of the VM is greater than the default page size, determine
3540   // the appropriate number of initial guard pages. The user can change this with
3541   // the command line arguments, if needed.
3542   if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3543     StackYellowPages = 1;
3544     StackRedPages = 1;
3545     StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3546   }
3547 }
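// A worked example of the guard page rescaling above, with assumed values:
// for vm_page_size() == 64K, vm_default_page_size() == 4K and a default
// StackShadowPages of 20, round_to(20 * 4K, 64K) / 64K == 2, i.e. twenty 4K
// shadow pages collapse into two 64K pages covering the same footprint.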
3548 
3549 // This is called _after_ the global arguments have been parsed.
3550 jint os::init_2(void) {
3551 
3552   trcVerbose("processor count: %d", os::_processor_count);
3553   trcVerbose("physical memory: %lu", Aix::_physical_memory);
3554 
3555   // Initially build up the loaded dll map.
3556   LoadedLibraries::reload();
3557 
3558   const int page_size = Aix::page_size();
3559   const int map_size = page_size;
3560 
3561   address map_address = (address) MAP_FAILED;
3562   const int prot  = PROT_READ;
3563   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3564 
3565   // Use optimized addresses for the polling page,
3566   // e.g. map it to a special 32-bit address.
3567   if (OptimizePollingPageLocation) {
3568     // architecture-specific list of address wishes:
3569     address address_wishes[] = {
3570       // AIX: addresses lower than 0x30000000 don't seem to work.
3571       // PPC64: all address wishes are non-negative 32-bit values where
3572       // the lower 16 bits are all zero. We can load these addresses
3573       // with a single ppc_lis instruction.
3574       (address) 0x30000000, (address) 0x31000000,
3575       (address) 0x32000000, (address) 0x33000000,
3576       (address) 0x40000000, (address) 0x41000000,
3577       (address) 0x42000000, (address) 0x43000000,
3578       (address) 0x50000000, (address) 0x51000000,
3579       (address) 0x52000000, (address) 0x53000000,
3580       (address) 0x60000000, (address) 0x61000000,
3581       (address) 0x62000000, (address) 0x63000000
3582     };
3583     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3584 
3585     // iterate over the list of address wishes:
3586     for (int i=0; i<address_wishes_length; i++) {
3587       // Try to map with current address wish.
3588       // AIX needs MAP_FIXED if we provide an address, and mmap will
3589       // fail if the address is already mapped.
3590       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3591                                      map_size, prot,
3592                                      flags | MAP_FIXED,
3593                                      -1, 0);
3594       if (Verbose) {
3595         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3596                 address_wishes[i], map_address + (ssize_t)page_size);
3597       }
3598 
3599       if (map_address + (ssize_t)page_size == address_wishes[i]) {
3600         // Map succeeded and map_address is at wished address, exit loop.
3601         break;
3602       }
3603 
3604       if (map_address != (address) MAP_FAILED) {
3605         // Map succeeded, but polling_page is not at wished address, unmap and continue.
3606         ::munmap(map_address, map_size);
3607         map_address = (address) MAP_FAILED;
3608       }
3609       // Map failed, continue loop.
3610     }
3611   } // end OptimizePollingPageLocation
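// For illustration only, the "single ppc_lis" property of the wish addresses
// above as a sketch predicate (not shipped code): lis loads a signed 16-bit
// immediate into the upper halfword, so the usable non-negative values are
// multiples of 0x10000 up to 0x7FFF0000.
#if 0
static bool sketch_is_lis_loadable(uintptr_t a) {
  return (a & 0xFFFF) == 0 && a <= 0x7FFF0000;
}
#endif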
3612 
3613   if (map_address == (address) MAP_FAILED) {
3614     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3615   }
3616   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3617   os::set_polling_page(map_address);
3618 
3619   if (!UseMembar) {
3620     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3621     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3622     os::set_memory_serialize_page(mem_serialize_page);
3623 
3624 #ifndef PRODUCT
3625     if (Verbose && PrintMiscellaneous) {
3626       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3627     }
3628 #endif
3629   }
3630 
3631   // initialize suspend/resume support - must do this before signal_sets_init()
3632   if (SR_initialize() != 0) {
3633     perror("SR_initialize failed");
3634     return JNI_ERR;
3635   }
3636 
3637   Aix::signal_sets_init();
3638   Aix::install_signal_handlers();
3639 
3640   // Check minimum allowable stack size for thread creation and to initialize
3641   // the java system classes, including StackOverflowError - depends on page
3642   // size. Add a page for compiler2 recursion in main thread.
3643   // Add in 2*BytesPerWord times page size to account for VM stack during
3644   // class initialization depending on 32 or 64 bit VM.
3645   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3646             (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3647                      (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3648 
3649   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3650 
3651   size_t threadStackSizeInBytes = ThreadStackSize * K;
3652   if (threadStackSizeInBytes != 0 &&
3653       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3654     tty->print_cr("\nThe stack size specified is too small. "
3655                   "Specify at least %dk",
3656                   os::Aix::min_stack_allowed / K);
3657     return JNI_ERR;
3658   }
3659 
3660   // Make the stack size a multiple of the page size so that
3661   // the yellow/red zones can be guarded.
3662   // Note that this can be 0, if no default stacksize was set.
3663   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3664 
3665   Aix::libpthread_init();
3666 
3667   if (MaxFDLimit) {
3668     // Set the number of file descriptors to max. print out error
3669     // if getrlimit/setrlimit fails but continue regardless.
3670     struct rlimit nbr_files;
3671     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3672     if (status != 0) {
3673       if (PrintMiscellaneous && (Verbose || WizardMode))
3674         perror("os::init_2 getrlimit failed");
3675     } else {
3676       nbr_files.rlim_cur = nbr_files.rlim_max;
3677       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3678       if (status != 0) {
3679         if (PrintMiscellaneous && (Verbose || WizardMode))
3680           perror("os::init_2 setrlimit failed");
3681       }
3682     }
3683   }
3684 
3685   if (PerfAllowAtExitRegistration) {
3686     // Only register atexit functions if PerfAllowAtExitRegistration is set.
3687     // Atexit functions can be delayed until process exit time, which
3688     // can be problematic for embedded VM situations. Embedded VMs should
3689     // call DestroyJavaVM() to assure that VM resources are released.
3690 
3691     // Note: perfMemory_exit_helper atexit function may be removed in
3692     // the future if the appropriate cleanup code can be added to the
3693     // VM_Exit VMOperation's doit method.
3694     if (atexit(perfMemory_exit_helper) != 0) {
3695       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3696     }
3697   }
3698 
3699   return JNI_OK;
3700 }
3701 
3702 // Mark the polling page as unreadable
3703 void os::make_polling_page_unreadable(void) {
3704   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3705     fatal("Could not disable polling page");
3706   }
3707 };
3708 
3709 // Mark the polling page as readable
3710 void os::make_polling_page_readable(void) {
3711   // Changed according to os_linux.cpp.


3993   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3994     return 0;
3995   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3996     return 0;
3997   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3998     return 0;
3999   }
4000   *bytes = end - cur;
4001   return 1;
4002 }
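// For illustration only, the lseek64 dance above as a standalone sketch
// (names hypothetical; the surrounding function's signature is elided by
// the diff):
#if 0
static int sketch_bytes_to_eof(int fd, jlong* bytes) {
  jlong cur, end;
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) return 0;  // remember position
  if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) return 0;  // find EOF
  if (::lseek64(fd, cur, SEEK_SET) == -1) return 0;         // restore position
  *bytes = end - cur;
  return 1;
}
#endif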
4003 
4004 // Map a block of memory.
4005 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4006                         char *addr, size_t bytes, bool read_only,
4007                         bool allow_exec) {
4008   int prot;
4009   int flags = MAP_PRIVATE;
4010 
4011   if (read_only) {
4012     prot = PROT_READ;
4013     flags = MAP_SHARED;
4014   } else {
4015     prot = PROT_READ | PROT_WRITE;
4016     flags = MAP_PRIVATE;
4017   }
4018 
4019   if (allow_exec) {
4020     prot |= PROT_EXEC;
4021   }
4022 
4023   if (addr != NULL) {
4024     flags |= MAP_FIXED;
4025   }
4026 
4027   // Allow anonymous mappings if 'fd' is -1.
4028   if (fd == -1) {
4029     flags |= MAP_ANONYMOUS;
4030   }
4031 
4032   char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4033                                      fd, file_offset);
4034   if (mapped_address == MAP_FAILED) {
4035     return NULL;
4036   }
4037   return mapped_address;
4038 }
4039 
4040 // Remap a block of memory.
4041 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4042                           char *addr, size_t bytes, bool read_only,
4043                           bool allow_exec) {
4044   // same as map_memory() on this OS
4045   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4046                         allow_exec);
4047 }
4048 
4049 // Unmap a block of memory.
4050 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4051   return munmap(addr, bytes) == 0;
4052 }
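// Hedged usage sketch for the mapping helper above (not shipped code; the
// file name and descriptor handling are made-up illustration). A read-only
// request selects PROT_READ with MAP_SHARED, as implemented above.
#if 0
static char* sketch_map_file_ro(int fd, size_t bytes) {
  return os::map_memory(fd, "example", 0, NULL, bytes,
                        true /* read_only */, false /* allow_exec */);
}
#endif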


4270 
4271 // Scan environment for important settings which might affect the VM.
4272 // Trace out settings. Warn about invalid settings and/or correct them.
4273 //
4274 // Must run after os::Aix::initialize_os_info().
4275 void os::Aix::scan_environment() {
4276 
4277   char* p;
4278   int rc;
4279 
4280   // Warn explicitly if EXTSHM=ON is used. That switch changes how
4281   // System V shared memory behaves. One effect is that the page size of
4282   // shared memory cannot be changed dynamically, effectively preventing
4283   // large pages from working.
4284   // This switch was needed on AIX 32bit, but on AIX 64bit the general
4285   // recommendation is (in OSS notes) to switch it off.
4286   p = ::getenv("EXTSHM");
4287   if (Verbose) {
4288     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4289   }
4290   if (p && strcasecmp(p, "ON") == 0) {
4291     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4292     _extshm = 1;
4293   } else {
4294     _extshm = 0;
4295   }
4296 
4297   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4298   // Not tested, not supported.
4299   //
4300   // Note that it might be worth the trouble to test and to require it, if only to
4301   // get useful return codes for mprotect.
4302   //
4303   // Note: Setting XPG_SUS_ENV in the process is too late. It must be set earlier
4304   // (before exec()? before loading libjvm? ...).
4305   p = ::getenv("XPG_SUS_ENV");
4306   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4307   if (p && strcmp(p, "ON") == 0) {
4308     _xpg_sus_mode = 1;
4309     trc("Unsupported setting: XPG_SUS_ENV=ON");
4310     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to


4331 
4332 // AIX: initialize the libperfstat library (we load this dynamically
4333 // because it is only available on AIX).
4334 void os::Aix::initialize_libperfstat() {
4335 
4336   assert(os::Aix::on_aix(), "AIX only");
4337 
4338   if (!libperfstat::init()) {
4339     trc("libperfstat initialization failed.");
4340     assert(false, "libperfstat initialization failed");
4341   } else {
4342     if (Verbose) {
4343       fprintf(stderr, "libperfstat initialized.\n");
4344     }
4345   }
4346 } // end: os::Aix::initialize_libperfstat
4347 
4348 /////////////////////////////////////////////////////////////////////////////
4349 // thread stack
4350 
4351 // Function to query the current stack size using pthread_getthrds_np.
4352 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4353   // This only works when invoked on a pthread. As we agreed not to use
4354   // primordial threads anyway, I assert here.
4355   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4356 
4357   // Information about this api can be found (a) in the pthread.h header and
4358   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4359   //
4360   // The use of this API to find out the current stack is kind of undefined.
4361   // But after a lot of tries and asking IBM about it, I concluded that it is safe
4362   // enough for cases where I let the pthread library create its stacks. For cases
4363   // where I create my own stack and pass it to pthread_create, it seems not to
4364   // work (the returned stack size in that case is 0).
4365 
4366   pthread_t tid = pthread_self();
4367   struct __pthrdsinfo pinfo;
4368   char dummy[1]; // We only need this to satisfy the api and to not get E.
4369   int dummy_size = sizeof(dummy);
4370 
4371   memset(&pinfo, 0, sizeof(pinfo));
4372 
4373   const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4374                                      sizeof(pinfo), dummy, &dummy_size);
4375 
4376   if (rc != 0) {
4377     assert0(false);
4378     trcVerbose("pthread_getthrds_np failed (%d)", rc);
4379     return false;
4380   }
4381   guarantee0(pinfo.__pi_stackend);
4382 
4383   // The following can happen when invoking pthread_getthrds_np on a pthread running
4384   // on a user-provided stack (when handing down a stack to pthread_create, see
4385   //   pthread_attr_setstackaddr).
4386   // Not sure what to do here - I feel inclined to forbid this use case completely.
4387   guarantee0(pinfo.__pi_stacksize);
4388 
4389   // Note: the pthread stack on AIX seems to look like this:
4390   //
4391   // ---------------------   real base ? at page border ?
4392   // 
4393   //     pthread internal data, like ~2K, see also 
4394   //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4395   // 
4396   // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4397   // 
4398   //     stack 
4399   //      ....
4400   //
4401   //     stack 
4402   // 
4403   // ---------------------   __pi_stackend  - __pi_stacksize
4404   // 
4405   //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4406   // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4407   //
4408   //   AIX guard pages (?)
4409   //
4410 
4411   // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4412   // __pi_stackend however is almost never page aligned.
4413   //
4414 
4415   if (p_stack_base) {
4416     (*p_stack_base) = (address) (pinfo.__pi_stackend);
4417   }
4418 
4419   if (p_stack_size) {
4420     (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4421   }
4422 
4423   return true;
4424 }
4425 
4426 // Get the current stack base from the OS (actually, the pthread library).
4427 address os::current_stack_base() {
4428   address p;
4429   query_stack_dimensions(&p, 0);
4430   return p;
4431 }
4432 
4433 // Get the current stack size from the OS (actually, the pthread library).
4434 size_t os::current_stack_size() {
4435   size_t s;
4436   query_stack_dimensions(0, &s);
4437   return s;
4438 }
4439 
4440 // Refer to the comments in os_solaris.cpp park-unpark.
4441 //
4442 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4443 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4444 // For specifics regarding the bug see GLIBC BUGID 261237 :
4445 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4446 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4447 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4448 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
4449 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4450 // and monitorenter when we're using 1-0 locking. All those operations may result in
4451 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4452 // of libpthread avoids the problem, but isn't practical.
4453 //

