src/hotspot/share/gc/g1/g1NUMA.cpp

@@ -29,15 +29,10 @@
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 
 G1NUMA* G1NUMA::_inst = NULL;
 
-void* G1NUMA::base_address() const {
-  assert(_base_address != NULL, "Base address is not yet set");
-  return _base_address;
-}
-
 size_t G1NUMA::region_size() const {
   assert(_region_size > 0, "Heap region size is not yet set");
   return _region_size;
 }
 

@@ -77,11 +72,11 @@
 }
 
 G1NUMA::G1NUMA() :
   _node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
   _node_ids(NULL), _num_active_node_ids(0),
-  _base_address(NULL), _region_size(0), _page_size(0) {
+  _region_size(0), _page_size(0) {
 }
 
 void G1NUMA::initialize_without_numa() {
  // If NUMA is not enabled or supported, initialize as having a single node.
   _num_active_node_ids = 1;

@@ -129,12 +124,11 @@
 G1NUMA::~G1NUMA() {
   FREE_C_HEAP_ARRAY(int, _node_id_to_index_map);
   FREE_C_HEAP_ARRAY(int, _node_ids);
 }
 
-void G1NUMA::set_region_info(void* base_address, size_t region_size, size_t page_size) {
-  _base_address = base_address;
+void G1NUMA::set_region_info(size_t region_size, size_t page_size) {
   _region_size = region_size;
   _page_size = page_size;
 }
 
 uint G1NUMA::num_active_nodes() const {

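With _base_address removed from G1NUMA, set_region_info() now records only the region and page sizes. A minimal sketch of the updated two-argument call, assuming it is made from heap setup code (the wrapper function and variable names here are illustrative, not part of this patch):

#include "gc/g1/g1NUMA.hpp"

// Hedged sketch: record the sizes G1NUMA needs; no base address is passed anymore.
static void record_numa_region_info(size_t region_size, size_t page_size) {
  G1NUMA::numa()->set_region_info(region_size, page_size);
}
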
@@ -201,32 +195,29 @@
 //      Memory will be touched one page at a time because G1RegionToSpaceMapper commits
 //      pages one by one.
 //      * Page #:       |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
 //      * HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
 //      * NUMA node #:  |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
-void G1NUMA::request_memory_on_node(size_t start_page, size_t size_in_pages, uint region_index) {
+void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
   if (!is_enabled()) {
     return;
   }
   
-  if (size_in_pages == 0) {
+  if (size_in_bytes == 0) {
     return;
   }
 
-  char* aligned_address = (char*)base_address() + start_page * page_size();
-  size_t size_in_bytes = size_in_pages * page_size();
   uint node_index = preferred_node_index_for_index(region_index);
 
   assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
   assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
 
   log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be numa id (%d).",
-                            p2i(aligned_address), p2i(aligned_address + size_in_bytes), _node_ids[node_index]);
-  os::numa_make_local(aligned_address, size_in_bytes, _node_ids[node_index]);
+                            p2i(aligned_address), p2i((char*)aligned_address + size_in_bytes), _node_ids[node_index]);
+  os::numa_make_local((char*)aligned_address, size_in_bytes, _node_ids[node_index]);
 }
 
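Since request_memory_on_node() now takes a page-aligned address and a byte size, the page-index arithmetic done by the two removed lines moves to the caller. A minimal sketch of that caller-side conversion, assuming hypothetical heap_base/start_page/size_in_pages values (only request_memory_on_node() itself comes from this patch):

#include "gc/g1/g1NUMA.hpp"

// Hedged sketch: translate a page range into the address/size arguments expected
// by the new signature, mirroring the removed lines above.
static void request_pages_on_node(G1NUMA* numa, char* heap_base, size_t page_size,
                                  size_t start_page, size_t size_in_pages, uint region_index) {
  char*  aligned_address = heap_base + start_page * page_size;  // was base_address() + start_page * page_size()
  size_t size_in_bytes   = size_in_pages * page_size;           // was size_in_pages * page_size()
  numa->request_memory_on_node(aligned_address, size_in_bytes, region_index);
}
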
-
 uint G1NUMA::max_search_depth() const {
   // The multiple of 3 is just an arbitrary factor to limit iterations.
   // In some cases a single page may consist of multiple HeapRegions.
   return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
 }
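
To make the formula above concrete, here is a small standalone sketch that evaluates the same expression for made-up example values (2M pages, 1M heap regions, 4 active NUMA nodes; none of these numbers come from the patch):

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned page_size   = 2u * 1024 * 1024;  // assumed large-page size
  const unsigned region_size = 1u * 1024 * 1024;  // assumed HeapRegion size
  const unsigned num_nodes   = 4;                 // assumed active NUMA nodes

  // Same shape as 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes().
  unsigned depth = 3 * std::max(page_size / region_size, 1u) * num_nodes;
  printf("max search depth = %u\n", depth);       // prints 24 for these inputs
  return 0;
}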