/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/heapRegion.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"

G1NUMA* G1NUMA::_inst = NULL;

size_t G1NUMA::region_size() const {
  assert(_region_size > 0, "Heap region size is not yet set");
  return _region_size;
}

size_t G1NUMA::page_size() const {
  assert(_page_size > 0, "Page size is not yet set");
  return _page_size;
}

bool G1NUMA::is_enabled() const { return num_active_nodes() > 1; }

G1NUMA* G1NUMA::create() {
  guarantee(_inst == NULL, "Should be called once.");
  _inst = new G1NUMA();

  // NUMA only supported on Linux.
#ifdef LINUX
  _inst->initialize(UseNUMA);
#else
  _inst->initialize(false);
#endif /* LINUX */

  return _inst;
}

// Returns memory node ids
const int* G1NUMA::node_ids() const {
  return _node_ids;
}

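// Returns the index into _node_ids for the given memory node id. The id is
// expected to refer to an active node; debug builds assert this.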
uint G1NUMA::index_of_node_id(int node_id) const {
  assert(node_id >= 0, "invalid node id %d", node_id);
  assert(node_id < _len_node_id_to_index_map, "invalid node id %d", node_id);
  uint node_index = _node_id_to_index_map[node_id];
  assert(node_index != G1NUMA::UnknownNodeIndex,
         "invalid node id %d", node_id);
  return node_index;
}

G1NUMA::G1NUMA() :
  _node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
  _node_ids(NULL), _num_active_node_ids(0),
  _region_size(0), _page_size(0) {
}

void G1NUMA::initialize_without_numa() {
  // If NUMA is not enabled or supported, initialize as having a single node.
  _num_active_node_ids = 1;
  _node_ids = NEW_C_HEAP_ARRAY(int, _num_active_node_ids, mtGC);
  _node_ids[0] = 0;
  // Map index 0 to node 0
  _len_node_id_to_index_map = 1;
  _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);
  _node_id_to_index_map[0] = 0;
}

void G1NUMA::initialize(bool use_numa) {
  if (!use_numa) {
    initialize_without_numa();
    return;
  }

  assert(UseNUMA, "Invariant");
  size_t num_node_ids = os::numa_get_groups_num();

  // Create an array of active node ids.
  _node_ids = NEW_C_HEAP_ARRAY(int, num_node_ids, mtGC);
  _num_active_node_ids = (uint)os::numa_get_leaf_groups(_node_ids, num_node_ids);

  int max_node_id = 0;
  for (uint i = 0; i < _num_active_node_ids; i++) {
    max_node_id = MAX2(max_node_id, _node_ids[i]);
  }

  // Create a mapping between node_id and index.
  _len_node_id_to_index_map = max_node_id + 1;
  _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);

  // Set all indices to the unknown node index.
  for (int i = 0; i < _len_node_id_to_index_map; i++) {
    _node_id_to_index_map[i] = G1NUMA::UnknownNodeIndex;
  }

  // Set the indices for the actually retrieved node ids.
  for (uint i = 0; i < _num_active_node_ids; i++) {
    _node_id_to_index_map[_node_ids[i]] = i;
  }
}

G1NUMA::~G1NUMA() {
  FREE_C_HEAP_ARRAY(int, _node_id_to_index_map);
  FREE_C_HEAP_ARRAY(int, _node_ids);
}

void G1NUMA::set_region_info(size_t region_size, size_t page_size) {
  _region_size = region_size;
  _page_size = page_size;
}

uint G1NUMA::num_active_nodes() const {
  assert(_num_active_node_ids > 0, "just checking");
  return _num_active_node_ids;
}

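// Returns the node index of the node the calling thread is currently running
// on, or 0 when NUMA is not enabled.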
uint G1NUMA::index_of_current_thread() const {
  if (!is_enabled()) {
    return 0;
  }
  return index_of_node_id(os::numa_get_group_id());
}

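// Returns the preferred node index for the given region index, spreading
// regions evenly across the active nodes. As an illustrative example with
// assumed sizes (4 active nodes, 2 MB regions, 8 MB pages): regions_per_page
// is 4, so regions 0-3 prefer node index 0, regions 4-7 prefer node index 1,
// and so on.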
uint G1NUMA::preferred_node_index_for_index(uint region_index) const {
  if (region_size() >= page_size()) {
    // Simple case, pages are no larger than the regions, so we
    // can just alternate regions over the nodes.
    return region_index % _num_active_node_ids;
  } else {
    // Multiple regions fit in one page, so we need to make sure that all
    // regions within a page are preferred on the same node.
    size_t regions_per_page = page_size() / region_size();
    return (region_index / regions_per_page) % _num_active_node_ids;
  }
}

int G1NUMA::numa_id(int index) const {
  assert(index < _len_node_id_to_index_map, "Index %d out of range: [0,%d)",
         index, _len_node_id_to_index_map);
  return _node_ids[index];
}

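// Returns the node index of the node that currently backs the given address,
// or UnknownNodeIndex if the backing node cannot be determined.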
uint G1NUMA::index_of_address(HeapWord* address) const {
  int numa_id = os::numa_get_address_id((void*)address);
  if (numa_id == os::InvalidNUMAId) {
    return UnknownNodeIndex;
  } else {
    return index_of_node_id(numa_id);
  }
}

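// Returns the node index for the given HeapRegion: the node actually backing
// its bottom address if the heap has been pretouched, otherwise the node the
// region is preferred to be on.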
uint G1NUMA::index_for_region(HeapRegion* hr) const {
  if (!is_enabled()) {
    return 0;
  }

  if (AlwaysPreTouch) {
    // If we already pretouched, we can check the actual node index here.
    return index_of_address(hr->bottom());
  }

  return preferred_node_index_for_index(hr->hrm_index());
}

// Request to spread the given memory evenly across the available NUMA
// nodes. Which node to request for a given address is determined by the
// region size and the page size. Below are two examples on a system with 4 NUMA nodes:
//   1. G1HeapRegionSize(_region_size) is larger than or equal to page size.
//      * Page #:       |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
//      * HeapRegion #: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
//      * NUMA node #:  |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
//   2. G1HeapRegionSize(_region_size) is smaller than page size.
//      Memory will be touched one page at a time because G1RegionToSpaceMapper commits
//      pages one by one.
//      * Page #:       |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
//      * HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
//      * NUMA node #:  |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
  if (!is_enabled()) {
    return;
  }

  if (size_in_bytes == 0) {
    return;
  }

  uint node_index = preferred_node_index_for_index(region_index);

  assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
  assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);

  log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be numa id (%d).",
                            p2i(aligned_address), p2i((char*)aligned_address + size_in_bytes), _node_ids[node_index]);
  os::numa_make_local((char*)aligned_address, size_in_bytes, _node_ids[node_index]);
}

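// Returns a limit on search iterations, scaled by the number of HeapRegions
// per page and the number of active nodes. As an illustrative example with
// assumed values (4 active nodes, pages 4 times the region size), this
// returns 3 * 4 * 4 = 48.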
uint G1NUMA::max_search_depth() const {
  // The multiplier of 3 is an arbitrary value used to limit iterations.
  // In some cases a single page may consist of multiple HeapRegions.
  return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
}