
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 7992 : G1RootProcessor
rev 7998 : imported patch thomas-comments
rev 8001 : [mq]: remove-g1setparthreads


 763   // concurrently after the collection.
 764   DirtyCardQueueSet _dirty_card_queue_set;
 765 
 766   // The closure used to refine a single card.
 767   RefineCardTableEntryClosure* _refine_cte_cl;
 768 
 769   // A DirtyCardQueueSet that is used to hold cards that contain
 770   // references into the current collection set. This is used to
 771   // update the remembered sets of the regions in the collection
 772   // set in the event of an evacuation failure.
 773   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 774 
 775   // After a collection pause, make the regions in the CS into free
 776   // regions.
 777   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 778 
 779   // Abandon the current collection set without recording policy
 780   // statistics or updating free lists.
 781   void abandon_collection_set(HeapRegion* cs_head);
 782 
 783   // Applies "scan_non_heap_roots" to roots outside the heap,
 784   // "scan_rs" to roots inside the heap (having done "set_region" to
 785   // indicate the region in which the root resides),
 786   // and applies "scan_metadata". If "scan_rs" is
 787   // NULL, then this step is skipped.  The "worker_i"
 788   // param is for use with parallel roots processing, and should be
 789   // the "i" of the calling parallel worker thread's work(i) function.
 790   // In the sequential case this param will be ignored.
 791   void g1_process_roots(OopClosure* scan_non_heap_roots,
 792                         OopClosure* scan_non_heap_weak_roots,
 793                         G1ParPushHeapRSClosure* scan_rs,
 794                         CLDClosure* scan_strong_clds,
 795                         CLDClosure* scan_weak_clds,
 796                         CodeBlobClosure* scan_strong_code,
 797                         uint worker_i);
 798 
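A minimal usage sketch of how these parameters fit together. It assumes the call is made from inside G1CollectedHeap (or a friend GC task), since the member is not public; the wrapper function and the closure instances are hypothetical and only the g1_process_roots signature comes from this header:

    // Hypothetical illustration only -- the closure objects are placeholders,
    // not the actual closures G1 builds during an evacuation pause.
    void example_scan_roots(G1CollectedHeap* g1h,
                            OopClosure* strong_oops,
                            OopClosure* weak_oops,
                            G1ParPushHeapRSClosure* push_heap_rs,
                            CLDClosure* strong_clds,
                            CLDClosure* weak_clds,
                            CodeBlobClosure* strong_code,
                            uint worker_id) {
      // worker_id is the "i" of the calling worker's work(i) function.
      // Passing NULL for the scan_rs argument would skip the remembered-set
      // scanning step, as described in the comment above.
      g1h->g1_process_roots(strong_oops, weak_oops, push_heap_rs,
                            strong_clds, weak_clds, strong_code, worker_id);
    }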
 799   // The concurrent marker (and the thread it runs in.)
 800   ConcurrentMark* _cm;
 801   ConcurrentMarkThread* _cmThread;
 802   bool _mark_in_progress;
 803 
 804   // The concurrent refiner.
 805   ConcurrentG1Refine* _cg1r;
 806 
 807   // The parallel task queues
 808   RefToScanQueueSet *_task_queues;
 809 
 810   // True iff an evacuation has failed in the current collection.
 811   bool _evacuation_failed;
 812 
 813   EvacuationFailedInfo* _evacuation_failed_info_array;
 814 
 815   // Failed evacuations cause some logical from-space objects to have
 816   // forwarding pointers to themselves.  Reset them.
 817   void remove_self_forwarding_pointers();
 818 
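As a concrete illustration of the comment above: a from-space object that failed to evacuate is forwarded to itself. A hedged sketch, using the standard oopDesc forwarding accessors, of how such an object can be recognized before remove_self_forwarding_pointers() resets it:

    // Sketch: detect an object left self-forwarded by a failed evacuation.
    bool self_forwarded = obj->is_forwarded() && obj->forwardee() == obj;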


 965   ReferenceProcessor* _ref_processor_cm;
 966 
 967   // Instance of the concurrent mark is_alive closure for embedding
 968   // into the Concurrent Marking reference processor as the
 969   // _is_alive_non_header field. Supplying a value for the
 970   // _is_alive_non_header field is optional but doing so prevents
 971   // unnecessary additions to the discovered lists during reference
 972   // discovery.
 973   G1CMIsAliveClosure _is_alive_closure_cm;
 974 
 975   // Cache used by G1CollectedHeap::start_cset_region_for_worker().
 976   HeapRegion** _worker_cset_start_region;
 977 
 978   // Time stamp to validate the regions recorded in the cache
 979   // used by G1CollectedHeap::start_cset_region_for_worker().
 980   // The heap region entry for a given worker is valid iff
 981   // the associated time stamp value matches the current value
 982   // of G1CollectedHeap::_gc_time_stamp.
 983   uint* _worker_cset_start_region_time_stamp;
 984 
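The validity rule above can be written directly as a predicate. A minimal sketch assuming access from inside G1CollectedHeap; the helper name is made up, the two fields are the ones declared here:

    // The cached start region for worker_i may only be used if its time
    // stamp still matches the heap's current _gc_time_stamp.
    bool worker_cset_start_region_is_valid(uint worker_i) const {
      return _worker_cset_start_region_time_stamp[worker_i] == _gc_time_stamp;
    }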
 985   enum G1H_process_roots_tasks {
 986     G1H_PS_filter_satb_buffers,
 987     G1H_PS_refProcessor_oops_do,
 988     // Leave this one last.
 989     G1H_PS_NumElements
 990   };
 991 
 992   SubTasksDone* _process_strong_tasks;
 993 
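A short sketch of how the enum and _process_strong_tasks are typically used together during root processing: each enum element names a serial sub-task that exactly one worker claims (is_task_claimed() is the SubTasksDone API in this HotSpot version; the body shown is illustrative, not the actual G1 code):

    if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
      // Only the worker that wins the claim processes the reference
      // processor's roots; all other workers skip this element.
      process_reference_processor_roots();   // placeholder for the real work
    }
    // Once every worker has passed through all elements, the claims are
    // reset so the SubTasksDone instance can be reused for the next pause.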
 994   volatile bool _free_regions_coming;
 995 
 996 public:
 997 
 998   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
 999 
1000   void set_refine_cte_cl_concurrency(bool concurrent);
1001 
1002   RefToScanQueue *task_queue(int i) const;
1003 
1004   // A set of cards where updates happened during the GC
1005   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
1006 
1007   // A DirtyCardQueueSet that is used to hold cards that contain
1008   // references into the current collection set. This is used to
1009   // update the remembered sets of the regions in the collection
1010   // set in the event of an evacuation failure.
1011   DirtyCardQueueSet& into_cset_dirty_card_queue_set()
1012         { return _into_cset_dirty_card_queue_set; }
1013 
1014   // Create a G1CollectedHeap with the specified policy.
1015   // Must call the initialize method afterwards.
1016   // May not return if something goes wrong.
1017   G1CollectedHeap(G1CollectorPolicy* policy);
1018 
1019   // Initialize the G1CollectedHeap to have the initial and
1020   // maximum sizes and remembered and barrier sets
1021   // specified by the policy object.
1022   jint initialize();
1023 
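A minimal sketch of the construction protocol documented above (the real call site is the VM's heap-initialization code; the policy object is assumed to already exist):

    G1CollectedHeap* g1h = new G1CollectedHeap(policy);  // "policy" assumed: a G1CollectorPolicy*
    jint status = g1h->initialize();   // must be called after construction
    if (status != JNI_OK) {
      // Initialization failed; the heap must not be used.
    }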
1024   virtual void stop();
1025 
1026   // Return the (conservative) maximum heap alignment for any G1 heap
1027   static size_t conservative_max_heap_alignment();
1028 
1029   // Initialize weak reference processing.
1030   virtual void ref_processing_init();
1031 
1032   void set_par_threads(uint t) {
1033     SharedHeap::set_par_threads(t);
1034     // Done in SharedHeap, but oddly there are
1035     // two _process_strong_tasks instances in a G1CollectedHeap,
1036     // so do it here too.
1037     _process_strong_tasks->set_n_threads(t);
1038   }
1039 
1040   // Set _n_par_threads according to a policy TBD.
1041   void set_par_threads();
1042 
1043   void set_n_termination(int t) {
1044     _process_strong_tasks->set_n_threads(t);
1045   }
1046 
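A hedged sketch of the usage pattern these two methods imply in the pre-patch code: the caller sizes the worker count before running a parallel task and resets it afterwards (workers(), active_workers() and run_task() are the FlexibleWorkGang API; the gang task object itself is assumed):

    uint n_workers = workers()->active_workers();
    set_par_threads(n_workers);             // also sizes _process_strong_tasks
    workers()->run_task(&some_gang_task);   // "some_gang_task": assumed AbstractGangTask
    set_par_threads(0);                     // restore the sequential default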
1047   virtual CollectedHeap::Name kind() const {
1048     return CollectedHeap::G1CollectedHeap;
1049   }
1050 
1051   // The current policy object for the collector.
1052   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1053 
1054   virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }
1055 
1056   // Adaptive size policy.  No such thing for g1.
1057   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1058 
1059   // The rem set and barrier set.
1060   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1061 
1062   unsigned get_gc_time_stamp() {
1063     return _gc_time_stamp;
1064   }
1065 




 763   // concurrently after the collection.
 764   DirtyCardQueueSet _dirty_card_queue_set;
 765 
 766   // The closure used to refine a single card.
 767   RefineCardTableEntryClosure* _refine_cte_cl;
 768 
 769   // A DirtyCardQueueSet that is used to hold cards that contain
 770   // references into the current collection set. This is used to
 771   // update the remembered sets of the regions in the collection
 772   // set in the event of an evacuation failure.
 773   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 774 
 775   // After a collection pause, make the regions in the CS into free
 776   // regions.
 777   void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 778 
 779   // Abandon the current collection set without recording policy
 780   // statistics or updating free lists.
 781   void abandon_collection_set(HeapRegion* cs_head);
 782 
















 783   // The concurrent marker (and the thread it runs in.)
 784   ConcurrentMark* _cm;
 785   ConcurrentMarkThread* _cmThread;
 786   bool _mark_in_progress;
 787 
 788   // The concurrent refiner.
 789   ConcurrentG1Refine* _cg1r;
 790 
 791   // The parallel task queues
 792   RefToScanQueueSet *_task_queues;
 793 
 794   // True iff an evacuation has failed in the current collection.
 795   bool _evacuation_failed;
 796 
 797   EvacuationFailedInfo* _evacuation_failed_info_array;
 798 
 799   // Failed evacuations cause some logical from-space objects to have
 800   // forwarding pointers to themselves.  Reset them.
 801   void remove_self_forwarding_pointers();
 802 


 949   ReferenceProcessor* _ref_processor_cm;
 950 
 951   // Instance of the concurrent mark is_alive closure for embedding
 952   // into the Concurrent Marking reference processor as the
 953   // _is_alive_non_header field. Supplying a value for the
 954   // _is_alive_non_header field is optional but doing so prevents
 955   // unnecessary additions to the discovered lists during reference
 956   // discovery.
 957   G1CMIsAliveClosure _is_alive_closure_cm;
 958 
 959   // Cache used by G1CollectedHeap::start_cset_region_for_worker().
 960   HeapRegion** _worker_cset_start_region;
 961 
 962   // Time stamp to validate the regions recorded in the cache
 963   // used by G1CollectedHeap::start_cset_region_for_worker().
 964   // The heap region entry for a given worker is valid iff
 965   // the associated time stamp value matches the current value
 966   // of G1CollectedHeap::_gc_time_stamp.
 967   uint* _worker_cset_start_region_time_stamp;
 968 









 969   volatile bool _free_regions_coming;
 970 
 971 public:
 972 


 973   void set_refine_cte_cl_concurrency(bool concurrent);
 974 
 975   RefToScanQueue *task_queue(int i) const;
 976 
 977   // A set of cards where updates happened during the GC
 978   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 979 
 980   // A DirtyCardQueueSet that is used to hold cards that contain
 981   // references into the current collection set. This is used to
 982   // update the remembered sets of the regions in the collection
 983   // set in the event of an evacuation failure.
 984   DirtyCardQueueSet& into_cset_dirty_card_queue_set()
 985         { return _into_cset_dirty_card_queue_set; }
 986 
 987   // Create a G1CollectedHeap with the specified policy.
 988   // Must call the initialize method afterwards.
 989   // May not return if something goes wrong.
 990   G1CollectedHeap(G1CollectorPolicy* policy);
 991 
 992   // Initialize the G1CollectedHeap to have the initial and
 993   // maximum sizes and remembered and barrier sets
 994   // specified by the policy object.
 995   jint initialize();
 996 
 997   virtual void stop();
 998 
 999   // Return the (conservative) maximum heap alignment for any G1 heap
1000   static size_t conservative_max_heap_alignment();
1001 
1002   // Initialize weak reference processing.
1003   virtual void ref_processing_init();
1004 








1005   // Set _n_par_threads according to a policy TBD.
1006   void set_par_threads();




1007 
1008   virtual CollectedHeap::Name kind() const {
1009     return CollectedHeap::G1CollectedHeap;
1010   }
1011 
1012   // The current policy object for the collector.
1013   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1014 
1015   virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }
1016 
1017   // Adaptive size policy.  No such thing for g1.
1018   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1019 
1020   // The rem set and barrier set.
1021   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1022 
1023   unsigned get_gc_time_stamp() {
1024     return _gc_time_stamp;
1025   }
1026 

