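// Holder for the complete set of closures shared by the G1 evacuation code:
// a plain oop copying closure, one for oops embedded in Klasses, a Klass
// scanner driven from class loader data (CLDs), a code blob scanner, and a
// buffering wrapper that batches oops before applying the copy closure.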
template <G1Mark Mark>
class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
public:
  G1ParCopyClosure<G1BarrierNone,  Mark> _oops;
  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
  G1KlassScanClosure                     _klass_in_cld_closure;
  CLDToKlassAndOopClosure                _clds;
  G1CodeBlobClosure                      _codeblobs;
  BufferingOopClosure                    _buffered_oops;

  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
    _oops(g1h, pss),
    _oop_in_klass(g1h, pss),
    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
    _codeblobs(&_oops),
    _buffered_oops(&_oops) {}
};

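// Root closures for a regular (non-initial-mark) evacuation pause. Nothing
// needs to be marked through, so a single G1MarkNone closure set serves both
// the strong and the weak root variants.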
class G1EvacuationClosures : public G1EvacuationRootClosures {
  G1SharedClosures<G1MarkNone> _closures;
  G1ParPushHeapRSClosure       _inter_region_oops;

public:
  G1EvacuationClosures(G1CollectedHeap* g1h,
                       G1ParScanThreadState* pss,
                       bool gcs_are_young) :
    _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false),
    _inter_region_oops(g1h, pss) {}

  OopClosure* weak_oops() { return &_closures._buffered_oops; }
  OopClosure* strong_oops() { return &_closures._buffered_oops; }

  CLDClosure* weak_clds() { return &_closures._clds; }
  CLDClosure* strong_clds() { return &_closures._clds; }
  CLDClosure* thread_root_clds() { return NULL; }
  CLDClosure* second_pass_weak_clds() { return NULL; }

  CodeBlobClosure* strong_codeblobs() { return &_closures._codeblobs; }
  CodeBlobClosure* weak_codeblobs() { return &_closures._codeblobs; }

  void flush() { _closures._buffered_oops.done(); }
  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }

  OopClosure* raw_strong_oops() { return &_closures._oops; }

  bool trace_metadata() { return false; }

  // Closure applied to references between regions, found while scanning
  // remembered sets.
  G1ParPushHeapRSClosure* inter_region_oops() { return &_inter_region_oops; }
};

// Closures used during an initial-mark pause.
// The treatment of "weak" roots is selectable through the template parameter;
// it is typically used to control the unloading of classes and interned strings.
template <G1Mark MarkWeak>
class G1InitialMarkClosures : public G1EvacuationRootClosures {
  G1SharedClosures<G1MarkFromRoot> _strong;
  G1SharedClosures<MarkWeak>       _weak;
  G1ParPushHeapRSClosure           _inter_region_oops;

  // Helper for the accessors below: returns NULL if the given Mark matches
  // this class's MarkWeak template parameter, and the closure otherwise.
  template <G1Mark Mark, typename T>
  T* null_if(T* t) {
    if (Mark == MarkWeak) {
      return NULL;
    }
    return t;
  }

public:
  G1InitialMarkClosures(G1CollectedHeap* g1h,
                        G1ParScanThreadState* pss) :
    _strong(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true),
    _weak(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true),
    _inter_region_oops(g1h, pss) {}

  OopClosure* weak_oops() { return &_weak._buffered_oops; }
  OopClosure* strong_oops() { return &_strong._buffered_oops; }

  // If MarkWeak is G1MarkPromotedFromRoot, the weak CLDs must be processed in a second pass.
  CLDClosure* weak_clds() { return null_if<G1MarkPromotedFromRoot>(&_weak._clds); }
  CLDClosure* strong_clds() { return &_strong._clds; }

  // If MarkWeak is G1MarkFromRoot, all CLDs are already processed by the weak
  // and strong variants above, so return a NULL closure for the following
  // specialized versions in that case.
  CLDClosure* thread_root_clds() { return null_if<G1MarkFromRoot>(&_strong._clds); }
  CLDClosure* second_pass_weak_clds() { return null_if<G1MarkFromRoot>(&_weak._clds); }

  CodeBlobClosure* strong_codeblobs() { return &_strong._codeblobs; }
  CodeBlobClosure* weak_codeblobs() { return &_weak._codeblobs; }

  void flush() {
    _strong._buffered_oops.done();
    _weak._buffered_oops.done();
  }

  double closure_app_seconds() {
    return _strong._buffered_oops.closure_app_seconds() +
           _weak._buffered_oops.closure_app_seconds();
  }

  OopClosure* raw_strong_oops() { return &_strong._oops; }

  // If we are not marking all weak roots, then we are tracing
  // which metadata is alive.
  bool trace_metadata() { return MarkWeak == G1MarkPromotedFromRoot; }

  G1ParPushHeapRSClosure* inter_region_oops() { return &_inter_region_oops; }
};

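// A sketch of how the two instantiations below differ (informal summary):
// with G1MarkFromRoot, weak roots are marked just like strong roots, so no
// second CLD pass is needed and trace_metadata() is false; with
// G1MarkPromotedFromRoot, only objects promoted during the pause are marked
// through the weak roots, so the weak CLDs get a second pass and metadata
// liveness is traced (trace_metadata() is true).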
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
  if (g1h->collector_state()->during_initial_mark_pause()) {
    if (ClassUnloadingWithConcurrentMark) {
      return new G1InitialMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
    } else {
      return new G1InitialMarkClosures<G1MarkFromRoot>(g1h, pss);
    }
  } else {
    return new G1EvacuationClosures(g1h, pss, g1h->collector_state()->gcs_are_young());
  }
}
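
// Hypothetical usage sketch (the real call sites live elsewhere in the G1
// evacuation code; apply_to_roots is illustrative only): each GC worker
// builds a closure set from its per-thread scan state, applies it to the
// roots, then flushes the buffered oops.
//
//   G1EvacuationRootClosures* closures =
//       G1EvacuationRootClosures::create_root_closures(pss, g1h);
//   apply_to_roots(closures->strong_oops(), closures->strong_clds(),
//                  closures->strong_codeblobs());
//   closures->flush();  // drain the BufferingOopClosure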