--- old/src/share/vm/gc/shared/workgroup.cpp	2015-06-12 14:52:04.243078305 +0200
+++ new/src/share/vm/gc/shared/workgroup.cpp	2015-06-12 14:52:04.123074400 +0200
@@ -254,9 +254,10 @@
 WorkGang::WorkGang(const char* name,
                    uint workers,
                    bool are_GC_task_threads,
-                   bool are_ConcurrentGC_threads) :
+                   bool are_ConcurrentGC_threads,
+                   GangTaskDispatcher* dispatcher) :
     AbstractWorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
-    _dispatcher(create_dispatcher(workers))
+    _dispatcher(dispatcher != NULL ? dispatcher : create_dispatcher(workers))
 { }
 
 AbstractGangWorker* WorkGang::allocate_worker(uint worker_id) {
@@ -623,3 +624,76 @@
   // Notify all would be safer, but this is OK, right?
   _mon->notify_all();
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class CountTask : public AbstractGangTask {
+  volatile jint _count;
+  bool _sleep;
+ public:
+  CountTask(bool sleep) : AbstractGangTask("CountTask"), _count(0), _sleep(sleep) {}
+  virtual void work(uint worker_id) {
+    if (_sleep) {
+      // Sleep for a while to delay the task, so that the coordinator gets a chance to run.
+      os::sleep(Thread::current(), 100, false);
+    }
+
+    // Count the number of times the task has been executed.
+    Atomic::inc(&_count);
+  }
+  uint count() { return _count; }
+};
+
+
+static void test_workgang_dispatch(bool use_semaphore,
+                                   uint total_workers,
+                                   uint active_workers,
+                                   bool sleep) {
+
+  GangTaskDispatcher* dispatcher = use_semaphore
+      ? (GangTaskDispatcher*) new SemGangTaskDispatcher(total_workers)
+      : (GangTaskDispatcher*) new MutexGangTaskDispatcher();
+
+  // Intentionally leaking the WorkGang, since there is no support for deleting WorkGangs.
+  WorkGang* gang = new WorkGang("Test WorkGang", total_workers, false, false, dispatcher);
+  gang->initialize_workers();
+
+  gang->set_active_workers(active_workers);
+
+  CountTask task(sleep);
+
+  gang->run_task(&task);
+
+  uint task_count = task.count();
+
+  assert(task_count == active_workers, err_msg("Expected count: %u got: %u", active_workers, task_count));
+}
+
+static void test_workgang_dispatch(bool use_semaphore) {
+  const bool sleep = true;
+  const uint total_workers = 8;
+  for (uint active_workers = 1; active_workers <= 4; active_workers++) {
+    test_workgang_dispatch(use_semaphore, total_workers, active_workers, sleep);
+    test_workgang_dispatch(use_semaphore, total_workers, active_workers, !sleep);
+  }
+}
+
+static void test_workgang_dispatch_mutex() {
+  test_workgang_dispatch(false);
+}
+
+static void test_workgang_dispatch_semaphore() {
+  test_workgang_dispatch(true);
+}
+
+void test_workgang() {
+  // Needed to use a dynamic number of active workers.
+  FlagSetting fs(UseDynamicNumberOfGCThreads, true);
+
+  test_workgang_dispatch_semaphore();
+  test_workgang_dispatch_mutex();
+}
+
+#endif // PRODUCT
--- old/src/share/vm/gc/shared/workgroup.hpp	2015-06-12 14:52:04.479085985 +0200
+++ new/src/share/vm/gc/shared/workgroup.hpp	2015-06-12 14:52:04.351081820 +0200
@@ -182,7 +182,8 @@
   WorkGang(const char* name,
            uint workers,
            bool are_GC_task_threads,
-           bool are_ConcurrentGC_threads);
+           bool are_ConcurrentGC_threads,
+           GangTaskDispatcher* dispatcher = NULL);
 
   // Run a task, returns when the task is done.
   virtual void run_task(AbstractGangTask* task);
--- old/src/share/vm/prims/jni.cpp	2015-06-12 14:52:04.711093534 +0200
+++ new/src/share/vm/prims/jni.cpp	2015-06-12 14:52:04.583089368 +0200
@@ -3855,6 +3855,7 @@
     unit_test_function_call
 
 // Forward declaration
+void test_workgang();
 void test_semaphore();
 void TestOS_test();
 void TestReservedSpace_test();
@@ -3881,6 +3882,7 @@
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(test_workgang());
     run_unit_test(test_semaphore());
     run_unit_test(TestOS_test());
     run_unit_test(TestReservedSpace_test());
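
Note (not part of the patch): a minimal sketch of how the new, optional dispatcher argument is intended to be used. Existing call sites are unaffected because the parameter defaults to NULL, in which case the gang falls back to create_dispatcher(); tests can inject a concrete dispatcher explicitly, as test_workgang_dispatch() above does. The gang names and worker counts below are illustrative only, and the sketch assumes the same non-product build context as the unit tests.

  // Default behavior: no dispatcher passed, so WorkGang calls create_dispatcher(workers).
  WorkGang* default_gang = new WorkGang("Illustrative WorkGang", 4, false, false);
  default_gang->initialize_workers();

  // Test-style injection: hand the gang a specific dispatcher implementation up front.
  GangTaskDispatcher* dispatcher = new SemGangTaskDispatcher(4);
  WorkGang* sem_gang = new WorkGang("Illustrative Sem WorkGang", 4, false, false, dispatcher);
  sem_gang->initialize_workers();

The new test itself is reached through execute_internal_vm_tests(), i.e. by starting a build in which ExecuteInternalVMTests is available (a non-product build) with -XX:+ExecuteInternalVMTests.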