Return vector of results from parallel_for_each
This changes gdb::parallel_for_each to return a vector of the results. However, if the passed-in function returns void, the return type remains 'void'. This functionality is used later, to parallelize the new indexer.
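For illustration, here is roughly how a caller might use the new interface (hypothetical helper functions, not part of this commit). A value-returning callback produces one entry per task, while a void callback keeps the old behaviour:

#include "gdbsupport/parallel-for.h"
#include <vector>

/* Hypothetical caller: each task counts the elements in its subrange,
   and the per-task counts come back as a std::vector<size_t>.  */
static std::vector<size_t>
count_in_parallel (std::vector<int> &data)
{
  return gdb::parallel_for_each (1, data.begin (), data.end (),
                                 [] (std::vector<int>::iterator start,
                                     std::vector<int>::iterator end)
                                 {
                                   return (size_t) (end - start);
                                 });
}

/* A void-returning callback still yields a void parallel_for_each.  */
static void
bump_in_parallel (std::vector<int> &data)
{
  gdb::parallel_for_each (1, data.begin (), data.end (),
                          [] (std::vector<int>::iterator start,
                              std::vector<int>::iterator end)
                          {
                            for (auto it = start; it != end; ++it)
                              ++*it;
                          });
}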
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -21,11 +21,98 @@
 #define GDBSUPPORT_PARALLEL_FOR_H
 
 #include <algorithm>
+#include <type_traits>
 #include "gdbsupport/thread-pool.h"
 
 namespace gdb
 {
 
+namespace detail
+{
+
+/* This is a helper class that is used to accumulate results for
+   parallel_for.  There is a specialization for 'void', below.  */
+template<typename T>
+struct par_for_accumulator
+{
+public:
+
+  explicit par_for_accumulator (size_t n_threads)
+    : m_futures (n_threads)
+  {
+  }
+
+  /* The result type that is accumulated.  */
+  typedef std::vector<T> result_type;
+
+  /* Post the Ith task to a background thread, and store a future for
+     later.  */
+  void post (size_t i, std::function<T ()> task)
+  {
+    m_futures[i]
+      = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
+  }
+
+  /* Invoke TASK in the current thread, then compute all the results
+     from all background tasks and put them into a result vector,
+     which is returned.  */
+  result_type finish (gdb::function_view<T ()> task)
+  {
+    result_type result (m_futures.size () + 1);
+
+    result.back () = task ();
+
+    for (size_t i = 0; i < m_futures.size (); ++i)
+      result[i] = m_futures[i].get ();
+
+    return result;
+  }
+
+private:
+
+  /* A vector of futures coming from the tasks run in the
+     background.  */
+  std::vector<std::future<T>> m_futures;
+};
+
+/* See the generic template.  */
+template<>
+struct par_for_accumulator<void>
+{
+public:
+
+  explicit par_for_accumulator (size_t n_threads)
+    : m_futures (n_threads)
+  {
+  }
+
+  /* This specialization does not compute results.  */
+  typedef void result_type;
+
+  void post (size_t i, std::function<void ()> task)
+  {
+    m_futures[i]
+      = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
+  }
+
+  result_type finish (gdb::function_view<void ()> task)
+  {
+    task ();
+
+    for (auto &future : m_futures)
+      {
+        /* Use 'get' and not 'wait', to propagate any exception.  */
+        future.get ();
+      }
+  }
+
+private:
+
+  std::vector<std::future<void>> m_futures;
+};
+
+}
+
 /* A very simple "parallel for".  This splits the range of iterators
    into subranges, and then passes each subrange to the callback.  The
    work may or may not be done in separate threads.
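As a rough sketch of how the accumulator above is meant to be driven (illustrative only; the real driver is the parallel_for_each change later in this diff), background tasks are posted by index and finish runs one last task in the calling thread before gathering every result:

/* Sketch, assuming the parallel-for.h above is included: two
   background tasks plus the calling thread.  */
static std::vector<int>
accumulate_example ()
{
  gdb::detail::par_for_accumulator<int> acc (2);
  acc.post (0, [] () { return 1; });
  acc.post (1, [] () { return 2; });
  /* Runs in the calling thread.  The returned vector is { 1, 2, 3 }:
     background results first, the calling thread's result last.  */
  return acc.finish ([] () { return 3; });
}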
@@ -36,21 +123,25 @@ namespace gdb
 
    The parameter N says how batching ought to be done -- there will be
    at least N elements processed per thread.  Setting N to 0 is not
-   allowed.  */
+   allowed.
+
+   If the function returns a non-void type, then a vector of the
+   results is returned.  The size of the resulting vector depends on
+   the number of threads that were used.  */
 
 template<class RandomIt, class RangeFunction>
-void
+typename gdb::detail::par_for_accumulator<
+    std::result_of_t<RangeFunction (RandomIt, RandomIt)>
+  >::result_type
 parallel_for_each (unsigned n, RandomIt first, RandomIt last,
                    RangeFunction callback)
 {
-  /* So we can use a local array below.  */
-  const size_t local_max = 16;
-  size_t n_threads = std::min (thread_pool::g_thread_pool->thread_count (),
-                               local_max);
-  size_t n_actual_threads = 0;
-  std::future<void> futures[local_max];
-
+  typedef typename std::result_of_t<RangeFunction (RandomIt, RandomIt)>
+    result_type;
+
+  size_t n_threads = thread_pool::g_thread_pool->thread_count ();
   size_t n_elements = last - first;
+  size_t elts_per_thread = 0;
   if (n_threads > 1)
     {
       /* Require that there should be at least N elements in a
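The return type is chosen purely from what the callback returns: std::result_of_t feeds the accumulator template above, so a void callback still gives a void parallel_for_each. A small illustration of the deduction (not part of the patch; assumes the header above plus <type_traits> and <vector>):

using It = std::vector<int>::iterator;
auto counter = [] (It a, It b) { return (size_t) (b - a); };
static_assert (std::is_same<std::result_of_t<decltype (counter) (It, It)>,
                            size_t>::value, "");
static_assert (std::is_same<
                 gdb::detail::par_for_accumulator<size_t>::result_type,
                 std::vector<size_t>>::value, "");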
@@ -58,26 +149,27 @@ parallel_for_each (unsigned n, RandomIt first, RandomIt last,
       gdb_assert (n > 0);
       if (n_elements / n_threads < n)
         n_threads = std::max (n_elements / n, (size_t) 1);
-      size_t elts_per_thread = n_elements / n_threads;
-      n_actual_threads = n_threads - 1;
-      for (int i = 0; i < n_actual_threads; ++i)
-        {
-          RandomIt end = first + elts_per_thread;
-          auto task = [=] ()
-                        {
-                          callback (first, end);
-                        };
-
-          futures[i] = gdb::thread_pool::g_thread_pool->post_task (task);
-          first = end;
-        }
+      elts_per_thread = n_elements / n_threads;
     }
 
+  size_t count = n_threads == 0 ? 0 : n_threads - 1;
+  gdb::detail::par_for_accumulator<result_type> results (count);
+
+  for (int i = 0; i < count; ++i)
+    {
+      RandomIt end = first + elts_per_thread;
+      results.post (i, [=] ()
+        {
+          return callback (first, end);
+        });
+      first = end;
+    }
+
   /* Process all the remaining elements in the main thread.  */
-  callback (first, last);
-
-  for (int i = 0; i < n_actual_threads; ++i)
-    futures[i].wait ();
+  return results.finish ([=] ()
+    {
+      return callback (first, last);
+    });
 }
 
 }
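To make the batching concrete, here is a worked example with made-up numbers, mirroring the arithmetic in the new parallel_for_each body (100 elements, an 8-thread pool, n = 10):

#include <algorithm>
#include <cstddef>

static void
batching_example ()
{
  size_t n_elements = 100, n_threads = 8, n = 10;
  if (n_elements / n_threads < n)     /* 100 / 8 = 12, so not taken */
    n_threads = std::max (n_elements / n, (size_t) 1);
  size_t elts_per_thread = n_elements / n_threads;   /* 12 */
  size_t count = n_threads - 1;                      /* 7 background tasks */
  /* The 7 background tasks cover 7 * 12 = 84 elements; the finish
     callback then handles the remaining 16 in the calling thread.  */
}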
--- a/gdbsupport/thread-pool.cc
+++ b/gdbsupport/thread-pool.cc
@@ -134,11 +134,10 @@ thread_pool::set_thread_count (size_t num_threads)
 #endif /* CXX_STD_THREAD */
 }
 
-std::future<void>
-thread_pool::post_task (std::function<void ()> &&func)
+void
+thread_pool::do_post_task (std::packaged_task<void ()> &&func)
 {
   std::packaged_task<void ()> t (std::move (func));
-  std::future<void> f = t.get_future ();
 
 #if CXX_STD_THREAD
   if (m_thread_count != 0)
@@ -153,7 +152,6 @@ thread_pool::post_task (std::function<void ()> &&func)
       /* Just execute it now.  */
       t ();
     }
-  return f;
 }
 
 #if CXX_STD_THREAD
--- a/gdbsupport/thread-pool.h
+++ b/gdbsupport/thread-pool.h
@@ -64,7 +64,24 @@ public:
 
   /* Post a task to the thread pool.  A future is returned, which can
      be used to wait for the result.  */
-  std::future<void> post_task (std::function<void ()> &&func);
+  std::future<void> post_task (std::function<void ()> &&func)
+  {
+    std::packaged_task<void ()> task (std::move (func));
+    std::future<void> result = task.get_future ();
+    do_post_task (std::packaged_task<void ()> (std::move (task)));
+    return result;
+  }
+
+  /* Post a task to the thread pool.  A future is returned, which can
+     be used to wait for the result.  */
+  template<typename T>
+  std::future<T> post_task (std::function<T ()> &&func)
+  {
+    std::packaged_task<T ()> task (std::move (func));
+    std::future<T> result = task.get_future ();
+    do_post_task (std::packaged_task<void ()> (std::move (task)));
+    return result;
+  }
 
 private:
 
@@ -74,6 +91,10 @@ private:
   /* The callback for each worker thread.  */
   void thread_function ();
 
+  /* Post a task to the thread pool.  A future is returned, which can
+     be used to wait for the result.  */
+  void do_post_task (std::packaged_task<void ()> &&func);
+
   /* The current thread count.  */
   size_t m_thread_count = 0;
 
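The templated post_task works because std::packaged_task<void ()> can type-erase the move-only std::packaged_task<T ()>: the typed future is obtained first, and the outer void task simply invokes the inner one, whose result lands in that future. A standalone sketch of the idiom, using only the standard library (not code from this patch):

#include <future>
#include <utility>

static int
erase_and_run ()
{
  std::packaged_task<int ()> typed ([] () { return 42; });
  std::future<int> fut = typed.get_future ();

  /* packaged_task<void ()> accepts any movable callable; the int result
     still reaches FUT, while the outer task's own void result is unused.  */
  std::packaged_task<void ()> erased (std::move (typed));

  erased ();          /* in the pool, this would run on a worker thread */
  return fut.get ();  /* 42 */
}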