tf::FlowBuilder class

methods for building a task dependency graph

Derived classes

class Subflow
class to construct a subflow graph from the execution of a dynamic task
class Taskflow
main entry to create a task dependency graph

Constructors, destructors, conversion operators

FlowBuilder(Graph& graph) protected
constructs a flow builder with a graph

Public functions

template<typename C, std::enable_if_t<is_static_task_v<C>, void>* = nullptr>
auto emplace(C&& callable) -> Task
creates a static task
template<typename C, std::enable_if_t<is_dynamic_task_v<C>, void>* = nullptr>
auto emplace(C&& callable) -> Task
creates a dynamic task
template<typename C, std::enable_if_t<is_condition_task_v<C>, void>* = nullptr>
auto emplace(C&& callable) -> Task
creates a condition task
template<typename... C, std::enable_if_t<(sizeof...(C) > 1), void>* = nullptr>
auto emplace(C && ... callables) -> auto
creates multiple tasks from a list of callable objects
auto composed_of(Taskflow& taskflow) -> Task
creates a module task from a taskflow
auto placeholder() -> Task
creates a placeholder task
template<typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr>
auto emplace(C&& callable) -> Task
creates a cudaFlow task on the caller's GPU device context
template<typename C, typename D, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr>
auto emplace_on(C&& callable, D&& device) -> Task
creates a cudaFlow task on the given device
void linearize(std::vector<Task>& tasks)
adds adjacent dependency links to a linear list of tasks
void linearize(std::initializer_list<Task> tasks)
adds adjacent dependency links to a linear list of tasks
template<typename B, typename E, typename C>
auto for_each(B&& first, E&& last, C&& callable) -> Task
constructs a STL-styled parallel-for task
template<typename B, typename E, typename C, typename H = size_t>
auto for_each_guided(B&& beg, E&& end, C&& callable, H&& chunk_size = 1) -> Task
constructs a STL-styled parallel-for task using the guided partition algorithm
template<typename B, typename E, typename C, typename H = size_t>
auto for_each_dynamic(B&& beg, E&& end, C&& callable, H&& chunk_size = 1) -> Task
constructs a STL-styled parallel-for task using the dynamic partition algorithm
template<typename B, typename E, typename C, typename H = size_t>
auto for_each_static(B&& beg, E&& end, C&& callable, H&& chunk_size = 0) -> Task
constructs a STL-styled parallel-for task using the static partition algorithm
template<typename B, typename E, typename S, typename C>
auto for_each_index(B&& first, E&& last, S&& step, C&& callable) -> Task
constructs an index-based parallel-for task
template<typename B, typename E, typename S, typename C, typename H = size_t>
auto for_each_index_guided(B&& beg, E&& end, S&& step, C&& callable, H&& chunk_size = 1) -> Task
constructs an index-based parallel-for task using the guided partition algorithm.
template<typename B, typename E, typename S, typename C, typename H = size_t>
auto for_each_index_dynamic(B&& beg, E&& end, S&& step, C&& callable, H&& chunk_size = 1) -> Task
constructs an index-based parallel-for task using the dynamic partition algorithm.
template<typename B, typename E, typename S, typename C, typename H = size_t>
auto for_each_index_static(B&& beg, E&& end, S&& step, C&& callable, H&& chunk_size = 0) -> Task
constructs an index-based parallel-for task using the static partition algorithm.
template<typename B, typename E, typename T, typename O>
auto reduce(B&& first, E&& last, T& init, O&& bop) -> Task
constructs a STL-styled parallel-reduce task
template<typename B, typename E, typename T, typename O, typename H = size_t>
auto reduce_guided(B&& first, E&& last, T& init, O&& bop, H&& chunk_size = 1) -> Task
constructs a STL-styled parallel-reduce task using the guided partition algorithm
template<typename B, typename E, typename T, typename O, typename H = size_t>
auto reduce_dynamic(B&& first, E&& last, T& init, O&& bop, H&& chunk_size = 1) -> Task
constructs a STL-styled parallel-reduce task using the dynamic partition algorithm
template<typename B, typename E, typename T, typename O, typename H = size_t>
auto reduce_static(B&& first, E&& last, T& init, O&& bop, H&& chunk_size = 0) -> Task
constructs a STL-styled parallel-reduce task using the static partition algorithm
template<typename B, typename E, typename T, typename BOP, typename UOP>
auto transform_reduce(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop) -> Task
constructs a STL-styled parallel transform-reduce task
template<typename B, typename E, typename T, typename BOP, typename UOP, typename H = size_t>
auto transform_reduce_guided(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop, H&& chunk_size = 1) -> Task
constructs a STL-styled parallel transform-reduce task using the guided partition algorithm
template<typename B, typename E, typename T, typename BOP, typename UOP, typename H = size_t>
auto transform_reduce_static(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop, H&& chunk_size = 0) -> Task
constructs a STL-styled parallel transform-reduce task using the static partition algorithm
template<typename B, typename E, typename T, typename BOP, typename UOP, typename H = size_t>
auto transform_reduce_dynamic(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop, H&& chunk_size = 1) -> Task
constructs a STL-styled parallel transform-reduce task using the dynamic partition algorithm
template<typename B, typename E, typename C>
auto sort(B&& first, E&& last, C&& cmp) -> Task
constructs a dynamic task to perform STL-styled parallel sort
template<typename B, typename E>
auto sort(B&& first, E&& last) -> Task
constructs a dynamic task to perform STL-styled parallel sort using the std::less<T> comparator, where T is the element type

Protected variables

Graph& _graph
associated graph object

Function documentation

template<typename C, std::enable_if_t<is_static_task_v<C>, void>* = nullptr>
Task tf::FlowBuilder::emplace(C&& callable)

creates a static task

Template parameters
C callable type constructible from std::function<void()>
Parameters
callable callable to construct a static task
Returns a tf::Task handle

The following example creates a static task.

tf::Task static_task = taskflow.emplace([](){});

Please refer to StaticTasking for details.

template<typename C, std::enable_if_t<is_dynamic_task_v<C>, void>* = nullptr>
Task tf::FlowBuilder::emplace(C&& callable)

creates a dynamic task

Template parameters
C callable type constructible from std::function<void(tf::Subflow&)>
Parameters
callable callable to construct a dynamic task
Returns a tf::Task handle

The following example creates a dynamic task (tf::Subflow) that spawns two static tasks.

tf::Task dynamic_task = taskflow.emplace([](tf::Subflow& sf){
  tf::Task static_task1 = sf.emplace([](){});
  tf::Task static_task2 = sf.emplace([](){});
});

Please refer to DynamicTasking for details.

template<typename C, std::enable_if_t<is_condition_task_v<C>, void>* = nullptr>
Task tf::FlowBuilder::emplace(C&& callable)

creates a condition task

Template parameters
C callable type constructible from std::function<int()>
Parameters
callable callable to construct a condition task
Returns a tf::Task handle

The following example creates an if-else block using one condition task and three static tasks.

tf::Taskflow taskflow;

auto [init, cond, yes, no] = taskflow.emplace(
 [] () { },
 [] () { return 0; },
 [] () { std::cout << "yes\n"; },
 [] () { std::cout << "no\n"; }
);

// executes yes if cond returns 0, or no if cond returns 1
cond.precede(yes, no);
cond.succeed(init);

Please refer to ConditionalTasking for details.

template<typename... C, std::enable_if_t<(sizeof...(C) > 1), void>* = nullptr>
auto tf::FlowBuilder::emplace(C && ... callables)

creates multiple tasks from a list of callable objects

Template parameters
C callable types
Parameters
callables one or multiple callable objects constructible from each task category
Returns a std::tuple of tf::Task handles

The method returns a tuple of tasks, each corresponding to one of the given callable targets. You can use structured bindings to unpack the returned tasks. The following example creates four static tasks and assigns them to A, B, C, and D using structured bindings.

auto [A, B, C, D] = taskflow.emplace(
  [] () { std::cout << "A"; },
  [] () { std::cout << "B"; },
  [] () { std::cout << "C"; },
  [] () { std::cout << "D"; }
);

Task tf::FlowBuilder::composed_of(Taskflow& taskflow)

creates a module task from a taskflow

Parameters
taskflow a taskflow object for the module
Returns a tf::Task handle
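
As a minimal sketch (the taskflow names core and top are illustrative, and an executor is assumed to run top afterwards), the following composes one taskflow into another:

tf::Taskflow core;
core.emplace([] () { std::cout << "core work\n"; });

tf::Taskflow top;
tf::Task init   = top.emplace([] () { std::cout << "init\n"; });
tf::Task module = top.composed_of(core);  // module task that wraps core

// init runs before the composed module
init.precede(module);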

Please refer to ComposableTasking for details.

Task tf::FlowBuilder::placeholder()

creates a placeholder task

Returns a tf::Task handle

A placeholder task maps to a node in the taskflow graph, but it does not have any callable work assigned yet. A placeholder task is different from an empty task handle that does not point to any node in a graph.

// create a placeholder task with no callable target assigned
tf::Task placeholder = taskflow.placeholder(); 
assert(placeholder.empty() == false && placeholder.has_work() == false);

// create an empty task handle
tf::Task task;
assert(task.empty() == true);

// assign the task handle to the placeholder task
task = placeholder;
assert(task.empty() == false && task.has_work() == false);

template<typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr>
Task tf::FlowBuilder::emplace(C&& callable)

creates a cudaFlow task on the caller's GPU device context

Template parameters
C callable type constructible from std::function<void(tf::cudaFlow&)>
Returns a tf::Task handle

This method is equivalent to calling tf::FlowBuilder::emplace_on(callable, d) where d is the caller's device context. The following example creates a cudaFlow of two kernel tasks, task1 and task2, where task1 runs before task2.

taskflow.emplace([&](tf::cudaFlow& cf){
  // create two kernel tasks
  tf::cudaTask task1 = cf.kernel(grid1, block1, shm1, kernel1, args1);
  tf::cudaTask task2 = cf.kernel(grid2, block2, shm2, kernel2, args2);

  // kernel1 runs before kernel2
  task1.precede(task2);
});

Please refer to GPUTaskingcudaFlow and GPUTaskingcudaFlowCapturer for details.

template<typename C, typename D, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr>
Task tf::FlowBuilder::emplace_on(C&& callable, D&& device)

creates a cudaFlow task on the given device

Template parameters
C callable type constructible from std::function<void(tf::cudaFlow&)>
D device type, either int or std::reference_wrapper<int> (stateful, e.g., created by std::ref)
Returns a tf::Task handle

The following example creates a cudaFlow of two kernel tasks, task1 and task2, on GPU 2, where task1 runs before task2.

taskflow.emplace_on([&](tf::cudaFlow& cf){
  // create two kernel tasks
  tf::cudaTask task1 = cf.kernel(grid1, block1, shm1, kernel1, args1);
  tf::cudaTask task2 = cf.kernel(grid2, block2, shm2, kernel2, args2);

  // kernel1 runs before kernel2
  task1.precede(task2);
}, 2);

void tf::FlowBuilder::linearize(std::vector<Task>& tasks)

adds adjacent dependency links to a linear list of tasks

Parameters
tasks a vector of tasks

void tf::FlowBuilder::linearize(std::initializer_list<Task> tasks)

adds adjacent dependency links to a linear list of tasks

Parameters
tasks an initializer list of tasks
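
For example, a minimal sketch (task names are illustrative; a tf::Taskflow object named taskflow is assumed, as in the examples above) that linearizes three tasks so A runs before B and B before C:

auto [A, B, C] = taskflow.emplace(
  [] () { std::cout << "A\n"; },
  [] () { std::cout << "B\n"; },
  [] () { std::cout << "C\n"; }
);

// adds the dependency links A -> B -> C
taskflow.linearize({A, B, C});

// the vector overload is equivalent
// std::vector<tf::Task> tasks {A, B, C};
// taskflow.linearize(tasks);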

template<typename B, typename E, typename C>
Task tf::FlowBuilder::for_each(B&& first, E&& last, C&& callable)

constructs a STL-styled parallel-for task

Template parameters
B beginning iterator type
E ending iterator type
C callable type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
callable a callable object to apply to the dereferenced iterator
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each object obtained by dereferencing every iterator in the range [first, last). By default, we employ the guided partition algorithm with chunk size equal to one. This method is equivalent to the parallel execution of the following loop:

for(auto itr=first; itr!=last; itr++) {
  callable(*itr);
}

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the dereferenced iterator type.
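
For instance, a minimal sketch (the vector name src is illustrative; a tf::Taskflow object named taskflow and an executor that later runs it are assumed) that doubles every element of a vector:

std::vector<int> src {1, 2, 3, 4};

// applies the callable to each dereferenced element in parallel
tf::Task pf = taskflow.for_each(src.begin(), src.end(), [] (int& v) {
  v *= 2;
});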

Please refer to ParallelIterations for details.

template<typename B, typename E, typename C, typename H = size_t>
Task tf::FlowBuilder::for_each_guided(B&& beg, E&& end, C&& callable, H&& chunk_size = 1)

constructs a STL-styled parallel-for task using the guided partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
C callable type
H chunk size type
Parameters
beg iterator to the beginning (inclusive)
end iterator to the end (exclusive)
callable a callable object to apply to the dereferenced iterator
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each object obtained by dereferencing every iterator in the range [beg, end). The runtime partitions the range into chunks of the given chunk size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the dereferenced iterator type.
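
For example, a minimal sketch (the vector name data and the chunk size are illustrative; a tf::Taskflow object named taskflow is assumed) that processes a large range in guided chunks of at least 256 elements:

std::vector<int> data(1000000, 1);

taskflow.for_each_guided(data.begin(), data.end(),
  [] (int& v) { v += 1; },  // work applied to each element
  256                       // chunk size
);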

Please refer to ParallelIterations for details.

template<typename B, typename E, typename C, typename H = size_t>
Task tf::FlowBuilder::for_each_dynamic(B&& beg, E&& end, C&& callable, H&& chunk_size = 1)

constructs a STL-styled parallel-for task using the dynamic partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
C callable type
H chunk size type
Parameters
beg iterator to the beginning (inclusive)
end iterator to the end (exclusive)
callable a callable object to apply to the dereferenced iterator
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each object obtained by dereferencing every iterator in the range [beg, end). The runtime partitions the range into chunks of the given chunk size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the dereferenced iterator type.

Please refer to ParallelIterations for details.

template<typename B, typename E, typename C, typename H = size_t>
Task tf::FlowBuilder::for_each_static(B&& beg, E&& end, C&& callable, H&& chunk_size = 0)

constructs a STL-styled parallel-for task using the static partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
C callable type
H chunk size type
Parameters
beg iterator to the beginning (inclusive)
end iterator to the end (exclusive)
callable a callable object to apply to the dereferenced iterator
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each object obtained by dereferencing every iterator in the range [beg, end). The runtime partitions the range into chunks of the given chunk size, where each chunk is processed by a worker. When the given chunk size is zero, the runtime distributes the work evenly across workers.

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the dereferenced iterator type.

Please refer to ParallelIterations for details.

template<typename B, typename E, typename S, typename C>
Task tf::FlowBuilder::for_each_index(B&& first, E&& last, S&& step, C&& callable)

constructs an index-based parallel-for task

Template parameters
B beginning index type (must be integral)
E ending index type (must be integral)
S step type (must be integral)
C callable type
Parameters
first index of the beginning (inclusive)
last index of the end (exclusive)
step step size
callable a callable object to apply to each valid index
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each index in the range [first, last) with the step size. By default, we employ the guided partition algorithm with chunk size equal to one.

This method is equivalent to the parallel execution of the following loop:

// case 1: step size is positive
for(auto i=first; i<last; i+=step) {
  callable(i);
}

// case 2: step size is negative
for(auto i=first; i>last; i+=step) {
  callable(i);
}

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the integral index type.
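
For example, a minimal sketch (the vector name buf is illustrative; a tf::Taskflow object named taskflow and an executor that later runs it are assumed) that writes to every even index in [0, 100):

std::vector<int> buf(100, 0);

// runs the callable on indices 0, 2, 4, ..., 98
taskflow.for_each_index(0, 100, 2, [&] (int i) {
  buf[i] = i;
});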

Please refer to ParallelIterations for details.

template<typename B, typename E, typename S, typename C, typename H = size_t>
Task tf::FlowBuilder::for_each_index_guided(B&& beg, E&& end, S&& step, C&& callable, H&& chunk_size = 1)

constructs an index-based parallel-for task using the guided partition algorithm.

Template parameters
B beginning index type (must be integral)
E ending index type (must be integral)
S step type (must be integral)
C callable type
H chunk size type
Parameters
beg index of the beginning (inclusive)
end index of the end (exclusive)
step step size
callable a callable object to apply to each valid index
chunk_size chunk size (default 1)
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each index in the range [beg, end) with the step size. The runtime partitions the range into chunks of the given size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the integral index type.

Please refer to ParallelIterations for details.

template<typename B, typename E, typename S, typename C, typename H = size_t>
Task tf::FlowBuilder::for_each_index_dynamic(B&& beg, E&& end, S&& step, C&& callable, H&& chunk_size = 1)

constructs an index-based parallel-for task using the dynamic partition algorithm.

Template parameters
B beginning index type (must be integral)
E ending index type (must be integral)
S step type (must be integral)
C callable type
H chunk size type
Parameters
beg index of the beginning (inclusive)
end index of the end (exclusive)
step step size
callable a callable object to apply to each valid index
chunk_size chunk size (default 1)
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each index in the range [beg, end) with the step size. The runtime partitions the range into chunks of the given size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the integral index type.

Please refer to ParallelIterations for details.

template<typename B, typename E, typename S, typename C, typename H = size_t>
Task tf::FlowBuilder::for_each_index_static(B&& beg, E&& end, S&& step, C&& callable, H&& chunk_size = 0)

constructs an index-based parallel-for task using the static partition algorithm.

Template parameters
B beginning index type (must be integral)
E ending index type (must be integral)
S step type (must be integral)
C callable type
H chunk size type
Parameters
beg index of the beginning (inclusive)
end index of the end (exclusive)
step step size
callable a callable object to apply to each valid index
chunk_size chunk size (default 0)
Returns a tf::Task handle

The task spawns a subflow that applies the callable object to each index in the range [beg, end) with the step size. The runtime partitions the range into chunks of the given size, where each chunk is processed by a worker. When the given chunk size is zero, the runtime distributes the work evenly across workers.

Arguments are templated to enable stateful passing using std::reference_wrapper. The callable needs to take a single argument of the integral index type.

Please refer to ParallelIterations for details.

template<typename B, typename E, typename T, typename O>
Task tf::FlowBuilder::reduce(B&& first, E&& last, T& init, O&& bop)

constructs a STL-styled parallel-reduce task

Template parameters
B beginning iterator type
E ending iterator type
T result type
O binary reducer type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks, where each chunk is processed by a worker. By default, we employ the guided partition algorithm.

This method is equivalent to the parallel execution of the following loop:

for(auto itr=first; itr!=last; itr++) {
  init = bop(init, *itr);
}

Arguments are templated to enable stateful passing using std::reference_wrapper.
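
For example, a minimal sketch (the vector name data and the initial value are illustrative; a tf::Taskflow object named taskflow is assumed) that sums a vector into sum:

std::vector<int> data {1, 2, 3, 4, 5};
int sum = 0;  // initial value; also stores the reduced result

taskflow.reduce(data.begin(), data.end(), sum,
  [] (int a, int b) { return a + b; }  // binary reduction operator
);
// after an executor runs the taskflow, sum == 15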

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename O, typename H = size_t>
Task tf::FlowBuilder::reduce_guided(B&& first, E&& last, T& init, O&& bop, H&& chunk_size = 1)

constructs a STL-styled parallel-reduce task using the guided partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
T result type
O binary reducer type
H chunk size type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks of size chunk_size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename O, typename H = size_t>
Task tf::FlowBuilder::reduce_dynamic(B&& first, E&& last, T& init, O&& bop, H&& chunk_size = 1)

constructs a STL-styled parallel-reduce task using the dynamic partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
T result type
O binary reducer type
H chunk size type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks of size chunk_size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename O, typename H = size_t>
Task tf::FlowBuilder::reduce_static(B&& first, E&& last, T& init, O&& bop, H&& chunk_size = 0)

constructs a STL-styled parallel-reduce task using the static partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
T result type
O binary reducer type
H chunk size type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks of size chunk_size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename BOP, typename UOP>
Task tf::FlowBuilder::transform_reduce(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop)

constructs a STL-styled parallel transform-reduce task

Template parameters
B beginning iterator type
E ending iterator type
T result type
BOP binary reducer type
UOP unary transformation type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied in unspecified order to the results of uop
uop unary operator that will be applied to transform each element in the range to the result type
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the transformed elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks, where each chunk is processed by a worker. By default, we employ the guided partition algorithm.

This method is equivalent to the parallel execution of the following loop:

for(auto itr=first; itr!=last; itr++) {
  init = bop(init, uop(*itr));
}

Arguments are templated to enable stateful passing using std::reference_wrapper.
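
For example, a minimal sketch (the names words and total are illustrative; a tf::Taskflow object named taskflow is assumed) that sums the lengths of a set of strings:

std::vector<std::string> words {"a", "bb", "ccc"};
std::size_t total = 0;  // initial value; also stores the reduced result

taskflow.transform_reduce(words.begin(), words.end(), total,
  [] (std::size_t a, std::size_t b) { return a + b; },  // bop: reduce
  [] (const std::string& s) { return s.size(); }        // uop: transform
);
// after an executor runs the taskflow, total == 6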

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename BOP, typename UOP, typename H = size_t>
Task tf::FlowBuilder::transform_reduce_guided(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop, H&& chunk_size = 1)

constructs a STL-styled parallel transform-reduce task using the guided partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
T result type
BOP binary reducer type
UOP unary transformation type
H chunk size type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied in unspecified order to the results of uop
uop unary operator that will be applied to transform each element in the range to the result type
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the transformed elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks of size chunk_size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename BOP, typename UOP, typename H = size_t>
Task tf::FlowBuilder::transform_reduce_static(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop, H&& chunk_size = 0)

constructs a STL-styled parallel transform-reduce task using the static partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
T result type
BOP binary reducer type
UOP unary transformation type
H chunk size type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied in unspecified order to the results of uop
uop unary operator that will be applied to transform each element in the range to the result type
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the transformed elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks of size chunk_size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelReduction for details.

template<typename B, typename E, typename T, typename BOP, typename UOP, typename H = size_t>
Task tf::FlowBuilder::transform_reduce_dynamic(B&& first, E&& last, T& init, BOP&& bop, UOP&& uop, H&& chunk_size = 1)

constructs a STL-styled parallel transform-reduce task using the dynamic partition algorithm

Template parameters
B beginning iterator type
E ending iterator type
T result type
BOP binary reducer type
UOP unary transformation type
H chunk size type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
init initial value of the reduction and the storage for the reduced result
bop binary operator that will be applied in unspecified order to the results of uop
uop unary operator that will be applied to transform each element in the range to the result type
chunk_size chunk size
Returns a tf::Task handle

The task spawns a subflow to perform parallel reduction over init and the transformed elements in the range [first, last). The reduced result is stored in init. The runtime partitions the range into chunks of size chunk_size, where each chunk is processed by a worker.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelReduction for details.

template<typename B, typename E, typename C>
Task tf::FlowBuilder::sort(B&& first, E&& last, C&& cmp)

constructs a dynamic task to perform STL-styled parallel sort

Template parameters
B beginning iterator type (random-accessible)
E ending iterator type (random-accessible)
C comparator type
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)
cmp comparison function object

The task spawns a subflow to sort elements in the range [first, last) in parallel.

Arguments are templated to enable stateful passing using std::reference_wrapper.
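
For example, a minimal sketch (the vector name data is illustrative; a tf::Taskflow object named taskflow is assumed) that sorts a vector in descending order:

std::vector<int> data {3, 1, 4, 1, 5, 9};

taskflow.sort(data.begin(), data.end(),
  [] (int a, int b) { return a > b; }  // descending order
);
// after an executor runs the taskflow, data == {9, 5, 4, 3, 1, 1}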

Please refer to ParallelSort for details.

template<typename B, typename E>
Task tf::FlowBuilder::sort(B&& first, E&& last)

constructs a dynamic task to perform STL-styled parallel sort using the std::less<T> comparator, where T is the element type

Template parameters
B beginning iterator type (random-accessible)
E ending iterator type (random-accessible)
Parameters
first iterator to the beginning (inclusive)
last iterator to the end (exclusive)

The task spawns a subflow to sort elements in the range [first, last) in parallel using the std::less<T> comparator, where T is the dereferenced iterator type.

Arguments are templated to enable stateful passing using std::reference_wrapper.

Please refer to ParallelSort for details.