#include "../exception.h"
auto startTime = std::chrono::high_resolution_clock::now();
// ...
auto endTime = std::chrono::high_resolution_clock::now();
task.time = std::chrono::duration<float, std::milli>(endTime - startTime).count();
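The lines above time a task run and store the result in milliseconds. Below is a minimal standalone sketch of the same std::chrono pattern, detached from the Beatmup types; the busyWork placeholder stands in for a task's process() call.

#include <chrono>
#include <cstdio>

// Placeholder workload standing in for a task's process() call.
static void busyWork() {
    volatile double x = 0;
    for (int i = 0; i < 1000000; ++i)
        x += i * 0.5;
}

int main() {
    auto startTime = std::chrono::high_resolution_clock::now();
    busyWork();
    auto endTime = std::chrono::high_resolution_clock::now();

    // Same conversion as in the fragment above: a float number of milliseconds.
    float timeMs = std::chrono::duration<float, std::milli>(endTime - startTime).count();
    std::printf("elapsed: %.3f ms\n", timeMs);
    return 0;
}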
return (int)tasks.size();
const auto& it = std::find(tasks.cbegin(), tasks.cend(), holder);
if (it == tasks.cend())
    return -1;
return (int)(it - tasks.cbegin());
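This fragment is the lookup documented by getTaskIndex further down: std::find over the task list, -1 if the holder is absent, otherwise the iterator distance cast to int. The same idiom on a plain std::vector<int>, as a self-contained sketch:

#include <algorithm>
#include <cassert>
#include <vector>

// Returns the position of `value` in `items`, or -1 if it is not there.
static int indexOf(const std::vector<int>& items, int value) {
    const auto it = std::find(items.cbegin(), items.cend(), value);
    if (it == items.cend())
        return -1;
    return (int)(it - items.cbegin());
}

int main() {
    std::vector<int> items{10, 20, 30};
    assert(indexOf(items, 20) == 1);
    assert(indexOf(items, 99) == -1);
    return 0;
}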
const auto& nextHolder = std::find(tasks.cbegin(), tasks.cend(), before);
if (nextHolder == tasks.cend())
    throw RuntimeError("Reference task holder is not found in the task list");
const auto& pointer = std::find(tasks.cbegin(), tasks.cend(), target);
if (pointer == tasks.cend())
    return false;
tasks.erase(pointer);
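The two fragments above cover insertion before a reference holder (throwing when the reference is not in the list) and removal by value. A standalone sketch of both idioms on a vector of pointers follows; the Holder type is illustrative and std::runtime_error stands in for Beatmup's RuntimeError.

#include <algorithm>
#include <cassert>
#include <stdexcept>
#include <vector>

struct Holder { int id; };

// Inserts `newbie` immediately before `before`; throws if `before` is unknown.
static void insertBefore(std::vector<Holder*>& list, Holder* newbie, const Holder* before) {
    const auto pos = std::find(list.cbegin(), list.cend(), before);
    if (pos == list.cend())
        throw std::runtime_error("Reference task holder is not found in the task list");
    list.insert(pos, newbie);
}

// Removes `target` from the list; returns false if it was not there.
static bool remove(std::vector<Holder*>& list, const Holder* target) {
    const auto pos = std::find(list.cbegin(), list.cend(), target);
    if (pos == list.cend())
        return false;
    list.erase(pos);
    return true;
}

int main() {
    Holder a{1}, b{2}, c{3};
    std::vector<Holder*> list{&a, &c};
    insertBefore(list, &b, &c);   // list is now a, b, c
    assert(list[1] == &b);
    assert(remove(list, &a));     // removes a
    assert(!remove(list, &a));    // already gone
    return 0;
}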
for (auto& it : tasks) {
    switch (it->executionMode = it->getTask().getUsedDevices()) {
        // ...
    }
    it->threadCount = it->getTask().getMaxThreads();
}
if (!(*currentTask)->getTask().process(thread))
executionMode(holder.executionMode),
threadCount(holder.threadCount),
return &left == &right;
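The last fragment compares holders by address, so two TaskHolder objects are equal only when they are the same object. A tiny sketch of that identity semantics, using an illustrative Holder type rather than the Beatmup class:

#include <cassert>

struct Holder { int payload = 0; };

// Equality by identity: two holders are equal only if they are the same object.
static bool operator==(const Holder& left, const Holder& right) {
    return &left == &right;
}

int main() {
    Holder a, b;          // same payload, distinct objects
    assert(a == a);
    assert(!(a == b));
    return 0;
}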
Task: an operation that can be executed by multiple threads in parallel.
TaskDeviceRequirement
Specifies which device (CPU and/or GPU) is used to run the task.
GPU_OR_CPU
this task uses GPU if it is available, but CPU fallback is possible
CPU_ONLY
this task does not use GPU
GPU_ONLY
this task requires GPU, otherwise it cannot run
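These three values state whether a task must avoid the GPU, merely prefers it, or cannot run without it. The sketch below shows one way such a requirement could be resolved against GPU availability; the Target enum and chooseTarget helper are illustrative and not part of the library.

#include <cstdio>
#include <stdexcept>

// Mirrors the documented device requirement values.
enum class TaskDeviceRequirement { GPU_OR_CPU, CPU_ONLY, GPU_ONLY };

enum class Target { CPU, GPU };

// Picks an execution target for a given requirement; throws if GPU is required but absent.
static Target chooseTarget(TaskDeviceRequirement req, bool gpuAvailable) {
    switch (req) {
        case TaskDeviceRequirement::CPU_ONLY:
            return Target::CPU;
        case TaskDeviceRequirement::GPU_ONLY:
            if (!gpuAvailable)
                throw std::runtime_error("Task requires GPU, but no GPU is available");
            return Target::GPU;
        case TaskDeviceRequirement::GPU_OR_CPU:
        default:
            return gpuAvailable ? Target::GPU : Target::CPU;
    }
}

int main() {
    bool gpuAvailable = false;   // pretend there is no GPU
    Target t = chooseTarget(TaskDeviceRequirement::GPU_OR_CPU, gpuAvailable);
    std::printf("running on %s\n", t == Target::GPU ? "GPU" : "CPU");
    return 0;
}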
virtual void afterProcessing(ThreadIndex threadCount, GraphicPipeline *gpu, bool aborted)
Instruction called after the task is executed.
A task within a pipeline.
TaskHolder(AbstractTask &task)
Custom pipeline: a sequence of tasks to be executed as a whole.
bool removeTask(const TaskHolder &task)
Removes a task from the pipeline.
TaskDeviceRequirement getUsedDevices() const
Communicates devices (CPU and/or GPU) the task is run on.
void afterProcessing(ThreadIndex threadCount, GraphicPipeline *gpu, bool aborted)
Instruction called after the task is executed.
TaskHolder & getTask(int) const
Retrieves a task by its index.
int getTaskIndex(const TaskHolder &)
Retrieves task index if it is in the pipeline; returns -1 otherwise.
TaskHolder & insertTask(AbstractTask &task, const TaskHolder &before)
Inserts a task in a specified position of the pipeline before another task.
TaskHolder & addTask(AbstractTask &)
Adds a new task to the end of the pipeline.
bool processOnGPU(GraphicPipeline &gpu, TaskThread &thread)
Executes the task on GPU.
bool process(TaskThread &thread)
Executes the task on CPU within a given thread.
ThreadIndex getMaxThreads() const
Gives the upper limit on the number of threads the task may be performed by.
void measure()
Determines pipeline execution mode and required thread count.
virtual TaskHolder * createTaskHolder(AbstractTask &task)=0
void beforeProcessing(ThreadIndex threadCount, ProcessingTarget target, GraphicPipeline *gpu)
Instruction called before the task is executed.
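The beforeProcessing / process / afterProcessing entries above describe the task lifecycle: setup before the run, per-thread work, teardown once the run ends or aborts. The sketch below models that sequence with deliberately simplified signatures (no GraphicPipeline or ProcessingTarget arguments); it is not the Beatmup interface.

#include <cstdio>

// Simplified stand-ins for the documented lifecycle, not the Beatmup signatures.
using ThreadIndex = unsigned char;

struct Task {
    virtual ~Task() = default;
    virtual void beforeProcessing(ThreadIndex threadCount) {}                // setup
    virtual bool process(ThreadIndex thread) = 0;                            // per-thread work; false to abort
    virtual void afterProcessing(ThreadIndex threadCount, bool aborted) {}   // teardown
};

// Runs a task single-threaded, honoring the before/process/after sequence.
static void run(Task& task) {
    const ThreadIndex threadCount = 1;
    task.beforeProcessing(threadCount);
    const bool ok = task.process(0);
    task.afterProcessing(threadCount, !ok);
}

struct HelloTask : Task {
    bool process(ThreadIndex thread) override {
        std::printf("processing in thread %d\n", (int)thread);
        return true;   // returning false would mark the run as aborted
    }
};

int main() {
    HelloTask task;
    run(task);
    return 0;
}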
Internal low-level GPU control API.
void afterProcessing(ThreadIndex threadCount, GraphicPipeline *gpu, bool aborted) override
Instruction called after the task is executed.
bool process(TaskThread &thread) override
Executes the task on CPU within a given thread.
void beforeProcessing(ThreadIndex threadCount, ProcessingTarget target, GraphicPipeline *gpu) override
Instruction called before the task is executed.
bool processOnGPU(GraphicPipeline &gpu, TaskThread &thread) override
Executes the task on GPU.
Interface managing the execution of a sequence of tasks.
virtual void synchronize()=0
Blocks until all the other threads running the same task reach the same point.
ThreadIndex currentThread() const
virtual bool isTaskAborted() const =0
Returns true if the task is asked to stop from outside.
const TaskHolder & getCurrentTask() const
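TaskRouter's synchronize() above is a barrier: a thread calling it blocks until every other thread running the same task reaches the same point. A minimal sketch of that semantic with std::barrier from C++20 follows; it illustrates the behavior only and is not the library's implementation.

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    const int threadCount = 4;
    std::barrier<> sync(threadCount);   // all threads must arrive before any proceeds

    std::vector<std::thread> workers;
    for (int i = 0; i < threadCount; ++i)
        workers.emplace_back([&, i] {
            std::printf("thread %d: phase 1 done\n", i);
            sync.arrive_and_wait();     // analogous to TaskRouter::synchronize()
            std::printf("thread %d: phase 2 starts only after everyone arrived\n", i);
        });

    for (auto& w : workers)
        w.join();
    return 0;
}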
std::vector< TaskHolder * > tasks
the list of tasks
AbstractTask::TaskDeviceRequirement getUsedDevices() const
void insertTask(TaskHolder *newbie, const TaskHolder *before)
std::mutex tasksAccess
task list access control
bool measured
if true, the execution mode and the thread count are determined
void addTask(TaskHolder *taskHolder)
void beforeProcessing(GraphicPipeline *gpu)
ThreadIndex maxThreadCount
bool allTasksAborted() const
Returns true if the current session is aborted.
bool removeTask(const TaskHolder *target)
void runTask()
Executes the pointed task.
void goToNextTask()
Goes to the next task in the list.
std::vector< TaskHolder * >::iterator currentTask
bool allTasksDone() const
Returns true if all tasks are done.
AbstractTask::TaskDeviceRequirement executionMode
TaskHolder & getCurrentTask()
Returns currently pointed task.
TaskHolder * getTask(int index)
void measure()
Determines execution mode (GPU or CPU) and thread count for each task.
int getTaskIndex(const TaskHolder *holder)
bool abort
if true, one of the threads executing the current task has caused it to abort
ThreadIndex getMaxThreads() const
bool process(GraphicPipeline *gpu, TaskThread &thread, CustomPipeline &pipeline)
Processing entry point.
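The members above outline the execution model: an iterator pointing at the current task, helpers to run it and advance, and flags telling whether everything is done or the run was aborted. The following single-threaded sketch models that control flow with illustrative types; it is not the Beatmup implementation.

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// Illustrative single-threaded model of the documented control flow:
// run the pointed task, advance, stop when all tasks are done or one aborts.
struct MiniPipeline {
    struct Holder {
        std::string name;
        std::function<bool()> work;   // returns false to signal abort
    };

    std::vector<Holder> tasks;
    std::vector<Holder>::iterator currentTask;
    bool abort = false;

    bool allTasksDone() const { return currentTask == tasks.end(); }
    bool allTasksAborted() const { return abort; }

    void runTask() {
        std::printf("running %s\n", currentTask->name.c_str());
        if (!currentTask->work())
            abort = true;             // a task asked to stop the whole run
    }

    void goToNextTask() { ++currentTask; }

    // Processing entry point: returns true if every task completed.
    bool process() {
        currentTask = tasks.begin();
        abort = false;
        while (!allTasksDone() && !allTasksAborted()) {
            runTask();
            if (!abort)
                goToNextTask();
        }
        return !abort;
    }
};

int main() {
    MiniPipeline pipeline;
    pipeline.tasks.push_back({"load",  [] { return true; }});
    pipeline.tasks.push_back({"infer", [] { return true; }});
    bool ok = pipeline.process();
    std::printf("pipeline %s\n", ok ? "finished" : "aborted");
    return 0;
}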
#define BEATMUP_ASSERT_DEBUG(C)
bool operator==(const CustomRectangle< numeric > &lhs, const CustomRectangle< numeric > &rhs)
Checks whether two rectangles are actually the same.
unsigned char ThreadIndex
number of threads / thread index
CustomPoint< numeric > max(const CustomPoint< numeric > &a, const CustomPoint< numeric > &b)