Beatmup
inference_task.cpp
Go to the documentation of this file.
1
/*
2
Beatmup image and signal processing library
3
Copyright (C) 2020, lnstadrum
4
5
This program is free software: you can redistribute it and/or modify
6
it under the terms of the GNU General Public License as published by
7
the Free Software Foundation, either version 3 of the License, or
8
(at your option) any later version.
9
10
This program is distributed in the hope that it will be useful,
11
but WITHOUT ANY WARRANTY; without even the implied warranty of
12
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
GNU General Public License for more details.
14
15
You should have received a copy of the GNU General Public License
16
along with this program. If not, see <http://www.gnu.org/licenses/>.
17
*/
18
19
#include "
inference_task.h
"
20
21
using namespace
Beatmup
;
22
using namespace
NNets;
23
24
25
void InferenceTask::connect(AbstractBitmap& image, AbstractOperation& operation, int inputIndex) {
    // Remember the (operation, input index) -> bitmap binding so the bitmap
    // can be locked for reading before every inference run.
    const auto key = std::make_pair(&operation, inputIndex);
    inputImages[key] = &image;
    // Plug the bitmap into the operation input right away.
    operation.setInput(image, inputIndex);
}
29
30
31
void InferenceTask::beforeProcessing(ThreadIndex threadCount, ProcessingTarget target, GraphicPipeline* gpu) {
    // Lock every connected input bitmap for reading on the GPU target.
    // Iterate by const reference: the original `auto it` copied the map's
    // pair<pair<AbstractOperation*, int>, AbstractBitmap*> value on every
    // iteration (clang-tidy: performance-for-range-copy).
    for (const auto& it : inputImages)
        readLock(gpu, it.second, ProcessingTarget::GPU);
    // Prepare all operations: read the model data from chunks and build GPU programs.
    // NOTE(review): *gpu is dereferenced unconditionally, so this assumes the
    // task is always scheduled with a valid GPU pipeline — confirm behavior for
    // CPU-only targets where `gpu` could be null.
    model.prepare(*gpu, data);
}
36
37
38
void InferenceTask::afterProcessing(ThreadIndex threadCount, GraphicPipeline* gpu, bool aborted) {
    // Wait until all operations submitted to the GPU are finished before
    // releasing the input bitmaps.
    if (gpu != nullptr)
        gpu->flush();
    // Unconditionally release every bitmap locked for this run.
    unlockAll();
}
43
44
45
bool InferenceTask::processOnGPU(GraphicPipeline& gpu, TaskThread& thread) {
    // Run the inference with the GPU pipeline attached.
    model.execute(thread, &gpu);
    // Always report success to the task scheduler.
    return true;
}
49
50
51
bool InferenceTask::process(TaskThread& thread) {
    // CPU path: run the inference without a GPU pipeline.
    model.execute(thread, nullptr);
    // Always report success to the task scheduler.
    return true;
}
Beatmup::AbstractBitmap
A very basic class for any image.
Definition:
abstract_bitmap.h:87
Beatmup::BitmapContentLock::readLock
void readLock(GraphicPipeline *gpu, AbstractBitmap *bitmap, ProcessingTarget target)
Locks content of a bitmap for reading using a specific processing target device.
Definition:
content_lock.cpp:37
Beatmup::BitmapContentLock::unlockAll
void unlockAll()
Unlocks all the locked bitmaps unconditionally.
Definition:
content_lock.cpp:167
Beatmup::GraphicPipeline
Internal low-level GPU control API.
Definition:
pipeline.h:33
Beatmup::GraphicPipeline::flush
void flush()
Waits until all operations submitted to GPU are finished.
Definition:
pipeline.cpp:931
Beatmup::NNets::AbstractOperation
Abstract neural net operation (layer).
Definition:
operation.h:46
Beatmup::NNets::AbstractOperation::setInput
virtual void setInput(Storage::View &&storage, int index=0)
Definition:
operation.cpp:52
Beatmup::NNets::InferenceTask::inputImages
std::map< std::pair< AbstractOperation *, int >, AbstractBitmap * > inputImages
Definition:
inference_task.h:35
Beatmup::NNets::InferenceTask::connect
void connect(AbstractBitmap &image, AbstractOperation &operation, int inputIndex=0)
Connects an image to a specific operation input.
Definition:
inference_task.cpp:25
Beatmup::NNets::InferenceTask::afterProcessing
void afterProcessing(ThreadIndex threadCount, GraphicPipeline *gpu, bool aborted) override
Instruction called after the task is executed.
Definition:
inference_task.cpp:38
Beatmup::NNets::InferenceTask::process
bool process(TaskThread &thread) override
Executes the task on CPU within a given thread.
Definition:
inference_task.cpp:51
Beatmup::NNets::InferenceTask::beforeProcessing
void beforeProcessing(ThreadIndex threadCount, ProcessingTarget target, GraphicPipeline *gpu) override
Instruction called before the task is executed.
Definition:
inference_task.cpp:31
Beatmup::NNets::InferenceTask::data
ChunkCollection & data
Definition:
inference_task.h:44
Beatmup::NNets::InferenceTask::processOnGPU
bool processOnGPU(GraphicPipeline &gpu, TaskThread &thread) override
Executes the task on GPU.
Definition:
inference_task.cpp:45
Beatmup::NNets::InferenceTask::model
Model & model
Definition:
inference_task.h:45
Beatmup::NNets::Model::prepare
virtual void prepare(GraphicPipeline &gpu, ChunkCollection &data)
Prepares all operations: reads the model data from chunks and builds GPU programs.
Definition:
model.cpp:143
Beatmup::NNets::Model::execute
void execute(TaskThread &thread, GraphicPipeline *gpu)
Runs the inference.
Definition:
model.cpp:339
Beatmup::TaskThread
Thread executing tasks.
Definition:
parallelism.h:154
inference_task.h
Beatmup
Definition:
basic_types.h:22
Beatmup::ThreadIndex
unsigned char ThreadIndex
number of threads / thread index
Definition:
parallelism.h:68
Beatmup::ProcessingTarget
ProcessingTarget
Definition:
basic_types.h:55
Beatmup::GPU
@ GPU
Definition:
basic_types.h:55
core
nnets
inference_task.cpp
Generated on Tue Nov 21 2023 13:54:27 for Beatmup by
1.9.1