Beatmup
inference_task.cpp
/*
    Beatmup image and signal processing library
    Copyright (C) 2020, lnstadrum

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "inference_task.h"

using namespace Beatmup;
using namespace NNets;


void InferenceTask::connect(AbstractBitmap& image, AbstractOperation& operation, int inputIndex) {
    inputImages[std::make_pair(&operation, inputIndex)] = &image;
    operation.setInput(image, inputIndex);
}


void InferenceTask::beforeProcessing(ThreadIndex threadCount, ProcessingTarget target, GraphicPipeline* gpu) {
    for (auto it : inputImages)
        readLock(gpu, it.second, ProcessingTarget::GPU);
    model.prepare(*gpu, data);
}


void InferenceTask::afterProcessing(ThreadIndex threadCount, GraphicPipeline* gpu, bool aborted) {
    if (gpu)
        gpu->flush();
    unlockAll();
}


bool InferenceTask::processOnGPU(GraphicPipeline& gpu, TaskThread& thread) {
    model.execute(thread, &gpu);
    return true;
}


bool InferenceTask::process(TaskThread& thread) {
    model.execute(thread, nullptr);
    return true;
}
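
For orientation, here is a rough usage sketch of this class. It is a minimal example assuming the surrounding Beatmup API: the InferenceTask(Model&, ChunkCollection&) constructor, Model::getFirstOperation() and Context::performTask() are assumptions not defined in this file; only connect() comes from the listing above.

// Hedged usage sketch. Everything outside inference_task.cpp (Context, the Model
// accessors, the chunk collection, performTask, the InferenceTask constructor)
// is an assumption about the surrounding library, not something shown in this file.
#include "inference_task.h"

void runModelOnImage(Beatmup::Context& ctx,
                     Beatmup::NNets::Model& model,
                     Beatmup::ChunkCollection& modelData,
                     Beatmup::AbstractBitmap& input) {
    using Beatmup::NNets::InferenceTask;

    // The task keeps references to the model and the chunk collection holding its weights.
    InferenceTask inference(model, modelData);

    // Bind the input image to input #0 of the model's first operation;
    // this stores the bitmap in inputImages and forwards it via setInput().
    inference.connect(input, model.getFirstOperation(), 0);

    // Running the task triggers beforeProcessing() (lock inputs, prepare the model),
    // then processOnGPU()/process(), and finally afterProcessing() (flush, unlock).
    ctx.performTask(inference);
}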
Referenced symbols:

AbstractBitmap
    A very basic class for any image.

void readLock(GraphicPipeline *gpu, AbstractBitmap *bitmap, ProcessingTarget target)
    Locks content of a bitmap for reading using a specific processing target device.

void unlockAll()
    Unlocks all the locked bitmaps unconditionally.

GraphicPipeline (pipeline.h:33)
    Internal low-level GPU control API.

void flush() (pipeline.cpp:931)
    Waits until all operations submitted to GPU are finished.

AbstractOperation (operation.h:46)
    Abstract neural net operation (layer).

virtual void setInput(Storage::View &&storage, int index=0) (operation.cpp:52)

std::map< std::pair< AbstractOperation *, int >, AbstractBitmap * > inputImages

void connect(AbstractBitmap &image, AbstractOperation &operation, int inputIndex=0)
    Connects an image to a specific operation input.

void afterProcessing(ThreadIndex threadCount, GraphicPipeline *gpu, bool aborted) override
    Instruction called after the task is executed.

bool process(TaskThread &thread) override
    Executes the task on CPU within a given thread.

void beforeProcessing(ThreadIndex threadCount, ProcessingTarget target, GraphicPipeline *gpu) override
    Instruction called before the task is executed.

bool processOnGPU(GraphicPipeline &gpu, TaskThread &thread) override
    Executes the task on GPU.

virtual void prepare(GraphicPipeline &gpu, ChunkCollection &data) (model.cpp:143)
    Prepares all operations: reads the model data from chunks and builds GPU programs.

void execute(TaskThread &thread, GraphicPipeline *gpu) (model.cpp:339)
    Runs the inference.

TaskThread (parallelism.h:154)
    Thread executing tasks.

unsigned char ThreadIndex (parallelism.h:68)
    Number of threads / thread index.

ProcessingTarget (basic_types.h:55)
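
The four overrides listed above form the task lifecycle used by this class. The sketch below only illustrates the calling order with a hypothetical single-threaded driver; runTask() is not a Beatmup API, and real scheduling is handled by the library's thread pool.

// Hypothetical driver showing the hook order implied by the overrides above:
// beforeProcessing -> process/processOnGPU -> afterProcessing. Not a Beatmup API.
#include "inference_task.h"   // transitively provides AbstractTask, TaskThread, GraphicPipeline

bool runTask(Beatmup::AbstractTask& task,
             Beatmup::TaskThread& thread,
             Beatmup::GraphicPipeline* gpu) {
    const Beatmup::ThreadIndex threadCount = 1;   // single worker for simplicity

    // For InferenceTask: lock input bitmaps and prepare the model (build GPU programs).
    task.beforeProcessing(threadCount,
                          gpu ? Beatmup::ProcessingTarget::GPU : Beatmup::ProcessingTarget::CPU,
                          gpu);

    // Run on GPU when a pipeline is available, otherwise fall back to CPU.
    const bool ok = gpu ? task.processOnGPU(*gpu, thread)
                        : task.process(thread);

    // For InferenceTask: flush pending GPU work and unlock the inputs
    // (aborted is false in this simplified, non-cancellable path).
    task.afterProcessing(threadCount, gpu, false);
    return ok;
}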