Beatmup
model.cpp
Go to the documentation of this file.
1 /*
2  Beatmup image and signal processing library
3  Copyright (C) 2020, lnstadrum
4 
5  This program is free software: you can redistribute it and/or modify
6  it under the terms of the GNU General Public License as published by
7  the Free Software Foundation, either version 3 of the License, or
8  (at your option) any later version.
9 
10  This program is distributed in the hope that it will be useful,
11  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  GNU General Public License for more details.
14 
15  You should have received a copy of the GNU General Public License
16  along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18 
19 #include "model.h"
20 #include "../utils/bitset.h"
21 #include <sstream>
22 
23 using namespace Beatmup;
24 using namespace NNets;
25 
26 
27 Model::Model(Context& context, std::initializer_list<AbstractOperation*> ops):
28  ProgramBank(context),
29  profiler(nullptr), ready(false),
30  ops(ops.begin(), ops.end())
31 {
32  // establish feedforward connections
33  for (size_t i = 1; i < this->ops.size(); ++i)
34  addConnection(*this->ops[i - 1], *this->ops[i]);
35 }
36 
37 
38 Model::Model(Context& context): Model(context, {}) {}
39 
41  for (auto op : ops)
42  op->disconnect();
43  freeMemory();
44 }
45 
46 
47 void Model::append(AbstractOperation* newOp, bool connect) {
48  for (auto op : ops) {
49  if (op == newOp)
50  throw RuntimeError("Cannot add operation " + newOp->getName() + " to the model: already added");
51  else
52  if (op->getName() == newOp->getName())
53  throw RuntimeError("Cannot add operation " + newOp->getName() + " to the model: an operation with the same exists in the model");
54  }
55  ops.push_back(newOp);
56  if (connect)
57  addConnection(*ops[ops.size() - 2], *ops.back(), 0, 0, 0);
58  ready = false;
59 }
60 
61 
62 void Model::append(std::initializer_list<AbstractOperation*> newOps, bool connect) {
63  for (auto op : newOps)
64  append(op, connect);
65 }
66 
67 
68 void Model::addOperation(const std::string& opName, AbstractOperation* newOp) {
69  auto it = std::find_if(ops.begin(), ops.end(), [&opName](AbstractOperation* op){ return op->getName() == opName; });
70  if (it == ops.end())
71  throw InvalidArgument("Cannot find operation " + opName);
72  ops.insert(it, newOp);
73 }
74 
75 
77  auto it = std::find(ops.begin(), ops.end(), &op);
78  if (it == ops.end())
79  throw InvalidArgument("Operation " + op.getName() + " is not in the model");
80  ops.insert(it, newOp);
81 }
82 
83 
84 void Model::addConnection(const std::string& sourceOpName, const std::string& destOpName, int output, int input, int shuffle) {
85  auto& source = getOperation(sourceOpName);
86  auto& dest = getOperation(destOpName);
87  addConnection(source, dest, output, input, shuffle);
88 }
89 
90 
91 void Model::addConnection(AbstractOperation& source, AbstractOperation& dest, int output, int input, int shuffle) {
92  RuntimeError::check(0 <= output && output < source.getOutputCount(),
93  "Operation " + source.getName() + " does not have output #" + std::to_string(output));
94  RuntimeError::check(0 <= input && input < dest.getInputCount(),
95  "Operation " + dest.getName() + " does not have input #" + std::to_string(input));
96  connections.emplace(&source, Connection{ &dest, output, input, shuffle });
97  ready = false;
98 }
99 
100 
101 void Model::addOutput(const std::string& opName, int output) {
102  auto op = (*this)[opName];
103  auto outputs = userOutputs.equal_range(op);
104  for (auto i = outputs.first; i != outputs.second; ++i)
105  if (i->second.index == output)
106  // already added
107  return;
108  userOutputs.emplace(op, UserOutput{ output });
109  ready = false;
110 }
111 
112 
113 void Model::addOutput(const AbstractOperation& operation, int output) {
114  RuntimeError::check(isOperationInModel(operation), "Operation " + operation.getName() + " is not in the model");
115  auto outputs = userOutputs.equal_range(&operation);
116  for (auto i = outputs.first; i != outputs.second; ++i)
117  if (i->second.index == output)
118  // already added
119  return;
120  userOutputs.emplace(&operation, UserOutput{ output });
121  ready = false;
122 }
123 
124 
125 const float* Model::getOutputData(size_t& numSamples, const std::string& operation, int output) const {
126  return getOutputData(numSamples, *(*this)[operation], output);
127 }
128 
129 
130 const float* Model::getOutputData(size_t& numSamples, const AbstractOperation& operation, int output) const {
131  auto outputs = userOutputs.equal_range(&operation);
132  for (auto i = outputs.first; i != outputs.second; ++i)
133  if (i->second.index == output) {
134  numSamples = i->second.data.size();
135  return i->second.data.data();
136  }
137 
138  numSamples = 0;
139  return nullptr;
140 }
141 
142 
144  if (ready)
145  return;
146  freeMemory();
147 
148  std::map<Storage*, std::vector<AbstractOperation*>> refs;
149  // Contains ops that use a specific storage as input, meaning that it cannot be reused elsewhere.
150  // If no ops refer to a storage, it can be recycled.
151 
152  // find input depth capping
153  // If too many channels are sampled by an op having multiple inputs, its input storages will have reserved channels.
154  const int sampledChannelsLimit = 4 * gpu.getLimit(GraphicPipeline::Limit::TEXTURE_IMAGE_UNITS);
155  std::map<AbstractOperation*, int> sampledChannels; // op => number of sampled channels
156  for (auto conn : connections) {
157  auto* op = conn.second.dest;
158  // get the number of sampled channels
159  int min, max;
160  op->getSampledChannels(conn.second.input, min, max);
161  // cap the maximum: a storage will not have more channels than the limit anyway
162  max = std::min(max, sampledChannelsLimit);
163  // add to input channels
164  sampledChannels[op] += max;
165  }
166 
167  // loop through connected ops
168  data.open();
169  preparingProgress.reset(ops.size());
170  for (auto src : ops) {
171  std::vector<Beatmup::Object*> outputs(src->getOutputCount(), nullptr); // src output index => storage/vector bound to the output
172  std::vector<int> paddings(src->getOutputCount(), 0); // src output index => max padding over all connections
173  Bitset connectedOutputs(src->getOutputCount(), false);
174 
175  // loop over connections to find max paddings per output
176  auto connections = this->connections.equal_range(src);
177  for (auto i = connections.first; i != connections.second; ++i) {
178  const auto& conn = i->second;
179  paddings[conn.output] = std::max(paddings[conn.output], conn.dest->getInputPadding(conn.input));
180  }
181 
182  // loop over connections
183  for (auto i = connections.first; i != connections.second; ++i) {
184  const auto& conn = i->second;
185  auto* dst = conn.dest;
186  connectedOutputs.set(conn.output);
187 
188  if (outputs[conn.output])
189  RuntimeError::check(src->acceptsStorageOutput(conn.output) ^ src->acceptsVectorOutput(conn.output) ^ src->acceptsTextureOutput(conn.output),
190  "Operation output accepting different types can only have a single connection");
191  // To avoid output type mismatch when connecting second time
192 
193  // if a regular Storage is accepted by both source and destination
194  if (src->acceptsStorageOutput(conn.output) && dst->acceptsStorageInput(conn.input)) {
195  const Size size = src->getOutputSize(conn.output);
196  Storage* storage = nullptr;
197 
198  // check if the output storage is already allocated
199  if (outputs[conn.output]) {
200  storage = static_cast<Storage*>(outputs[conn.output]);
201  refs[storage].push_back(dst);
202  }
203 
204  else {
205  // decide on reserved depth (if capping)
206  int depthCapping = 0;
207  if (sampledChannels[dst] > sampledChannelsLimit) {
208  // the op exceeds the limit
209  int min, max;
210  dst->getSampledChannels(conn.input, min, max);
211  const int cappingMargin = std::min(sampledChannelsLimit, size[2]) - min; // this is how much we can cap at the current input
212  if (cappingMargin > 0) {
213  depthCapping = std::min(cappingMargin, sampledChannels[dst] - sampledChannelsLimit);
214  // reduce the excess
215  sampledChannels[dst] -= depthCapping;
216  }
217  }
218 
219  // try to recycle an existing storage first
220  for (auto& i : refs) {
221  auto candidate = i.first;
222  auto& users = i.second;
223  const int reservedDepth = sampledChannelsLimit - 4 * candidate->getNumberOfTextures();
224  // check if (1) size matches, (2) padding is sufficient, (3) reserved depth matches the number of channels to cap or no capping
225  if (candidate->getSize() == size && candidate->getPadding() >= dst->getInputPadding(conn.input) && (reservedDepth == depthCapping || depthCapping == 0)
226  && users.empty())
227  {
228  // found!
229  storage = candidate;
230  users.push_back(dst);
231  break;
232  }
233  if (storage)
234  break;
235  }
236 
237  // no matching storage found, allocate a new one
238  if (!storage) {
239  storage = (size[0] == 1 && size[1] == 1) ?
240  // allocate flat storage if the output size is of 1x1 pixels
241  &allocateFlatStorage(gpu, size[2]) :
242  &allocateStorage(gpu,
243  size,
244  src->usesGpu(), !src->usesGpu(),
245  paddings[conn.output],
246  depthCapping
247  );
248  refs.emplace(storage, std::vector<AbstractOperation*>{ dst });
249  }
250 
251  // mark output as allocated
252  outputs[conn.output] = storage;
253  }
254 
255  // connect
256  src->setOutput(*storage, conn.output);
257  if (conn.shuffle > 0)
258  dst->setInput(Storage::View(*storage, conn.shuffle), conn.input);
259  else
260  dst->setInput(*storage, conn.input);
261  }
262 
263  // if a Vector is accepted
264  else if (src->acceptsVectorOutput(conn.output) && dst->acceptsVectorInput(conn.input)) {
265  RuntimeError::check(conn.shuffle == 0, "Cannot shuffle vector");
266  GL::Vector* vector;
267 
268  // check if the output storage is already allocated
269  if (outputs[conn.output])
270  vector = static_cast<GL::Vector*>(outputs[conn.output]);
271  else {
272  vector = &allocateVector(gpu, src->getOutputSize(conn.output).volume());
273  outputs[conn.output] = vector;
274  }
275 
276  // connect
277  src->setOutput(*vector, conn.output);
278  dst->setInput(*vector, conn.input);
279  }
280 
281  // if a texture is accepted
282  else if (src->acceptsTextureOutput(conn.output) && dst->acceptsTextureInput(conn.input)) {
283  RuntimeError::check(conn.shuffle == 0, "Cannot shuffle texture");
284  InternalBitmap* texture;
285 
286  // check if the output storage is already allocated
287  if (outputs[conn.output])
288  texture = static_cast<InternalBitmap*>(outputs[conn.output]);
289  else
290  outputs[conn.output] = texture = &allocateTexture(gpu, src->getOutputSize(conn.output));
291 
292  // connect
293  src->setOutput(*texture, conn.output);
294  dst->setInput(*texture, conn.input);
295  }
296 
297  else
298  throw RuntimeError("Cannot connect " + src->getName() + " (output #" + std::to_string(conn.output) + ") "
299  "to " + dst->getName() + " (input #" + std::to_string(conn.input) + "): storage type mismatch");
300  }
301 
302  // allocate user outputs if not yet
303  auto userOutputs = this->userOutputs.equal_range(src);
304  for (auto i = userOutputs.first; i != userOutputs.second; ++i) {
305  int idx = i->second.index;
306  if (idx >= src->getOutputCount())
307  throw InvalidArgument("Operation " + src->getName() + " does not have output #" + std::to_string(idx));
308  if (!connectedOutputs[idx])
309  if (src->acceptsStorageOutput(idx)) {
310  src->setOutput(allocateStorage(gpu, src->getOutputSize(idx), src->usesGpu(), !src->usesGpu()), idx);
311  }
312  else if (src->acceptsVectorOutput(idx)) {
313  src->setOutput(allocateVector(gpu, src->getOutputSize(idx).volume()), idx);
314  }
315  }
316 
317  // prepare operation
318  src->prepare(gpu, data, *this);
319 
320  // remove references to storages used by the current operation. This allows their reuse in other connections.
321  for (auto& i : refs) {
322  auto& users = i.second;
323  for (auto op = users.begin(); op != users.end(); )
324  if (*op == src)
325  users.erase(op);
326  else
327  ++op;
328  }
329 
330  // advance the progress bar
332  }
333 
334  data.close();
335  ready = true;
336 }
337 
338 
340  if (gpu)
342 
343  // reset the progress tracker
344  inferenceProgress.reset(ops.size());
345 
346  // loop through ops
347  for (auto op : ops) {
348  if (thread.isTaskAborted())
349  return;
350 
351  // start profiling
352  if (thread.isManaging() && profiler)
353  (*profiler)(op->getName());
354 
355  // run operation
356  try {
357  if (gpu)
358  op->execute(thread, *gpu);
359  else
360  op->execute(thread);
361  } catch (const std::exception& ex) {
362  throw InferenceTimeError(*op, ex);
363  }
364 
365  // get user outputs
366  auto userOutputs = this->userOutputs.equal_range(op);
367  for (auto it = userOutputs.first; it != userOutputs.second; ++it) {
368  int idx = it->second.index;
369  auto& data = it->second.data;
370  if (gpu)
371  if (op->acceptsStorageOutput(idx)) {
372  // get data pointer from storage
373  auto view = op->getOutput(idx);
374  if (!view.getStorage().isUpToDate(ProcessingTarget::CPU))
375  view.getStorage().pull(*gpu);
376 
377  // copy to the vector
378  Storage::Scanner scan(view);
379  scan.move(0, 0);
380  data.resize(view.getSize().volume());
381  for (auto it = data.begin(); it != data.end(); it += view.getDepth()) {
382  scan.fill(it, data.end());
383  ++scan;
384  }
385  }
386  else if (op->acceptsVectorOutput(idx)) {
387  GL::Vector* vector;
388  op->getOutput(vector, idx);
389  vector->fetch(*gpu, data);
390  }
391  }
392 
393  if (thread.isManaging()) {
394  // stop profiler
395  if (profiler) {
396  gpu->flush(); // wait till GPU is done
397  profiler->lap();
398  }
399 
400  // increase inference progress
402  }
403  }
404 }
405 
406 
407 bool Model::isOperationInModel(const AbstractOperation& operation) const {
408  for (auto op : ops)
409  if (op == &operation)
410  return true;
411  return false;
412 }
413 
414 
416  for (auto storage : storages)
417  delete storage;
418  storages.clear();
419  for (auto vector : vectors)
420  delete vector;
421  vectors.clear();
422  for (auto texture : textures)
423  delete texture;
424  textures.clear();
425 }
426 
427 
428 Storage& Model::allocateStorage(GraphicPipeline& gpu, const Size size, bool forGpu, bool forCpu, const int pad, const int reservedDepth) {
429  Storage* storage = new Storage(context, gpu, size, pad, reservedDepth);
430  if (forGpu)
431  storage->allocate(gpu);
432  if (forCpu)
433  storage->allocate();
434  storages.push_back(storage);
435  return *storage;
436 }
437 
438 
440  Storage* storage = new Storage(context, gpu, Size(1, 1, size));
441  storage->allocate(gpu);
442  storages.push_back(storage);
443  return *storage;
444 }
445 
446 
449 #ifdef BEATMUP_OPENGLVERSION_GLES20
451 #else
453 #endif
454  GL::Vector* vector = new GL::Vector(context, gpu, size, format);
455  vectors.push_back(vector);
456  return *vector;
457 }
458 
459 
462  switch (size.getDepth()) {
463  case 1:
465  break;
466  case 3:
468  break;
469  case 4:
471  break;
472  default:
473  throw InvalidArgument("Unsupported depth: " + std::to_string(size.getDepth()));
474  }
475  textures.push_back(new InternalBitmap(context, pixelFormat, size.getWidth(), size.getHeight()));
476  return *textures.back();
477 }
478 
479 
480 bool Model::isPreceding(const AbstractOperation& first, const AbstractOperation& second) const {
481  for (size_t firstIdx = 0; firstIdx < ops.size(); ++firstIdx)
482  if (ops[firstIdx] == &first) {
483  for (size_t secondIdx = firstIdx + 1; secondIdx < ops.size(); ++secondIdx)
484  if (ops[secondIdx] == &second)
485  return true;
486  return false;
487  }
488  return false;
489 }
490 
491 
492 AbstractOperation* Model::operator[](const std::string& operationName) {
493  for (auto op : ops)
494  if (op->getName() == operationName)
495  return op;
496  throw InvalidArgument("Operation not found: " + operationName);
497 }
498 
499 
500 const AbstractOperation* Model::operator[](const std::string& operationName) const {
501  for (auto op : ops)
502  if (op->getName() == operationName)
503  return op;
504  throw InvalidArgument("Operation not found: " + operationName);
505 }
506 
507 
508 unsigned long Model::countMultiplyAdds() const {
509  unsigned long result = 0;
510  for (auto op : ops)
511  result += op->countMultiplyAdds();
512  return result;
513 }
514 
515 
516 unsigned long Model::countTexelFetches() const {
517  unsigned long result = 0;
518  for (auto op : ops)
519  result += op->countTexelFetches();
520  return result;
521 }
522 
523 
524 size_t Model::getMemorySize() const {
525  size_t size = 0;
526  for (auto& entry : storages)
527  size += entry->getMemorySize();
528  for (auto& entry : vectors)
529  size += entry->getMemorySize();
530  for (auto& entry : textures)
531  size += entry->getMemorySize();
532  return size;
533 }
534 
535 
536 Listing Model::serialize() const {
537  /** \page NNetsConnectionsSerialization Connections serialization
538  Every connection is serialized in a single block in \c connections part.
539 
540  Example:
541  \code{yaml}
542  - from: source operation name
543  to: destination operation name
544  from_output: 0 # output number of the source operation, defaults to 0
545  to_input: 0 # input number of the destination operation, defaults to 0
546  shuffle: 1 # shuffling step, defaults to 1
547  \endcode
548  Shuffling step description is given \ref NNetsShufflingExplained "here".
549  */
550  Listing listing;
551 
552  // serialize operations
553  for (const auto& op : ops)
554  listing.emplace("ops", op->serialize());
555 
556  // serialize connections
557  for (const auto& conn : connections) {
558  const auto& info = conn.second;
559  Listing::Block block;
560  block.set("from", conn.first->getName());
561  block.set("to", info.dest->getName());
562  if (info.output > 0)
563  block.set("from_output", info.output);
564  if (info.input > 0)
565  block.set("to_input", info.input);
566  if (info.shuffle > 0)
567  block.set("shuffle", info.shuffle);
568  listing.emplace("connections", std::move(block));
569  }
570 
571  // in case if no connections, add empty block
572  if (connections.empty())
573  listing.emplace("connections", {});
574 
575  return listing;
576 }
577 
578 
579 std::string Model::serializeToString() const {
580  Listing listing(serialize());
581  std::stringstream strstr;
582  listing.printOut(strstr);
583  return strstr.str();
584 }
585 
586 
588  Exception("Error in %s: %s", op.getName().c_str(), ex.what())
589 {}
A set of boolean flags.
Definition: bitset.h:30
void set(size_t i, bool value=true)
Definition: bitset.h:67
A key-value pair set storing pieces of arbitrary data (chunks) under string keys.
Definition: chunkfile.h:36
virtual void close()=0
Closes the collection after a reading session.
virtual void open()=0
Opens the collection to read chunks from it.
Basic class: task and memory management, any kind of static data.
Definition: context.h:59
Base class for all exceptions.
Definition: exception.h:37
Real-valued vector usable by GPU.
void fetch(GraphicPipeline &gpu, std::vector< float > &output) const
Grabs vector values back from GPU to user memory.
Format
Vector data format.
@ FIXED16
16 bit per element
@ FLOAT
32 bit per element, floating point
Internal low-level GPU control API.
Definition: pipeline.h:33
void switchMode(Mode mode)
Switches GPU mode.
Definition: pipeline.cpp:941
int getLimit(Limit limit) const
Definition: pipeline.cpp:936
@ INFERENCE
Textures are feature maps computed in fragment shaders.
@ TEXTURE_IMAGE_UNITS
maximum number of texture units per fragment shader
void flush()
Waits until all operations submitted to GPU are finished.
Definition: pipeline.cpp:931
Bitmap whose memory is managed by the Beatmup engine.
Set of key-value pairs.
Definition: listing.h:46
void set(const std::string &key, T value)
Sets a value for a specific key.
Parser of simple YAML-like listings.
Definition: listing.h:40
void printOut(std::ostream &stream)
Prints out the listing to an output stream.
Definition: listing.cpp:184
void emplace(const std::string &key, Block &&block)
Adds a block to a chapter.
Definition: listing.cpp:200
Abstract neural net operation (layer).
Definition: operation.h:46
virtual int getOutputCount() const
Returns number of operation outputs.
Definition: operation.h:135
virtual int getInputCount() const
Returns number of operation inputs.
Definition: operation.h:129
std::string getName() const
Definition: operation.h:242
Wrapper for exceptions occuring during the model inference.
Definition: model.h:357
InferenceTimeError(const AbstractOperation &op, const std::exception &ex)
Definition: model.cpp:587
Neural net model.
Definition: model.h:92
size_t getMemorySize() const
Returns the amount of texture memory in bytes currently allocated by the model to run the inference.
Definition: model.cpp:524
Storage & allocateFlatStorage(GraphicPipeline &gpu, const int size)
Allocates a new flat storage.
Definition: model.cpp:439
bool ready
if true, ops are connected to each other and storages are allocated
Definition: model.h:125
std::vector< AbstractOperation * > ops
model operations
Definition: model.h:122
void freeMemory()
Frees all allocated storages.
Definition: model.cpp:415
std::multimap< const AbstractOperation *, UserOutput > userOutputs
operation => user output mapping
Definition: model.h:114
OperationClass & getOperation(const std::string &operationName)
Retrieves an operation by its name.
Definition: model.h:303
unsigned long countTexelFetches() const
Provides an estimation of the total number of texels fetched by all the operations in the model per i...
Definition: model.cpp:516
Storage & allocateStorage(GraphicPipeline &gpu, const Size size, bool forGpu=true, bool forCpu=false, const int pad=0, const int reservedChannels=0)
Allocates a new storage.
Definition: model.cpp:428
ProgressTracking inferenceProgress
inference progress
Definition: model.h:124
Profiler * profiler
pointer to a Profiler attached to the model
Definition: model.h:119
void addConnection(AbstractOperation &source, AbstractOperation &dest, int output=0, int input=0, int shuffle=0)
Definition: model.cpp:91
Listing serialize() const
Returns serialized representation of the model as a Listing.
ProgressTracking preparingProgress
model preparation progress
Definition: model.h:123
void addOutput(const std::string &operation, int output=0)
Enables reading output data from the model memory through getOutputData().
Definition: model.cpp:101
std::vector< Storage * > storages
allocated storages used during the inference
Definition: model.h:116
bool isOperationInModel(const AbstractOperation &operation) const
Checks if a specific operation makes part of the model.
Definition: model.cpp:407
unsigned long countMultiplyAdds() const
Provides an estimation of the number of multiply-adds characterizing the model complexity.
Definition: model.cpp:508
GL::Vector & allocateVector(GraphicPipeline &gpu, const int size)
Allocates a vector that can be used as operation input or output.
Definition: model.cpp:447
Model(Context &context, std::initializer_list< AbstractOperation * > ops)
Instantiates a model from a list of operations interconnecting them in a feedforward fashion.
Definition: model.cpp:27
std::multimap< const AbstractOperation *, Connection > connections
source operation => connection descriptor mapping
Definition: model.h:113
virtual void prepare(GraphicPipeline &gpu, ChunkCollection &data)
Prepares all operations: reads the model data from chunks and builds GPU programs.
Definition: model.cpp:143
std::string serializeToString() const
Returns serialized representation of the model as a string.
Definition: model.cpp:579
AbstractOperation * operator[](const std::string &operationName)
Definition: model.cpp:492
void execute(TaskThread &thread, GraphicPipeline *gpu)
Runs the inference.
Definition: model.cpp:339
std::vector< InternalBitmap * > textures
allocated images used during the inference
Definition: model.h:118
void addOperation(const std::string &opName, AbstractOperation *newOp)
Adds a new operation to the model before another operation in the execution order.
Definition: model.cpp:68
bool isPreceding(const AbstractOperation &first, const AbstractOperation &second) const
Checks whether an operation goes before another operation in the model according the ops execution or...
Definition: model.cpp:480
std::vector< GL::Vector * > vectors
allocated vectors used during the inference
Definition: model.h:117
InternalBitmap & allocateTexture(GraphicPipeline &gpu, const Size size)
Allocates a texture that can be used as operation input or output.
Definition: model.cpp:460
void append(AbstractOperation *newOp, bool connect=false)
Adds a new operation to the model.
Definition: model.cpp:47
const float * getOutputData(size_t &numSamples, const std::string &operation, int output=0) const
Reads data from the model memory.
Definition: model.cpp:125
Operation 3D input/output size.
Definition: storage.h:37
Scans a storageview in RAM for further computations on CPU.
Definition: storage.h:466
void move(int x, int y)
Sets the pointer to a specific spatial position.
Definition: storage.cpp:663
void fill(T begin, T limit)
Extracts the content of feature maps at the current position.
Definition: storage.h:508
Maps a 3D tensor onto a storage.
Definition: storage.h:308
3D tensor stored in a set of textures.
Definition: storage.h:126
void allocate(GraphicPipeline &gpu)
Allocates the storage in GPU memory.
Definition: storage.cpp:167
void reset(unsigned int max)
Resets the progress to zero.
static void check(const bool condition, const std::string &message)
Definition: exception.h:64
Thread executing tasks.
Definition: parallelism.h:154
bool isManaging() const
Definition: parallelism.h:172
virtual bool isTaskAborted() const =0
Returns true if the task is asked to stop from outside.
@ SingleByte
single channel of 8 bits per pixel (like grayscale), unsigned integer values
@ QuadByte
4 channels of 8 bits per pixel (like RGBA), unsigned integer values
@ TripleByte
3 channels of 8 bits per pixel (like RGB), unsigned integer values
std::string to_string(Beatmup::NNets::ActivationFunction function)
CustomPoint< numeric > min(const CustomPoint< numeric > &a, const CustomPoint< numeric > &b)
Definition: geometry.h:724
CustomPoint< numeric > max(const CustomPoint< numeric > &a, const CustomPoint< numeric > &b)
Definition: geometry.h:728
Connection descriptor.
Definition: model.h:98
A user-defined output descriptor.
Definition: model.h:108
JNIEnv jobject jint format
jlong jint jint jint jint pixelFormat
jlong jint jint end
Beatmup::IntPoint result
jlong jobject size
return(jlong) new Beatmup jlong jstring src
jlong jint op
JNIEnv jlong jobject jstring opName