This repository was archived by the owner on Jul 17, 2024. It is now read-only.
@@ -9,9 +9,9 @@
#include <algorithm>
#include <numeric>
#include <opencv2/opencv.hpp>
#include <stdlib.h>
#include <string>
#include <vector>
#include <stdlib.h>

#include "model.h"

7 changes: 6 additions & 1 deletion interfaces/common/include/ModelInterface.h
@@ -212,6 +212,7 @@ class SourceNode : public Node
SourceNode() = default;
SourceNode(ell::model::SourceNodeBase* other, std::shared_ptr<ell::model::Model> model);
ell::model::SourceNodeBase* GetSourceNode() const;

private:
ell::model::SourceNodeBase* _sourceNode;
#endif
@@ -235,6 +236,7 @@ class SinkNode : public Node
SinkNode() = default;
SinkNode(ell::model::SinkNodeBase* other, std::shared_ptr<ell::model::Model> model);
ell::model::SinkNodeBase* GetSinkNode() const;

private:
ell::model::SinkNodeBase* _sinkNode;
#endif
@@ -380,7 +382,10 @@ class Map
std::vector<int64_t> ComputeInt64(const std::vector<int64_t>& inputData);

#ifndef SWIG
std::shared_ptr<ell::model::Map> GetInnerMap() const { return _map; }
std::shared_ptr<ell::model::Map> GetInnerMap() const
{
return _map;
}
#endif

private:
@@ -8,8 +8,8 @@
#pragma once

#include "MathInterface.h"
#include "Ports.h"
#include "NeuralLayersInterface.h"
#include "Ports.h"

#ifndef SWIG
#include <predictors/include/NeuralNetworkPredictor.h>
18 changes: 7 additions & 11 deletions interfaces/common/src/ModelBuilderInterface.cpp
@@ -30,16 +30,16 @@
#include <nodes/include/DCTNode.h>
#include <nodes/include/DTWDistanceNode.h>
#include <nodes/include/DotProductNode.h>
#include <nodes/include/FastGRNNNode.h>
#include <nodes/include/FFTNode.h>
#include <nodes/include/FastGRNNNode.h>
#include <nodes/include/FilterBankNode.h>
#include <nodes/include/GRUNode.h>
#include <nodes/include/HammingWindowNode.h>
#include <nodes/include/HannWindowNode.h>
#include <nodes/include/IIRFilterNode.h>
#include <nodes/include/LSTMNode.h>
#include <nodes/include/MatrixMatrixMultiplyNode.h>
#include <nodes/include/MatrixMatrixMultiplyCodeNode.h>
#include <nodes/include/MatrixMatrixMultiplyNode.h>
#include <nodes/include/MatrixVectorMultiplyNode.h>
#include <nodes/include/NeuralNetworkPredictorNode.h>
#include <nodes/include/NodeOperations.h>
@@ -778,8 +778,7 @@ Node ModelBuilder::AddIIRFilterNode(Model model, PortElements input, std::vector
case PortType::real:
newNode = model.GetModel()->AddNode<ell::nodes::IIRFilterNode<double>>(ell::model::PortElements<double>(elements), bCoeffs, aCoeffs);
break;
case PortType::smallReal:
{
case PortType::smallReal: {
std::vector<float> bFloatCoeffs(bCoeffs.begin(), bCoeffs.end());
std::vector<float> aFloatCoeffs(aCoeffs.begin(), aCoeffs.end());
newNode = model.GetModel()->AddNode<ell::nodes::IIRFilterNode<float>>(ell::model::PortElements<float>(elements), bFloatCoeffs, aFloatCoeffs);
@@ -1229,14 +1228,12 @@ Node ModelBuilder::AddActivationLayerNode(Model model, PortElements input, const
case ActivationType::hardSigmoid:
case ActivationType::hardTanh:
case ActivationType::sigmoid:
case ActivationType::tanh:
{
case ActivationType::tanh: {
auto activationLayer = ell::predictors::neural::ActivationLayer<ElementType>(parameters, ell::api::predictors::neural::ActivationLayer::CreateActivation<ElementType>(layer.activation));
newNode = model.GetModel()->AddNode<ell::nodes::ActivationLayerNode<ElementType>>(ell::model::PortElements<ElementType>(elements), activationLayer);
break;
}
case ActivationType::leaky:
{
case ActivationType::leaky: {
// can't use the ell::api::predictors::CreateActivation helper method in this case because the neural::LeakyReLUActivation requires the alpha value parameter.
using ApiLeakyReLUActivationLayer = ell::api::predictors::neural::LeakyReLUActivationLayer;
auto* activationlayer = const_cast<ell::api::predictors::neural::ActivationLayer*>(&layer);
@@ -1251,11 +1248,10 @@ Node ModelBuilder::AddActivationLayerNode(Model model, PortElements input, const
implementation = new ell::predictors::neural::LeakyReLUActivation<ElementType>();
}
newNode = model.GetModel()->AddNode<ell::nodes::ActivationLayerNode<ElementType>>(ell::model::PortElements<ElementType>(elements),
ell::predictors::neural::ActivationLayer<ElementType>(parameters, implementation));
ell::predictors::neural::ActivationLayer<ElementType>(parameters, implementation));
break;
}
case ActivationType::prelu:
{
case ActivationType::prelu: {
// can't use the ell::api::predictors::CreateActivation helper method in this case because the neural::PReLUActivationLayer requires the alpha value parameter.
using ApiPReLUActivationLayer = ell::api::predictors::neural::PReLUActivationLayer;
auto* activationlayer = const_cast<ell::api::predictors::neural::ActivationLayer*>(&layer);
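Aside from the include reordering, the substantive churn in this file is brace placement: when a case body opens its own scope, the opening brace now sits on the label line instead of the line below it. A minimal sketch of the resulting style, with hypothetical names standing in for the actual ELL node types:

#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-ins; only the brace layout mirrors the diff.
enum class PortType { real, smallReal };

std::string DescribeCoefficients(PortType type, const std::vector<double>& coeffs)
{
    switch (type)
    {
    case PortType::real:
        return std::to_string(coeffs.size()) + " double coefficients";
    case PortType::smallReal: {
        // The case label opens a scope, so the brace moves up onto the label line.
        std::vector<float> floatCoeffs(coeffs.begin(), coeffs.end());
        return std::to_string(floatCoeffs.size()) + " float coefficients";
    }
    default:
        throw std::invalid_argument("unsupported port type");
    }
}

The same layout change repeats in NeuralNetworkPredictorInterface.cpp and DataLoaders.h below.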
64 changes: 25 additions & 39 deletions interfaces/common/src/NeuralNetworkPredictorInterface.cpp
@@ -123,14 +123,13 @@ namespace api

auto& parameters = layers.front()->parameters;
// Construct the input layer
UnderlyingInputParameters inputParameters =
{
{ static_cast<size_t>(parameters.inputShape.rows - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast<size_t>(parameters.inputShape.columns - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast<size_t>(parameters.inputShape.channels) },
underlying::NoPadding(),
{ static_cast<size_t>(parameters.inputShape.rows), static_cast<size_t>(parameters.inputShape.columns), static_cast<size_t>(parameters.inputShape.channels) },
parameters.inputPaddingParameters,
inputScaleFactor
};
UnderlyingInputParameters inputParameters = {
{ static_cast<size_t>(parameters.inputShape.rows - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast<size_t>(parameters.inputShape.columns - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast<size_t>(parameters.inputShape.channels) },
underlying::NoPadding(),
{ static_cast<size_t>(parameters.inputShape.rows), static_cast<size_t>(parameters.inputShape.columns), static_cast<size_t>(parameters.inputShape.channels) },
parameters.inputPaddingParameters,
inputScaleFactor
};
auto inputLayer = std::make_unique<underlying::InputLayer<ElementType>>(inputParameters);

UnderlyingLayers underlyingLayers;
@@ -200,8 +199,7 @@ namespace api
case neural::ActivationType::tanh:
activation = neural::ActivationLayer::CreateActivation<ElementType>(layer.activation);
break;
case neural::ActivationType::leaky:
{
case neural::ActivationType::leaky: {
ActivationImplType* implementation = nullptr;
if (LayerIs<neural::LeakyReLUActivationLayer>(&layer))
{
@@ -215,8 +213,7 @@ namespace api
activation = ell::predictors::neural::Activation<ElementType>(implementation);
break;
}
case neural::ActivationType::prelu:
{
case neural::ActivationType::prelu: {
auto& preluApiLayer = LayerAs<neural::PReLUActivationLayer>(&layer);
TensorType alpha(preluApiLayer.alpha.shape.rows, preluApiLayer.alpha.shape.columns, preluApiLayer.alpha.shape.channels, CastVector<ElementType>(preluApiLayer.alpha.data));
activation = ell::predictors::neural::Activation<ElementType>(new underlying::ParametricReLUActivation<ElementType>(alpha));
@@ -308,26 +305,23 @@ namespace api
{
// Set the layer parameters. Note that if this is the first layer, we set the input reference to the output of the InputLayer.
// Otherwise, we set it to the output of the last layer.
UnderlyingLayerParameters parameters =
{
((underlyingLayers.size() > 0) ? underlyingLayers.back()->GetOutput() : underlyingInputLayer->GetOutput()),
layer->parameters.inputPaddingParameters,
{ static_cast<size_t>(layer->parameters.outputShape.rows), static_cast<size_t>(layer->parameters.outputShape.columns), static_cast<size_t>(layer->parameters.outputShape.channels) },
layer->parameters.outputPaddingParameters,
};
UnderlyingLayerParameters parameters = {
((underlyingLayers.size() > 0) ? underlyingLayers.back()->GetOutput() : underlyingInputLayer->GetOutput()),
layer->parameters.inputPaddingParameters,
{ static_cast<size_t>(layer->parameters.outputShape.rows), static_cast<size_t>(layer->parameters.outputShape.columns), static_cast<size_t>(layer->parameters.outputShape.channels) },
layer->parameters.outputPaddingParameters,
};

// Instantiate the specific layer type
underlying::LayerType layerType = layer->GetLayerType();
switch (layerType)
{
case (underlying::LayerType::activation):
{
case (underlying::LayerType::activation): {
auto& apiLayer = LayerAs<neural::ActivationLayer>(layer);
underlyingLayers.push_back(CreateActivationLayer(apiLayer, parameters));
}
break;
case (underlying::LayerType::batchNormalization):
{
case (underlying::LayerType::batchNormalization): {
auto& apiLayer = LayerAs<neural::BatchNormalizationLayer>(layer);
auto epsilonSummand = (apiLayer.epsilonSummand == neural::EpsilonSummand::variance) ? underlying::EpsilonSummand::Variance : underlying::EpsilonSummand::SqrtVariance;
underlyingLayers.push_back(std::make_unique<underlying::BatchNormalizationLayer<ElementType>>(parameters,
@@ -337,36 +331,31 @@ namespace api
epsilonSummand));
}
break;
case (underlying::LayerType::bias):
{
case (underlying::LayerType::bias): {
auto& apiLayer = LayerAs<neural::BiasLayer>(layer);
underlyingLayers.push_back(std::make_unique<underlying::BiasLayer<ElementType>>(parameters, CastVector<ElementType>(apiLayer.bias)));
}
break;
case (underlying::LayerType::binaryConvolution):
{
case (underlying::LayerType::binaryConvolution): {
auto& apiLayer = LayerAs<neural::BinaryConvolutionalLayer>(layer);
TensorType weights(apiLayer.weights.shape.rows, apiLayer.weights.shape.columns, apiLayer.weights.shape.channels, CastVector<ElementType>(apiLayer.weights.data));
underlyingLayers.push_back(std::make_unique<underlying::BinaryConvolutionalLayer<ElementType>>(parameters, apiLayer.convolutionalParameters, weights));
}
break;
case (underlying::LayerType::convolution):
{
case (underlying::LayerType::convolution): {
auto& apiLayer = LayerAs<neural::ConvolutionalLayer>(layer);
TensorType weights(apiLayer.weights.shape.rows, apiLayer.weights.shape.columns, apiLayer.weights.shape.channels, CastVector<ElementType>(apiLayer.weights.data));
underlyingLayers.push_back(std::make_unique<underlying::ConvolutionalLayer<ElementType>>(parameters, apiLayer.convolutionalParameters, weights));
}
break;
case (underlying::LayerType::fullyConnected):
{
case (underlying::LayerType::fullyConnected): {
auto& apiLayer = LayerAs<neural::FullyConnectedLayer>(layer);

TensorType weights(apiLayer.weights.shape.rows, apiLayer.weights.shape.columns, apiLayer.weights.shape.channels, CastVector<ElementType>(apiLayer.weights.data));
underlyingLayers.push_back(std::make_unique<underlying::FullyConnectedLayer<ElementType>>(parameters, weights));
}
break;
case (underlying::LayerType::pooling):
{
case (underlying::LayerType::pooling): {
auto& apiLayer = LayerAs<neural::PoolingLayer>(layer);
if (apiLayer.poolingType == neural::PoolingType::max)
{
@@ -378,20 +367,17 @@ namespace api
}
}
break;
case (underlying::LayerType::region):
{
case (underlying::LayerType::region): {
auto& apiLayer = LayerAs<neural::RegionDetectionLayer>(layer);
underlyingLayers.push_back(std::make_unique<underlying::RegionDetectionLayer<ElementType>>(parameters, apiLayer.detectionParameters));
}
break;
case (underlying::LayerType::scaling):
{
case (underlying::LayerType::scaling): {
auto& apiLayer = LayerAs<neural::ScalingLayer>(layer);
underlyingLayers.push_back(std::make_unique<underlying::ScalingLayer<ElementType>>(parameters, CastVector<ElementType>(apiLayer.scales)));
}
break;
case (underlying::LayerType::softmax):
{
case (underlying::LayerType::softmax): {
underlyingLayers.push_back(std::make_unique<underlying::SoftmaxLayer<ElementType>>(parameters));
}
break;
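The comment above about setting the layer input reference ("if this is the first layer, we set the input reference to the output of the InputLayer; otherwise ... the output of the last layer") is the rule the reformatted parameter initializer expresses. A stripped-down sketch of that chaining rule, with hypothetical types in place of the ELL layer classes:

#include <cstddef>
#include <memory>
#include <vector>

// Hypothetical mini-model of the chaining rule; not the ELL API.
struct Layer
{
    std::vector<double> output;
    const std::vector<double>* input = nullptr;
};

std::vector<std::unique_ptr<Layer>> Chain(const Layer& inputLayer, std::size_t count)
{
    std::vector<std::unique_ptr<Layer>> layers;
    for (std::size_t i = 0; i < count; ++i)
    {
        auto layer = std::make_unique<Layer>();
        // The first layer references the input layer's output; every later
        // layer references the output of the layer added just before it.
        layer->input = layers.empty() ? &inputLayer.output : &layers.back()->output;
        layers.push_back(std::move(layer));
    }
    return layers;
}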
14 changes: 5 additions & 9 deletions libraries/common/include/DataLoaders.h
@@ -167,13 +167,11 @@ namespace common
ptrdiff_t callbackAddress = 0;
switch (map.GetInputType())
{
case model::Port::PortType::smallReal:
{
case model::Port::PortType::smallReal: {
callbackAddress = reinterpret_cast<ptrdiff_t>(&InputCallback_Float);
break;
}
case model::Port::PortType::real:
{
case model::Port::PortType::real: {
callbackAddress = reinterpret_cast<ptrdiff_t>(&InputCallback_Double);
break;
}
@@ -216,8 +214,7 @@ namespace common
auto type = map.GetInputType();
switch (type)
{
case model::Port::PortType::smallReal:
{
case model::Port::PortType::smallReal: {
return input.template Transform<ExampleType>([&compiledMap](const ExampleType& example) {
auto data = example.GetDataVector().ToArray();
std::vector<float> smallData(data.size());
Expand All @@ -227,8 +224,7 @@ namespace common
return ExampleType(std::move(transformedDataVector), example.GetMetadata());
});
}
case model::Port::PortType::real:
{
case model::Port::PortType::real: {
return input.template Transform<ExampleType>([&compiledMap](const ExampleType& example) {
compiledMap.SetInputValue(0, example.GetDataVector().ToArray());
auto transformedDataVector = compiledMap.template ComputeOutput<typename ExampleType::DataVectorType>(0);
Expand All @@ -237,7 +233,7 @@ namespace common
}
default:
throw utilities::InputException(utilities::InputExceptionErrors::typeMismatch,
utilities::FormatString("Unexpected input type %d, expecting float or double", type));
utilities::FormatString("Unexpected input type %d, expecting float or double", type));
}
}
}
6 changes: 3 additions & 3 deletions libraries/common/src/LoadModel.cpp
@@ -30,8 +30,8 @@
#include <nodes/include/DiagonalConvolutionNode.h>
#include <nodes/include/DotProductNode.h>
#include <nodes/include/ExtremalValueNode.h>
#include <nodes/include/FastGRNNNode.h>
#include <nodes/include/FFTNode.h>
#include <nodes/include/FastGRNNNode.h>
#include <nodes/include/FilterBankNode.h>
#include <nodes/include/ForestPredictorNode.h>
#include <nodes/include/GRUNode.h>
@@ -41,8 +41,8 @@
#include <nodes/include/L2NormSquaredNode.h>
#include <nodes/include/LSTMNode.h>
#include <nodes/include/LinearPredictorNode.h>
#include <nodes/include/MatrixMatrixMultiplyNode.h>
#include <nodes/include/MatrixMatrixMultiplyCodeNode.h>
#include <nodes/include/MatrixMatrixMultiplyNode.h>
#include <nodes/include/MatrixVectorMultiplyNode.h>
#include <nodes/include/MatrixVectorProductNode.h>
#include <nodes/include/MovingAverageNode.h>
@@ -53,8 +53,8 @@
#include <nodes/include/RNNNode.h>
#include <nodes/include/ReceptiveFieldMatrixNode.h>
#include <nodes/include/ReinterpretLayoutNode.h>
#include <nodes/include/ReorderDataNode.h>
#include <nodes/include/ReorderDataCodeNode.h>
#include <nodes/include/ReorderDataNode.h>
#include <nodes/include/ScalingNode.h>
#include <nodes/include/SimpleConvolutionNode.h>
#include <nodes/include/SinkNode.h>
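Only include order changes in this file. The new order follows plain byte-wise string comparison, where uppercase sorts before lowercase; this is presumably what the project's clang-format include sorting produces, and it explains why FFTNode.h now precedes FastGRNNNode.h and the ...CodeNode.h headers precede their plain counterparts. A quick check of the three pairs that moved:

#include <cassert>
#include <string>

int main()
{
    // Byte-wise comparison puts 'F' (0x46) before 'a' (0x61) and 'C' before 'N'.
    assert(std::string("FFTNode.h") < std::string("FastGRNNNode.h"));
    assert(std::string("MatrixMatrixMultiplyCodeNode.h") < std::string("MatrixMatrixMultiplyNode.h"));
    assert(std::string("ReorderDataCodeNode.h") < std::string("ReorderDataNode.h"));
    return 0;
}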
2 changes: 1 addition & 1 deletion libraries/common/src/MapCompilerArguments.cpp
@@ -198,7 +198,7 @@ namespace common
"gva",
"The number of bytes to align global buffers to",
32);

parser.AddOption(
skip_ellcode,
"skip_ellcode",
3 changes: 2 additions & 1 deletion libraries/data/test/src/DataVector_test.cpp
@@ -120,7 +120,7 @@ void DataVectorCopyAsTest(std::initializer_list<double> list, bool testDense = t

if (testDense)
{
auto d = data::TransformAs<DataVectorType1, data::IterationPolicy::all, DataVectorType2>(a, [](data::IndexValue x) { return x.value + 3; }, 3);
auto d = data::TransformAs<DataVectorType1, data::IterationPolicy::all, DataVectorType2>(
a, [](data::IndexValue x) { return x.value + 3; }, 3);
auto dv = d.ToArray();
std::vector<double> r{ av[0] + 3, av[1] + 3, av[2] + 3 };
testing::ProcessTest(name1 + "::TransformAs<all," + name2 + ">", testing::IsEqual(r, dv, 1.0e-6));
2 changes: 1 addition & 1 deletion libraries/dsp/include/FilterBank.h
@@ -154,7 +154,7 @@ namespace dsp
/// | / | \ |
/// |---|---|-.-|-.-|-.-|---|---|
/// 0 1 2 3 4 5 6 7
///
///
/// with offset 0 the result would be (I3 * 1) but with offset 0.5 it would be (I2 * 0.5) + (I3 * 0.5).</summary>
TriangleFilterBank(size_t windowSize, double sampleRate, size_t numFilters, double offset = 0);
TriangleFilterBank(size_t windowSize, double sampleRate, size_t numFilters, size_t numFiltersToUse, double offset = 0);
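The only change in this header is trailing whitespace inside the doc comment, but the offset example in that comment ("with offset 0 the result would be (I3 * 1) but with offset 0.5 it would be (I2 * 0.5) + (I3 * 0.5)") is worth a concrete check. The sketch below is one illustrative reading of that sentence — a peak shifted by the offset splits its weight linearly between two adjacent bins — and not the actual TriangleFilterBank implementation:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical helper: response at a triangle's peak when the filter is
// shifted toward the lower bin by 'offset' (0 <= offset < 1).
double PeakResponse(const std::vector<double>& input, std::size_t peakBin, double offset)
{
    return input[peakBin - 1] * offset + input[peakBin] * (1.0 - offset);
}

int main()
{
    std::vector<double> input = { 0.0, 1.0, 2.0, 3.0, 4.0 }; // input[k] plays the role of Ik
    assert(PeakResponse(input, 3, 0.0) == 3.0);                   // (I3 * 1)
    assert(PeakResponse(input, 3, 0.5) == 0.5 * 2.0 + 0.5 * 3.0); // (I2 * 0.5) + (I3 * 0.5)
    return 0;
}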
5 changes: 3 additions & 2 deletions libraries/dsp/src/FilterBank.cpp
@@ -46,7 +46,8 @@ namespace dsp
_centerBin(centerBin),
_highBin(highBin),
_size(size),
_offset(offset) {}
_offset(offset)
{}

double TriangleFilter::operator[](size_t index)
{
@@ -201,7 +202,7 @@ namespace dsp
if (v > _windowSize)
{
throw utilities::InputException(utilities::InputExceptionErrors::indexOutOfRange,
utilities::FormatString("TriangleFilterBank::SetBins received a value %d that is outside the _windowSize %d", (int)v, (int)_windowSize));
utilities::FormatString("TriangleFilterBank::SetBins received a value %d that is outside the _windowSize %d", (int)v, (int)_windowSize));
}
}
}