diff --git a/docs/tutorials/Getting-started-with-image-classification-in-cpp/tutorialHelpers.h b/docs/tutorials/Getting-started-with-image-classification-in-cpp/tutorialHelpers.h index 0773a1ff0..8c454b4a3 100644 --- a/docs/tutorials/Getting-started-with-image-classification-in-cpp/tutorialHelpers.h +++ b/docs/tutorials/Getting-started-with-image-classification-in-cpp/tutorialHelpers.h @@ -9,9 +9,9 @@ #include #include #include +#include #include #include -#include #include "model.h" diff --git a/interfaces/common/include/ModelInterface.h b/interfaces/common/include/ModelInterface.h index c36c7114e..36c54aa33 100644 --- a/interfaces/common/include/ModelInterface.h +++ b/interfaces/common/include/ModelInterface.h @@ -212,6 +212,7 @@ class SourceNode : public Node SourceNode() = default; SourceNode(ell::model::SourceNodeBase* other, std::shared_ptr model); ell::model::SourceNodeBase* GetSourceNode() const; + private: ell::model::SourceNodeBase* _sourceNode; #endif @@ -235,6 +236,7 @@ class SinkNode : public Node SinkNode() = default; SinkNode(ell::model::SinkNodeBase* other, std::shared_ptr model); ell::model::SinkNodeBase* GetSinkNode() const; + private: ell::model::SinkNodeBase* _sinkNode; #endif @@ -380,7 +382,10 @@ class Map std::vector ComputeInt64(const std::vector& inputData); #ifndef SWIG - std::shared_ptr GetInnerMap() const { return _map; } + std::shared_ptr GetInnerMap() const + { + return _map; + } #endif private: diff --git a/interfaces/common/include/NeuralNetworkPredictorInterface.h b/interfaces/common/include/NeuralNetworkPredictorInterface.h index 2693ec065..83f5ee8f5 100644 --- a/interfaces/common/include/NeuralNetworkPredictorInterface.h +++ b/interfaces/common/include/NeuralNetworkPredictorInterface.h @@ -8,8 +8,8 @@ #pragma once #include "MathInterface.h" -#include "Ports.h" #include "NeuralLayersInterface.h" +#include "Ports.h" #ifndef SWIG #include diff --git a/interfaces/common/src/ModelBuilderInterface.cpp b/interfaces/common/src/ModelBuilderInterface.cpp index 87939b8e6..fe2a24c7c 100644 --- a/interfaces/common/src/ModelBuilderInterface.cpp +++ b/interfaces/common/src/ModelBuilderInterface.cpp @@ -30,16 +30,16 @@ #include #include #include -#include #include +#include #include #include #include #include #include #include -#include #include +#include #include #include #include @@ -778,8 +778,7 @@ Node ModelBuilder::AddIIRFilterNode(Model model, PortElements input, std::vector case PortType::real: newNode = model.GetModel()->AddNode>(ell::model::PortElements(elements), bCoeffs, aCoeffs); break; - case PortType::smallReal: - { + case PortType::smallReal: { std::vector bFloatCoeffs(bCoeffs.begin(), bCoeffs.end()); std::vector aFloatCoeffs(aCoeffs.begin(), aCoeffs.end()); newNode = model.GetModel()->AddNode>(ell::model::PortElements(elements), bFloatCoeffs, aFloatCoeffs); @@ -1229,14 +1228,12 @@ Node ModelBuilder::AddActivationLayerNode(Model model, PortElements input, const case ActivationType::hardSigmoid: case ActivationType::hardTanh: case ActivationType::sigmoid: - case ActivationType::tanh: - { + case ActivationType::tanh: { auto activationLayer = ell::predictors::neural::ActivationLayer(parameters, ell::api::predictors::neural::ActivationLayer::CreateActivation(layer.activation)); newNode = model.GetModel()->AddNode>(ell::model::PortElements(elements), activationLayer); break; } - case ActivationType::leaky: - { + case ActivationType::leaky: { // can't use the ell::api::predictors::CreateActivation helper method in this case because the neural::LeakyReLUActivation 
requires the alpha value parameter. using ApiLeakyReLUActivationLayer = ell::api::predictors::neural::LeakyReLUActivationLayer; auto* activationlayer = const_cast(&layer); @@ -1251,11 +1248,10 @@ Node ModelBuilder::AddActivationLayerNode(Model model, PortElements input, const implementation = new ell::predictors::neural::LeakyReLUActivation(); } newNode = model.GetModel()->AddNode>(ell::model::PortElements(elements), - ell::predictors::neural::ActivationLayer(parameters, implementation)); + ell::predictors::neural::ActivationLayer(parameters, implementation)); break; } - case ActivationType::prelu: - { + case ActivationType::prelu: { // can't use the ell::api::predictors::CreateActivation helper method in this case because the neural::PReLUActivationLayer requires the alpha value parameter. using ApiPReLUActivationLayer = ell::api::predictors::neural::PReLUActivationLayer; auto* activationlayer = const_cast(&layer); diff --git a/interfaces/common/src/NeuralNetworkPredictorInterface.cpp b/interfaces/common/src/NeuralNetworkPredictorInterface.cpp index 3a9c00b27..c1f7b404f 100644 --- a/interfaces/common/src/NeuralNetworkPredictorInterface.cpp +++ b/interfaces/common/src/NeuralNetworkPredictorInterface.cpp @@ -123,14 +123,13 @@ namespace api auto& parameters = layers.front()->parameters; // Construct the input layer - UnderlyingInputParameters inputParameters = - { - { static_cast(parameters.inputShape.rows - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast(parameters.inputShape.columns - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast(parameters.inputShape.channels) }, - underlying::NoPadding(), - { static_cast(parameters.inputShape.rows), static_cast(parameters.inputShape.columns), static_cast(parameters.inputShape.channels) }, - parameters.inputPaddingParameters, - inputScaleFactor - }; + UnderlyingInputParameters inputParameters = { + { static_cast(parameters.inputShape.rows - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast(parameters.inputShape.columns - (2 * parameters.inputPaddingParameters.paddingSize)), static_cast(parameters.inputShape.channels) }, + underlying::NoPadding(), + { static_cast(parameters.inputShape.rows), static_cast(parameters.inputShape.columns), static_cast(parameters.inputShape.channels) }, + parameters.inputPaddingParameters, + inputScaleFactor + }; auto inputLayer = std::make_unique>(inputParameters); UnderlyingLayers underlyingLayers; @@ -200,8 +199,7 @@ namespace api case neural::ActivationType::tanh: activation = neural::ActivationLayer::CreateActivation(layer.activation); break; - case neural::ActivationType::leaky: - { + case neural::ActivationType::leaky: { ActivationImplType* implementation = nullptr; if (LayerIs(&layer)) { @@ -215,8 +213,7 @@ namespace api activation = ell::predictors::neural::Activation(implementation); break; } - case neural::ActivationType::prelu: - { + case neural::ActivationType::prelu: { auto& preluApiLayer = LayerAs(&layer); TensorType alpha(preluApiLayer.alpha.shape.rows, preluApiLayer.alpha.shape.columns, preluApiLayer.alpha.shape.channels, CastVector(preluApiLayer.alpha.data)); activation = ell::predictors::neural::Activation(new underlying::ParametricReLUActivation(alpha)); @@ -308,26 +305,23 @@ namespace api { // Set the layer parameters. Note that if this is the first layer, we set the input reference to the output of the InputLayer. // Otherwise, we set it to the output of the last layer. - UnderlyingLayerParameters parameters = - { - ((underlyingLayers.size() > 0) ? 
underlyingLayers.back()->GetOutput() : underlyingInputLayer->GetOutput()), - layer->parameters.inputPaddingParameters, - { static_cast(layer->parameters.outputShape.rows), static_cast(layer->parameters.outputShape.columns), static_cast(layer->parameters.outputShape.channels) }, - layer->parameters.outputPaddingParameters, - }; + UnderlyingLayerParameters parameters = { + ((underlyingLayers.size() > 0) ? underlyingLayers.back()->GetOutput() : underlyingInputLayer->GetOutput()), + layer->parameters.inputPaddingParameters, + { static_cast(layer->parameters.outputShape.rows), static_cast(layer->parameters.outputShape.columns), static_cast(layer->parameters.outputShape.channels) }, + layer->parameters.outputPaddingParameters, + }; // Instantiate the specific layer type underlying::LayerType layerType = layer->GetLayerType(); switch (layerType) { - case (underlying::LayerType::activation): - { + case (underlying::LayerType::activation): { auto& apiLayer = LayerAs(layer); underlyingLayers.push_back(CreateActivationLayer(apiLayer, parameters)); } break; - case (underlying::LayerType::batchNormalization): - { + case (underlying::LayerType::batchNormalization): { auto& apiLayer = LayerAs(layer); auto epsilonSummand = (apiLayer.epsilonSummand == neural::EpsilonSummand::variance) ? underlying::EpsilonSummand::Variance : underlying::EpsilonSummand::SqrtVariance; underlyingLayers.push_back(std::make_unique>(parameters, @@ -337,36 +331,31 @@ namespace api epsilonSummand)); } break; - case (underlying::LayerType::bias): - { + case (underlying::LayerType::bias): { auto& apiLayer = LayerAs(layer); underlyingLayers.push_back(std::make_unique>(parameters, CastVector(apiLayer.bias))); } break; - case (underlying::LayerType::binaryConvolution): - { + case (underlying::LayerType::binaryConvolution): { auto& apiLayer = LayerAs(layer); TensorType weights(apiLayer.weights.shape.rows, apiLayer.weights.shape.columns, apiLayer.weights.shape.channels, CastVector(apiLayer.weights.data)); underlyingLayers.push_back(std::make_unique>(parameters, apiLayer.convolutionalParameters, weights)); } break; - case (underlying::LayerType::convolution): - { + case (underlying::LayerType::convolution): { auto& apiLayer = LayerAs(layer); TensorType weights(apiLayer.weights.shape.rows, apiLayer.weights.shape.columns, apiLayer.weights.shape.channels, CastVector(apiLayer.weights.data)); underlyingLayers.push_back(std::make_unique>(parameters, apiLayer.convolutionalParameters, weights)); } break; - case (underlying::LayerType::fullyConnected): - { + case (underlying::LayerType::fullyConnected): { auto& apiLayer = LayerAs(layer); TensorType weights(apiLayer.weights.shape.rows, apiLayer.weights.shape.columns, apiLayer.weights.shape.channels, CastVector(apiLayer.weights.data)); underlyingLayers.push_back(std::make_unique>(parameters, weights)); } break; - case (underlying::LayerType::pooling): - { + case (underlying::LayerType::pooling): { auto& apiLayer = LayerAs(layer); if (apiLayer.poolingType == neural::PoolingType::max) { @@ -378,20 +367,17 @@ namespace api } } break; - case (underlying::LayerType::region): - { + case (underlying::LayerType::region): { auto& apiLayer = LayerAs(layer); underlyingLayers.push_back(std::make_unique>(parameters, apiLayer.detectionParameters)); } break; - case (underlying::LayerType::scaling): - { + case (underlying::LayerType::scaling): { auto& apiLayer = LayerAs(layer); underlyingLayers.push_back(std::make_unique>(parameters, CastVector(apiLayer.scales))); } break; - case (underlying::LayerType::softmax): - 
{ + case (underlying::LayerType::softmax): { underlyingLayers.push_back(std::make_unique>(parameters)); } break; diff --git a/libraries/common/include/DataLoaders.h b/libraries/common/include/DataLoaders.h index 89ff631b1..ce914d0a8 100644 --- a/libraries/common/include/DataLoaders.h +++ b/libraries/common/include/DataLoaders.h @@ -167,13 +167,11 @@ namespace common ptrdiff_t callbackAddress = 0; switch (map.GetInputType()) { - case model::Port::PortType::smallReal: - { + case model::Port::PortType::smallReal: { callbackAddress = reinterpret_cast(&InputCallback_Float); break; } - case model::Port::PortType::real: - { + case model::Port::PortType::real: { callbackAddress = reinterpret_cast(&InputCallback_Double); break; } @@ -216,8 +214,7 @@ namespace common auto type = map.GetInputType(); switch (type) { - case model::Port::PortType::smallReal: - { + case model::Port::PortType::smallReal: { return input.template Transform([&compiledMap](const ExampleType& example) { auto data = example.GetDataVector().ToArray(); std::vector smallData(data.size()); @@ -227,8 +224,7 @@ namespace common return ExampleType(std::move(transformedDataVector), example.GetMetadata()); }); } - case model::Port::PortType::real: - { + case model::Port::PortType::real: { return input.template Transform([&compiledMap](const ExampleType& example) { compiledMap.SetInputValue(0, example.GetDataVector().ToArray()); auto transformedDataVector = compiledMap.template ComputeOutput(0); @@ -237,7 +233,7 @@ namespace common } default: throw utilities::InputException(utilities::InputExceptionErrors::typeMismatch, - utilities::FormatString("Unexpected input type %d, expecting float or double", type)); + utilities::FormatString("Unexpected input type %d, expecting float or double", type)); } } } diff --git a/libraries/common/src/LoadModel.cpp b/libraries/common/src/LoadModel.cpp index 8d7f1f7a4..e5c01ac30 100644 --- a/libraries/common/src/LoadModel.cpp +++ b/libraries/common/src/LoadModel.cpp @@ -30,8 +30,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -41,8 +41,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -53,8 +53,8 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/libraries/common/src/MapCompilerArguments.cpp b/libraries/common/src/MapCompilerArguments.cpp index 33fd28277..512300324 100644 --- a/libraries/common/src/MapCompilerArguments.cpp +++ b/libraries/common/src/MapCompilerArguments.cpp @@ -198,7 +198,7 @@ namespace common "gva", "The number of bytes to align global buffers to", 32); - + parser.AddOption( skip_ellcode, "skip_ellcode", diff --git a/libraries/data/test/src/DataVector_test.cpp b/libraries/data/test/src/DataVector_test.cpp index 3314d67ed..c5ae27e20 100644 --- a/libraries/data/test/src/DataVector_test.cpp +++ b/libraries/data/test/src/DataVector_test.cpp @@ -120,7 +120,8 @@ void DataVectorCopyAsTest(std::initializer_list list, bool testDense = t if (testDense) { - auto d = data::TransformAs(a, [](data::IndexValue x) { return x.value + 3; }, 3); + auto d = data::TransformAs( + a, [](data::IndexValue x) { return x.value + 3; }, 3); auto dv = d.ToArray(); std::vector r{ av[0] + 3, av[1] + 3, av[2] + 3 }; testing::ProcessTest(name1 + "::TransformAs", testing::IsEqual(r, dv, 1.0e-6)); diff --git a/libraries/dsp/include/FilterBank.h b/libraries/dsp/include/FilterBank.h index e7ddcce85..053181a94 100644 --- a/libraries/dsp/include/FilterBank.h +++ b/libraries/dsp/include/FilterBank.h 
@@ -154,7 +154,7 @@ namespace dsp /// | / | \ | /// |---|---|-.-|-.-|-.-|---|---| /// 0 1 2 3 4 5 6 7 - /// + /// /// with offset 0 the result would be (I3 * 1) but with offset 0.5 it would be (I2 * 0.5) + (I3 * 0.5). TriangleFilterBank(size_t windowSize, double sampleRate, size_t numFilters, double offset = 0); TriangleFilterBank(size_t windowSize, double sampleRate, size_t numFilters, size_t numFiltersToUse, double offset = 0); diff --git a/libraries/dsp/src/FilterBank.cpp b/libraries/dsp/src/FilterBank.cpp index 5721b5c98..63b78f25f 100644 --- a/libraries/dsp/src/FilterBank.cpp +++ b/libraries/dsp/src/FilterBank.cpp @@ -46,7 +46,8 @@ namespace dsp _centerBin(centerBin), _highBin(highBin), _size(size), - _offset(offset) {} + _offset(offset) + {} double TriangleFilter::operator[](size_t index) { @@ -201,7 +202,7 @@ namespace dsp if (v > _windowSize) { throw utilities::InputException(utilities::InputExceptionErrors::indexOutOfRange, - utilities::FormatString("TriangleFilterBank::SetBins received a value %d that is outside the _windowSize %d", (int)v, (int)_windowSize)); + utilities::FormatString("TriangleFilterBank::SetBins received a value %d that is outside the _windowSize %d", (int)v, (int)_windowSize)); } } } diff --git a/libraries/dsp/src/WinogradConvolution.cpp b/libraries/dsp/src/WinogradConvolution.cpp index 5738a3730..d13e66f00 100644 --- a/libraries/dsp/src/WinogradConvolution.cpp +++ b/libraries/dsp/src/WinogradConvolution.cpp @@ -1619,13 +1619,11 @@ namespace dsp switch (order) { - case WinogradFilterOrder::filtersFirst: - { + case WinogradFilterOrder::filtersFirst: { Convolve2DWinogradFiltersFirst(input, transformedFilters, numFilters, tileSize, filterSize, output); } break; - case WinogradFilterOrder::tilesFirst: - { + case WinogradFilterOrder::tilesFirst: { auto transformedInputScratch = AllocateScratchInput(numOutputRows, numOutputColumns, numChannels, tileSize, filterSize, order); auto transformedOutputScratch = AllocateScratchOutput(numOutputRows, numOutputColumns, numFilters, tileSize, filterSize, order); Convolve2DWinogradTilesFirst(input, transformedFilters, numFilters, tileSize, filterSize, transformedInputScratch, transformedOutputScratch, output); diff --git a/libraries/dsp/test/src/main.cpp b/libraries/dsp/test/src/main.cpp index eabfd75a1..59a5d4f4d 100644 --- a/libraries/dsp/test/src/main.cpp +++ b/libraries/dsp/test/src/main.cpp @@ -36,7 +36,7 @@ void DspTest(const std::string& path) // // Testing // - + // 1D Convolution TestConv1D(ConvolutionMethodOption::simple); TestConv1D(ConvolutionMethodOption::winograd); diff --git a/libraries/emittable_functions/include/Convolutions.h b/libraries/emittable_functions/include/Convolutions.h index 6de01bb9a..8c001bea6 100644 --- a/libraries/emittable_functions/include/Convolutions.h +++ b/libraries/emittable_functions/include/Convolutions.h @@ -17,7 +17,6 @@ namespace emittable_functions void SimpleConvolve1D(value::Vector signal, value::Vector filter, value::Vector output); - void SimpleDepthwiseSeparableConvolve2D(value::Tensor signal, value::Tensor filter, value::Scalar rowStride, - value::Scalar columnStride, value::Tensor output); + void SimpleDepthwiseSeparableConvolve2D(value::Tensor signal, value::Tensor filter, value::Scalar rowStride, value::Scalar columnStride, value::Tensor output); } // namespace emittable_functions } // namespace ell diff --git a/libraries/emittable_functions/include/LogisticFunctions.h b/libraries/emittable_functions/include/LogisticFunctions.h index 6adff0e42..13e0a1b01 100644 --- 
a/libraries/emittable_functions/include/LogisticFunctions.h +++ b/libraries/emittable_functions/include/LogisticFunctions.h @@ -26,6 +26,5 @@ namespace emittable_functions value::Scalar HardTanh(value::Scalar s); - } // namespace emittable_functions } // namespace ell diff --git a/libraries/emittable_functions/include/VoiceActivityDetector.h b/libraries/emittable_functions/include/VoiceActivityDetector.h index 2a6ba855f..10b268d1e 100644 --- a/libraries/emittable_functions/include/VoiceActivityDetector.h +++ b/libraries/emittable_functions/include/VoiceActivityDetector.h @@ -50,9 +50,7 @@ namespace emittable_functions /// Then we compare the energy of the current frame to the noise floor. If it is thresholdUp times higher, we switch to state VOICE. /// If instead it is thresholdDown times lower, we switch to state NO VOICE. /// A special case occurs when the energy of the frame is lower than levelThreshold, in which case we force the state to NO VOICE. - VoiceActivityDetector(double sampleRate, double windowSize, double frameDuration, double tauUp, double tauDown, - double largeInput, double gainAtt, double thresholdUp, double thresholdDown, - double levelThreshold); + VoiceActivityDetector(double sampleRate, double windowSize, double frameDuration, double tauUp, double tauDown, double largeInput, double gainAtt, double thresholdUp, double thresholdDown, double levelThreshold); /// Destructor ~VoiceActivityDetector(); diff --git a/libraries/emittable_functions/src/VoiceActivityDetector.cpp b/libraries/emittable_functions/src/VoiceActivityDetector.cpp index 0f23d53be..cc8e6cfe4 100644 --- a/libraries/emittable_functions/src/VoiceActivityDetector.cpp +++ b/libraries/emittable_functions/src/VoiceActivityDetector.cpp @@ -275,25 +275,55 @@ namespace emittable_functions _impl->_tracker.Reset(); } - double VoiceActivityDetector::GetSampleRate() const { return _impl->_sampleRate; } + double VoiceActivityDetector::GetSampleRate() const + { + return _impl->_sampleRate; + } - double VoiceActivityDetector::GetWindowSize() const { return _impl->_windowSize; } + double VoiceActivityDetector::GetWindowSize() const + { + return _impl->_windowSize; + } - double VoiceActivityDetector::GetFrameDuration() const { return _impl->_frameDuration; } + double VoiceActivityDetector::GetFrameDuration() const + { + return _impl->_frameDuration; + } - double VoiceActivityDetector::GetTauUp() const { return _impl->_tracker._tauUp; } + double VoiceActivityDetector::GetTauUp() const + { + return _impl->_tracker._tauUp; + } - double VoiceActivityDetector::GetTauDown() const { return _impl->_tracker._tauDown; } + double VoiceActivityDetector::GetTauDown() const + { + return _impl->_tracker._tauDown; + } - double VoiceActivityDetector::GetLargeInput() const { return _impl->_tracker._largeInput; } + double VoiceActivityDetector::GetLargeInput() const + { + return _impl->_tracker._largeInput; + } - double VoiceActivityDetector::GetGainAtt() const { return _impl->_tracker._gainAtt; } + double VoiceActivityDetector::GetGainAtt() const + { + return _impl->_tracker._gainAtt; + } - double VoiceActivityDetector::GetThresholdUp() const { return _impl->_tracker._thresholdUp; } + double VoiceActivityDetector::GetThresholdUp() const + { + return _impl->_tracker._thresholdUp; + } - double VoiceActivityDetector::GetThresholdDown() const { return _impl->_tracker._thresholdDown; } + double VoiceActivityDetector::GetThresholdDown() const + { + return _impl->_tracker._thresholdDown; + } - double 
VoiceActivityDetector::GetLevelThreshold() const { return _impl->_tracker._levelThreshold; } + double VoiceActivityDetector::GetLevelThreshold() const + { + return _impl->_tracker._levelThreshold; + } Scalar VoiceActivityDetector::Process(const Vector data) { @@ -322,7 +352,10 @@ namespace emittable_functions return signal; } - const std::vector& VoiceActivityDetector::GetWeights() const { return _impl->_cmw.GetWeights(); } + const std::vector& VoiceActivityDetector::GetWeights() const + { + return _impl->_cmw.GetWeights(); + } bool VoiceActivityDetector::Equals(const VoiceActivityDetector& other) const { diff --git a/libraries/emittable_functions/test/src/Convolutions_test.cpp b/libraries/emittable_functions/test/src/Convolutions_test.cpp index a2ccc09c0..41d644241 100644 --- a/libraries/emittable_functions/test/src/Convolutions_test.cpp +++ b/libraries/emittable_functions/test/src/Convolutions_test.cpp @@ -21,8 +21,8 @@ #include #include -#include #include +#include #include #include #include diff --git a/libraries/emitters/include/IRAssemblyWriter.h b/libraries/emitters/include/IRAssemblyWriter.h index 4793965c1..35ed17679 100644 --- a/libraries/emitters/include/IRAssemblyWriter.h +++ b/libraries/emitters/include/IRAssemblyWriter.h @@ -45,13 +45,13 @@ namespace emitters OptimizationLevel optimizationLevel = OptimizationLevel::Default; FloatABIType floatABI = FloatABIType::Default; - + FloatFusionMode floatFusionMode = FloatFusionMode::Fast; bool unsafeFPMath = true; bool noInfsFPMath = true; bool noNaNsFPMath = true; bool noSignedZerosFPMath = true; - + OutputRelocationModel relocModel = OutputRelocationModel::Static; }; diff --git a/libraries/emitters/include/IRExecutionEngine.h b/libraries/emitters/include/IRExecutionEngine.h index a12b29e14..edf0bc510 100644 --- a/libraries/emitters/include/IRExecutionEngine.h +++ b/libraries/emitters/include/IRExecutionEngine.h @@ -9,8 +9,8 @@ #include "LLVMUtilities.h" -#include #include +#include #include #include diff --git a/libraries/emitters/include/IRModuleEmitter.h b/libraries/emitters/include/IRModuleEmitter.h index 570b96bad..c19964244 100644 --- a/libraries/emitters/include/IRModuleEmitter.h +++ b/libraries/emitters/include/IRModuleEmitter.h @@ -712,7 +712,6 @@ namespace emitters /// Optional global constant that this function is for. If the data is optimized away, then the finalization function will be also. void AddFinalizationFunction(IRFunctionEmitter& function, int priority = 65536, llvm::Constant* forData = nullptr); - /// Return the typed CallbackRegistry object that is used to manage any std::functions defined /// on any SourceNodes or SinkNodes in the graph. 
template diff --git a/libraries/emitters/include/ModuleEmitter.h b/libraries/emitters/include/ModuleEmitter.h index 045b24b87..485911e56 100644 --- a/libraries/emitters/include/ModuleEmitter.h +++ b/libraries/emitters/include/ModuleEmitter.h @@ -139,7 +139,6 @@ namespace emitters void FreeVariable(Variable& var); private: - CompilerOptions _options; EmittedVariableAllocator _inputVars; // Runtime input variable table diff --git a/libraries/emitters/src/CompilerOptions.cpp b/libraries/emitters/src/CompilerOptions.cpp index fe45c1a8c..35714dec6 100644 --- a/libraries/emitters/src/CompilerOptions.cpp +++ b/libraries/emitters/src/CompilerOptions.cpp @@ -96,7 +96,7 @@ namespace utilities { throw utilities::InputException(utilities::InputExceptionErrors::indexOutOfRange, "Unknown BlasType"); } - + return it->second; } } // namespace utilities diff --git a/libraries/emitters/src/IREmitter.cpp b/libraries/emitters/src/IREmitter.cpp index e4f18359a..cd030c7cb 100644 --- a/libraries/emitters/src/IREmitter.cpp +++ b/libraries/emitters/src/IREmitter.cpp @@ -144,9 +144,15 @@ namespace emitters } } - llvm::PointerType* IREmitter::PointerType(VariableType type) { return Type(type)->getPointerTo(); } + llvm::PointerType* IREmitter::PointerType(VariableType type) + { + return Type(type)->getPointerTo(); + } - llvm::PointerType* IREmitter::PointerType(LLVMType type) { return type->getPointerTo(); } + llvm::PointerType* IREmitter::PointerType(LLVMType type) + { + return type->getPointerTo(); + } llvm::ArrayType* IREmitter::ArrayType(VariableType type, size_t size) { @@ -159,7 +165,10 @@ namespace emitters return llvm::ArrayType::get(rowType, rows); } - llvm::ArrayType* IREmitter::ArrayType(LLVMType type, size_t size) { return llvm::ArrayType::get(type, size); } + llvm::ArrayType* IREmitter::ArrayType(LLVMType type, size_t size) + { + return llvm::ArrayType::get(type, size); + } llvm::ArrayType* IREmitter::ArrayType(LLVMType type, size_t rows, size_t columns) { @@ -172,19 +181,40 @@ namespace emitters return llvm::VectorType::get(Type(type), size); } - llvm::VectorType* IREmitter::VectorType(LLVMType type, size_t size) { return llvm::VectorType::get(type, size); } + llvm::VectorType* IREmitter::VectorType(LLVMType type, size_t size) + { + return llvm::VectorType::get(type, size); + } - llvm::Constant* IREmitter::Literal(const bool value) { return Integer(VariableType::Byte, value ? 1 : 0); } + llvm::Constant* IREmitter::Literal(const bool value) + { + return Integer(VariableType::Byte, value ? 
1 : 0); + } - llvm::Constant* IREmitter::Literal(const int8_t value) { return Integer(VariableType::Char8, value); } + llvm::Constant* IREmitter::Literal(const int8_t value) + { + return Integer(VariableType::Char8, value); + } - llvm::Constant* IREmitter::Literal(const uint8_t value) { return Integer(VariableType::Byte, value); } + llvm::Constant* IREmitter::Literal(const uint8_t value) + { + return Integer(VariableType::Byte, value); + } - llvm::Constant* IREmitter::Literal(const short value) { return Integer(VariableType::Int16, value); } + llvm::Constant* IREmitter::Literal(const short value) + { + return Integer(VariableType::Int16, value); + } - llvm::Constant* IREmitter::Literal(const int value) { return Integer(VariableType::Int32, value); } + llvm::Constant* IREmitter::Literal(const int value) + { + return Integer(VariableType::Int32, value); + } - llvm::Constant* IREmitter::Literal(const int64_t value) { return Integer(VariableType::Int64, value); } + llvm::Constant* IREmitter::Literal(const int64_t value) + { + return Integer(VariableType::Int64, value); + } llvm::Constant* IREmitter::Literal(const float value) { @@ -282,15 +312,30 @@ namespace emitters return nullptr; } - llvm::Constant* IREmitter::Zero(LLVMType type) { return llvm::Constant::getNullValue(type); } + llvm::Constant* IREmitter::Zero(LLVMType type) + { + return llvm::Constant::getNullValue(type); + } - llvm::Constant* IREmitter::True() { return Literal(true); } + llvm::Constant* IREmitter::True() + { + return Literal(true); + } - llvm::Constant* IREmitter::False() { return Literal(false); } + llvm::Constant* IREmitter::False() + { + return Literal(false); + } - llvm::Constant* IREmitter::TrueBit() { return llvm::ConstantInt::getTrue(_llvmContext); } + llvm::Constant* IREmitter::TrueBit() + { + return llvm::ConstantInt::getTrue(_llvmContext); + } - llvm::Constant* IREmitter::FalseBit() { return llvm::ConstantInt::getFalse(_llvmContext); } + llvm::Constant* IREmitter::FalseBit() + { + return llvm::ConstantInt::getFalse(_llvmContext); + } llvm::ConstantPointerNull* IREmitter::NullPointer(llvm::PointerType* pointerType) { @@ -871,9 +916,15 @@ namespace emitters } } - void IREmitter::SetCurrentInsertPoint(llvm::IRBuilder<>::InsertPoint pos) { _irBuilder.restoreIP(pos); } + void IREmitter::SetCurrentInsertPoint(llvm::IRBuilder<>::InsertPoint pos) + { + _irBuilder.restoreIP(pos); + } - void IREmitter::SetCurrentInsertPoint(llvm::Instruction* pos) { _irBuilder.SetInsertPoint(pos); } + void IREmitter::SetCurrentInsertPoint(llvm::Instruction* pos) + { + _irBuilder.SetInsertPoint(pos); + } // // Calling functions @@ -1032,7 +1083,10 @@ namespace emitters return _irBuilder.CreateAlloca(Type(type), nullptr); } - llvm::AllocaInst* IREmitter::StackAllocate(LLVMType type) { return _irBuilder.CreateAlloca(type, nullptr); } + llvm::AllocaInst* IREmitter::StackAllocate(LLVMType type) + { + return _irBuilder.CreateAlloca(type, nullptr); + } llvm::AllocaInst* IREmitter::StackAllocate(VariableType type, const std::string& name) { @@ -1129,7 +1183,10 @@ namespace emitters return llvm::StructType::get(_llvmContext, fields, packed); } - llvm::StructType* IREmitter::GetStruct(const std::string& name) { return _structs[name]; } + llvm::StructType* IREmitter::GetStruct(const std::string& name) + { + return _structs[name]; + } uint64_t IREmitter::SizeOf(LLVMType type) const { diff --git a/libraries/emitters/src/IRExecutionEngine.cpp b/libraries/emitters/src/IRExecutionEngine.cpp index d444581ba..61734bd42 100644 --- 
a/libraries/emitters/src/IRExecutionEngine.cpp +++ b/libraries/emitters/src/IRExecutionEngine.cpp @@ -13,20 +13,19 @@ #include +#include #include #include -#include -extern "C" +extern "C" { +void DebugPrintImpl(char* message) { - void DebugPrintImpl(char* message) + if (message != nullptr) { - if (message != nullptr) - { - std::cout << message; - } + std::cout << message; } } +} namespace ell { diff --git a/libraries/emitters/src/IRHeaderWriter.cpp b/libraries/emitters/src/IRHeaderWriter.cpp index 0add706d4..ec96e1c47 100644 --- a/libraries/emitters/src/IRHeaderWriter.cpp +++ b/libraries/emitters/src/IRHeaderWriter.cpp @@ -21,10 +21,10 @@ #include #include +#include #include #include #include -#include namespace ell { @@ -489,7 +489,7 @@ namespace emitters for (auto arg = predictFunction->arg_begin(), end = predictFunction->arg_end(); arg != end; ++arg) { ArgumentFlags flags = ArgumentFlags::InOut; - if (index < argDecls.size()) + if (index < argDecls.size()) { flags = argDecls[index++].GetFlags(); } diff --git a/libraries/emitters/src/IRIfEmitter.cpp b/libraries/emitters/src/IRIfEmitter.cpp index 4b4959f2a..39f86bb3d 100644 --- a/libraries/emitters/src/IRIfEmitter.cpp +++ b/libraries/emitters/src/IRIfEmitter.cpp @@ -255,7 +255,5 @@ namespace emitters } } - - } // namespace emitters } // namespace ell diff --git a/libraries/emitters/src/IRLocalArray.cpp b/libraries/emitters/src/IRLocalArray.cpp index f743a8b05..26eb3ee82 100644 --- a/libraries/emitters/src/IRLocalArray.cpp +++ b/libraries/emitters/src/IRLocalArray.cpp @@ -31,7 +31,8 @@ namespace emitters LLVMValue pOffset) : _function(function), _pPointer(value), - _pOffset(pOffset) {} + _pOffset(pOffset) + {} IRLocalArray::IRLocalArrayValue& IRLocalArray::IRLocalArrayValue::operator=(const IRLocalArrayValue& value) { diff --git a/libraries/emitters/src/IRLocalMultidimArray.cpp b/libraries/emitters/src/IRLocalMultidimArray.cpp index e9a68b170..83592f4f7 100644 --- a/libraries/emitters/src/IRLocalMultidimArray.cpp +++ b/libraries/emitters/src/IRLocalMultidimArray.cpp @@ -95,7 +95,8 @@ namespace emitters IRLocalMultidimArray::IRLocalArrayElement::IRLocalArrayElement(emitters::IRFunctionEmitter& function, LLVMValue data, LLVMValue offset) : _function(function), _data(data), - _offset(offset) {} + _offset(offset) + {} IRLocalMultidimArray::IRLocalArrayElement& IRLocalMultidimArray::IRLocalArrayElement::operator=(const IRLocalArrayElement& value) { diff --git a/libraries/emitters/src/IRLocalValue.cpp b/libraries/emitters/src/IRLocalValue.cpp index e4510de76..fc77bd755 100644 --- a/libraries/emitters/src/IRLocalValue.cpp +++ b/libraries/emitters/src/IRLocalValue.cpp @@ -74,7 +74,8 @@ namespace emitters // IRLocalValue::IRLocalValue(emitters::IRFunctionEmitter& function, LLVMValue value) : function(function), - value(value) {} + value(value) + {} IRLocalValue& IRLocalValue::operator=(const IRLocalValue& other) { diff --git a/libraries/emitters/src/IRLoopEmitter.cpp b/libraries/emitters/src/IRLoopEmitter.cpp index dd8997312..875c717e3 100644 --- a/libraries/emitters/src/IRLoopEmitter.cpp +++ b/libraries/emitters/src/IRLoopEmitter.cpp @@ -47,7 +47,8 @@ namespace emitters } // namespace IRLoopEmitter::IRLoopEmitter(IRFunctionEmitter& functionEmitter) : - _functionEmitter(functionEmitter) {} + _functionEmitter(functionEmitter) + {} void IRLoopEmitter::AddLoopMetadata(llvm::BranchInst* branch, bool unroll, bool vectorize) { diff --git a/libraries/emitters/src/IRMath.cpp b/libraries/emitters/src/IRMath.cpp index 6c1fdbfb3..4f9b00db9 100644 --- 
a/libraries/emitters/src/IRMath.cpp +++ b/libraries/emitters/src/IRMath.cpp @@ -6,8 +6,8 @@ // //////////////////////////////////////////////////////////////////////////////////////////////////// -#include "EmitterException.h" #include "IRMath.h" +#include "EmitterException.h" #include "IRModuleEmitter.h" #include diff --git a/libraries/emitters/src/IRModuleEmitter.cpp b/libraries/emitters/src/IRModuleEmitter.cpp index 8cf9a1a72..7723d4d1d 100644 --- a/libraries/emitters/src/IRModuleEmitter.cpp +++ b/libraries/emitters/src/IRModuleEmitter.cpp @@ -757,19 +757,16 @@ namespace emitters case ModuleOutputFormat::assembly: case ModuleOutputFormat::bitcode: case ModuleOutputFormat::ir: - case ModuleOutputFormat::objectCode: - { + case ModuleOutputFormat::objectCode: { WriteToFile(filePath, format, options); break; } - case ModuleOutputFormat::cHeader: - { + case ModuleOutputFormat::cHeader: { auto os = utilities::OpenOfstream(filePath); WriteToStream(os, format, options); break; } - case ModuleOutputFormat::swigInterface: - { + case ModuleOutputFormat::swigInterface: { // Write the swig interface file auto headerFilePath = filePath + ".h"; auto os = utilities::OpenOfstream(filePath); diff --git a/libraries/emitters/src/IROptimizer.cpp b/libraries/emitters/src/IROptimizer.cpp index 6c461258e..7dd09ee53 100644 --- a/libraries/emitters/src/IROptimizer.cpp +++ b/libraries/emitters/src/IROptimizer.cpp @@ -47,7 +47,7 @@ namespace emitters { throw EmitterException(EmitterError::unexpected, "Unable to allocate target machine"); } - + auto& llvmTargetMachine = static_cast(*targetMachine); auto config = static_cast(llvmTargetMachine.createPassConfig(_modulePasses)); _modulePasses.add(config); diff --git a/libraries/emitters/src/IRParallelLoopEmitter.cpp b/libraries/emitters/src/IRParallelLoopEmitter.cpp index e7a932d07..0eb56b828 100644 --- a/libraries/emitters/src/IRParallelLoopEmitter.cpp +++ b/libraries/emitters/src/IRParallelLoopEmitter.cpp @@ -19,7 +19,8 @@ namespace ell namespace emitters { IRParallelForLoopEmitter::IRParallelForLoopEmitter(IRFunctionEmitter& functionEmitter) : - _functionEmitter(functionEmitter) {} + _functionEmitter(functionEmitter) + {} void IRParallelForLoopEmitter::EmitLoop(int begin, int end, int increment, const ParallelLoopOptions& options, const std::vector& capturedValues, BodyFunction body) { diff --git a/libraries/emitters/src/IRProfiler.cpp b/libraries/emitters/src/IRProfiler.cpp index 481ebe0b6..fec2ea8ae 100644 --- a/libraries/emitters/src/IRProfiler.cpp +++ b/libraries/emitters/src/IRProfiler.cpp @@ -302,7 +302,7 @@ namespace emitters // save old insert point auto oldInsertPoint = emitter.GetCurrentInsertPoint(); emitter.SetCurrentBlock(&exitBlock); - + // add new return instruction emitter.Return(emitter.Literal(_regionCount)); diff --git a/libraries/emitters/src/ModuleEmitter.cpp b/libraries/emitters/src/ModuleEmitter.cpp index 0a201d89f..d07c4bb4a 100644 --- a/libraries/emitters/src/ModuleEmitter.cpp +++ b/libraries/emitters/src/ModuleEmitter.cpp @@ -135,14 +135,12 @@ namespace emitters VariableScope scope = var.Scope(); switch (scope) { - case VariableScope::local: - { + case VariableScope::local: { auto v = var.GetAssignedVar(); _localVars.Free(v); } break; - case VariableScope::global: - { + case VariableScope::global: { auto v = var.GetAssignedVar(); _globalVars.Free(v); } diff --git a/libraries/emitters/test/src/IREmitterTest.cpp b/libraries/emitters/test/src/IREmitterTest.cpp index 6a6cf23a6..8ea3b75f8 100644 --- 
a/libraries/emitters/test/src/IREmitterTest.cpp +++ b/libraries/emitters/test/src/IREmitterTest.cpp @@ -1178,9 +1178,9 @@ void TestInlineAssembly() auto functionName = "square"; std::string asmStr; - if(targetDevice.IsWindows()) + if (targetDevice.IsWindows()) { - asmStr= R"XX( + asmStr = R"XX( .globl FUNCTION FUNCTION: movl %ecx, %eax @@ -1190,7 +1190,7 @@ void TestInlineAssembly() } else { - asmStr= R"XX( + asmStr = R"XX( .globl FUNCTION FUNCTION: imull %edi, %edi @@ -1213,7 +1213,7 @@ void TestInlineAssembly() auto x = fn.LocalScalar(&(*arguments++)); auto squareFn = module.GetFunction(functionName); squareFn->addFnAttr(llvm::Attribute::AttrKind::AlwaysInline); - auto result = fn.Call(squareFn, {x}); + auto result = fn.Call(squareFn, { x }); fn.Return(result); } diff --git a/libraries/model/include/CompiledMap.h b/libraries/model/include/CompiledMap.h index d0870d6d2..9239b556d 100644 --- a/libraries/model/include/CompiledMap.h +++ b/libraries/model/include/CompiledMap.h @@ -84,7 +84,7 @@ namespace model /// /// The name of this type. std::string GetRuntimeTypeName() const override { return GetTypeName(); } - + /// Reset any model state. virtual void Reset() = 0; @@ -99,7 +99,7 @@ namespace model protected: CompiledMap(Map map, std::string functionName, const MapCompilerOptions& options); MapCompilerOptions GetMapCompilerOptions() const { return _compilerOptions; } - + std::string _functionName; MapCompilerOptions _compilerOptions; diff --git a/libraries/model/include/IRCompiledMap.h b/libraries/model/include/IRCompiledMap.h index 3efb6881e..9fdaa0efc 100644 --- a/libraries/model/include/IRCompiledMap.h +++ b/libraries/model/include/IRCompiledMap.h @@ -271,8 +271,7 @@ namespace model ComputeFunction computeFunction; switch (GetOutput(0).GetType()) // Switch on output type { - case model::Port::PortType::boolean: - { + case model::Port::PortType::boolean: { _cachedOutput = Vector(outputSize); auto fn = reinterpret_cast(functionPointer); computeFunction = [this, fn](void* context, const InputType* input) { @@ -281,8 +280,7 @@ namespace model } break; - case model::Port::PortType::integer: - { + case model::Port::PortType::integer: { _cachedOutput = Vector(outputSize); auto fn = reinterpret_cast(functionPointer); computeFunction = [this, fn](void* context, const InputType* input) { @@ -291,8 +289,7 @@ namespace model } break; - case model::Port::PortType::bigInt: - { + case model::Port::PortType::bigInt: { _cachedOutput = Vector(outputSize); auto fn = reinterpret_cast(functionPointer); computeFunction = [this, fn](void* context, const InputType* input) { @@ -301,8 +298,7 @@ namespace model } break; - case model::Port::PortType::smallReal: - { + case model::Port::PortType::smallReal: { _cachedOutput = Vector(outputSize); auto fn = reinterpret_cast(functionPointer); computeFunction = [this, fn](void* context, const InputType* input) { @@ -311,8 +307,7 @@ namespace model } break; - case model::Port::PortType::real: - { + case model::Port::PortType::real: { _cachedOutput = Vector(outputSize); auto fn = reinterpret_cast(functionPointer); computeFunction = [this, fn](void* context, const InputType* input) { @@ -327,10 +322,10 @@ namespace model _computeInputFunction = computeFunction; functionPointer = _executionEngine->ResolveFunctionAddress(_functionName + "_dispatch"); - _computeDispatchFunction = reinterpret_cast(functionPointer); + _computeDispatchFunction = reinterpret_cast(functionPointer); functionPointer = _executionEngine->ResolveFunctionAddress(_moduleName + "_Reset"); - 
_resetFunction = reinterpret_cast(functionPointer); + _resetFunction = reinterpret_cast(functionPointer); } } diff --git a/libraries/model/include/Map.h b/libraries/model/include/Map.h index 16927d5ac..2c1218500 100644 --- a/libraries/model/include/Map.h +++ b/libraries/model/include/Map.h @@ -713,7 +713,6 @@ namespace model return static_cast&>(GetOutput(outputName)); } - } // namespace model } // namespace ell diff --git a/libraries/model/include/MapCompiler.h b/libraries/model/include/MapCompiler.h index 93a0baab5..0690b7bfc 100644 --- a/libraries/model/include/MapCompiler.h +++ b/libraries/model/include/MapCompiler.h @@ -140,7 +140,6 @@ namespace model virtual emitters::ModuleEmitter* GetModuleEmitter() = 0; private: - friend class CompilableNode; void CompileNodes(Model& model); diff --git a/libraries/model/include/Node.h b/libraries/model/include/Node.h index d9f63193c..e5bface62 100644 --- a/libraries/model/include/Node.h +++ b/libraries/model/include/Node.h @@ -160,7 +160,11 @@ namespace model static std::string GetTypeName() { return "Node"; } /// Indicates if this node is able to compile itself to code. - virtual bool IsCompilable(const MapCompiler* compiler) const { UNUSED(compiler); return false; } + virtual bool IsCompilable(const MapCompiler* compiler) const + { + UNUSED(compiler); + return false; + } /// Print a human-readable representation of the Node. /// diff --git a/libraries/model/optimizer/test/src/Environment.cpp b/libraries/model/optimizer/test/src/Environment.cpp index 87973e4e7..0e20a57f2 100644 --- a/libraries/model/optimizer/test/src/Environment.cpp +++ b/libraries/model/optimizer/test/src/Environment.cpp @@ -14,29 +14,31 @@ namespace ell { namespace model { -namespace optimizer -{ - Environment::Environment() : _targetDevice(std::nullopt) + namespace optimizer { - } + Environment::Environment() : + _targetDevice(std::nullopt) + { + } - Environment::Environment(const emitters::TargetDevice& targetDevice) : _targetDevice(targetDevice) - { - } + Environment::Environment(const emitters::TargetDevice& targetDevice) : + _targetDevice(targetDevice) + { + } - bool Environment::HasTargetDevice() const - { - return static_cast(_targetDevice); - } + bool Environment::HasTargetDevice() const + { + return static_cast(_targetDevice); + } - const emitters::TargetDevice& Environment::GetTargetDevice() const - { - if (!HasTargetDevice()) + const emitters::TargetDevice& Environment::GetTargetDevice() const { - throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Environment doesn't have a target device"); + if (!HasTargetDevice()) + { + throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Environment doesn't have a target device"); + } + return _targetDevice.value(); } - return _targetDevice.value(); - } -} -} -} + } // namespace optimizer +} // namespace model +} // namespace ell diff --git a/libraries/model/optimizer/test/src/EnvironmentTest.cpp b/libraries/model/optimizer/test/src/EnvironmentTest.cpp index 8fe6a75ca..a076a4cf8 100644 --- a/libraries/model/optimizer/test/src/EnvironmentTest.cpp +++ b/libraries/model/optimizer/test/src/EnvironmentTest.cpp @@ -26,12 +26,11 @@ void TestEnvironment() { // Creating an instance of `IRModuleEmitter` will initialize LLVM so we can retrieve the host device emitters::CompilerOptions options; - emitters::IRModuleEmitter module("testModule", options); - + emitters::IRModuleEmitter module("testModule", options); + Environment emptyEnv; ProcessTest("Testing default environment", 
!emptyEnv.HasTargetDevice()); - + Environment hostEnv(emitters::GetTargetDevice("host")); ProcessTest("Testing host environment", hostEnv.HasTargetDevice()); } - diff --git a/libraries/model/optimizer/test/src/ObjectiveTest.cpp b/libraries/model/optimizer/test/src/ObjectiveTest.cpp index 0e584456c..b214a25a3 100644 --- a/libraries/model/optimizer/test/src/ObjectiveTest.cpp +++ b/libraries/model/optimizer/test/src/ObjectiveTest.cpp @@ -7,8 +7,8 @@ //////////////////////////////////////////////////////////////////////////////////////////////////// #include "ObjectiveTest.h" -#include "OptimizerTestUtil.h" #include "ExampleObjectives.h" +#include "OptimizerTestUtil.h" #include diff --git a/libraries/model/src/CompilableCodeNode.cpp b/libraries/model/src/CompilableCodeNode.cpp index 3f052c4be..434192fb7 100644 --- a/libraries/model/src/CompilableCodeNode.cpp +++ b/libraries/model/src/CompilableCodeNode.cpp @@ -234,7 +234,8 @@ namespace model const auto numInputs = inputs.size(); // we just need a binary predicate to map over the two vectors, so the accumulator functor is basically a no-op - (void)std::inner_product(args.begin() + numInputs, args.end(), outputs.begin(), 0, [](int, int) { return 0; }, ValueToPort); + (void)std::inner_product( + args.begin() + numInputs, args.end(), outputs.begin(), 0, [](int, int) { return 0; }, ValueToPort); } } // namespace model diff --git a/libraries/model/src/CompilableNode.cpp b/libraries/model/src/CompilableNode.cpp index 4300bc6b2..cd90cbb68 100644 --- a/libraries/model/src/CompilableNode.cpp +++ b/libraries/model/src/CompilableNode.cpp @@ -43,7 +43,7 @@ namespace model if (ShouldCompileInline() || compiler.GetMapCompilerOptions(*this).inlineNodes) { Log() << "Inlining node " << DiagnosticString(*this) << " into function " << enclosingFunction.GetFunctionName() << ", currently in block " << enclosingFunction.GetCurrentBlock()->getName().str() << EOL; - + irCompiler->NewNodeRegion(*this); auto oldOptions = enclosingFunction.GetCompilerOptions(); enclosingFunction.SetCompilerOptions(compiler.GetMapCompilerOptions(*this).compilerSettings); diff --git a/libraries/model/src/CompiledMap.cpp b/libraries/model/src/CompiledMap.cpp index 9d7d433b2..baec4a2c0 100644 --- a/libraries/model/src/CompiledMap.cpp +++ b/libraries/model/src/CompiledMap.cpp @@ -18,8 +18,8 @@ namespace model CompiledMap::CompiledMap(model::Map map, std::string functionName, const MapCompilerOptions& options) : Map(std::move(map)), _functionName(functionName), - _compilerOptions(options) {} - + _compilerOptions(options) + {} template <> CallbackRegistry& CompiledMap::GetCallbackRegistry() const diff --git a/libraries/model/src/IRCompiledMap.cpp b/libraries/model/src/IRCompiledMap.cpp index 684bd9168..0b8bd9aac 100644 --- a/libraries/model/src/IRCompiledMap.cpp +++ b/libraries/model/src/IRCompiledMap.cpp @@ -22,17 +22,17 @@ #include -#include #include +#include extern "C" { - // This is implementing the Source and SinkNode callback thunks which are used to provide support for std::function callbacks. 
- bool SourceCallbackThunk_float(int index, void* context, float* buffer, int size); - bool SourceCallbackThunk_double(int index, void* context, double* buffer, int size); - bool SourceCallbackThunk_int(int index, void* context, int* buffer, int size); - void SinkCallbackThunk_float(int index, void* context, float* buffer, int size); - void SinkCallbackThunk_double(int index, void* context, double* buffer, int size); - void SinkCallbackThunk_int(int index, void* context, int* buffer, int size); +// This is implementing the Source and SinkNode callback thunks which are used to provide support for std::function callbacks. +bool SourceCallbackThunk_float(int index, void* context, float* buffer, int size); +bool SourceCallbackThunk_double(int index, void* context, double* buffer, int size); +bool SourceCallbackThunk_int(int index, void* context, int* buffer, int size); +void SinkCallbackThunk_float(int index, void* context, float* buffer, int size); +void SinkCallbackThunk_double(int index, void* context, double* buffer, int size); +void SinkCallbackThunk_int(int index, void* context, int* buffer, int size); } namespace ell diff --git a/libraries/model/src/IRMapCompiler.cpp b/libraries/model/src/IRMapCompiler.cpp index cbea4752e..43b5449fd 100644 --- a/libraries/model/src/IRMapCompiler.cpp +++ b/libraries/model/src/IRMapCompiler.cpp @@ -766,7 +766,7 @@ namespace model void IRMapCompiler::NewNodeRegion(const Node& node) { auto& currentFunction = GetModule().GetCurrentFunction(); - + auto currentBlock = currentFunction.GetCurrentBlock(); auto termInst = currentBlock->getTerminator(); @@ -774,7 +774,7 @@ namespace model { Log() << "Prev block had no terminator!" << EOL; } - + // Create a new block auto pBlock = currentFunction.BeginBlock(IdString(node), true); assert(pBlock != nullptr && "Got null new block"); diff --git a/libraries/model/src/Map.cpp b/libraries/model/src/Map.cpp index 0b491e9be..21fb8d340 100644 --- a/libraries/model/src/Map.cpp +++ b/libraries/model/src/Map.cpp @@ -207,30 +207,25 @@ namespace model void* ptr = inputs[i]; switch (GetInputType(i)) { - case ell::model::Port::PortType::smallReal: - { + case ell::model::Port::PortType::smallReal: { float* floatData = reinterpret_cast(ptr); SetInputValue(i, std::vector(floatData, floatData + size)); } break; - case ell::model::Port::PortType::real: - { + case ell::model::Port::PortType::real: { double* doubleData = reinterpret_cast(ptr); SetInputValue(i, std::vector(doubleData, doubleData + size)); } break; - case ell::model::Port::PortType::integer: - { + case ell::model::Port::PortType::integer: { int* intData = reinterpret_cast(ptr); SetInputValue(i, std::vector(intData, intData + size)); } - case ell::model::Port::PortType::bigInt: - { + case ell::model::Port::PortType::bigInt: { int64_t* int64Data = reinterpret_cast(ptr); SetInputValue(i, std::vector(int64Data, int64Data + size)); } - case ell::model::Port::PortType::boolean: - { + case ell::model::Port::PortType::boolean: { bool* boolData = reinterpret_cast(ptr); SetInputValue(i, std::vector(boolData, boolData + size)); } @@ -249,32 +244,27 @@ namespace model void* ptr = outputs[i]; switch (GetOutputType(i)) { - case ell::model::Port::PortType::smallReal: - { + case ell::model::Port::PortType::smallReal: { const std::vector& values = _outputs[i]->GetOutput(); ::memcpy(ptr, values.data(), size * sizeof(float)); } break; - case ell::model::Port::PortType::real: - { + case ell::model::Port::PortType::real: { const std::vector& values = _outputs[i]->GetOutput(); ::memcpy(ptr, 
values.data(), size * sizeof(double)); } break; - case ell::model::Port::PortType::integer: - { + case ell::model::Port::PortType::integer: { const std::vector& values = _outputs[i]->GetOutput(); ::memcpy(ptr, values.data(), size * sizeof(int)); } break; - case ell::model::Port::PortType::bigInt: - { + case ell::model::Port::PortType::bigInt: { const std::vector& values = _outputs[i]->GetOutput(); ::memcpy(ptr, values.data(), size * sizeof(int64_t)); } break; - case ell::model::Port::PortType::boolean: - { + case ell::model::Port::PortType::boolean: { bool* boolData = reinterpret_cast(ptr); const std::vector& values = _outputs[i]->GetOutput(); // std::vector has no data() member. diff --git a/libraries/model/src/ModelTransformer.cpp b/libraries/model/src/ModelTransformer.cpp index b7cfc7fd2..9d0ad05b4 100644 --- a/libraries/model/src/ModelTransformer.cpp +++ b/libraries/model/src/ModelTransformer.cpp @@ -13,8 +13,8 @@ #include "RefineTransformation.h" #include -#include #include +#include #include @@ -507,32 +507,27 @@ namespace model auto layout = outputPort->GetMemoryLayout().GetExtent(); switch (outputPort->GetType()) { - case PortType::boolean: - { + case PortType::boolean: { auto outputNode = AddNode>(outputPort->Size()); MapNodeOutput(*static_cast*>(outputPort), outputNode->output); break; } - case PortType::integer: - { + case PortType::integer: { auto outputNode = AddNode>(outputPort->Size()); MapNodeOutput(*static_cast*>(outputPort), outputNode->output); break; } - case PortType::bigInt: - { + case PortType::bigInt: { auto outputNode = AddNode>(outputPort->Size()); MapNodeOutput(*static_cast*>(outputPort), outputNode->output); break; } - case PortType::smallReal: - { + case PortType::smallReal: { auto outputNode = AddNode>(outputPort->Size()); MapNodeOutput(*static_cast*>(outputPort), outputNode->output); break; } - case PortType::real: - { + case PortType::real: { auto outputNode = AddNode>(outputPort->Size()); MapNodeOutput(*static_cast*>(outputPort), outputNode->output); break; diff --git a/libraries/model/src/Port.cpp b/libraries/model/src/Port.cpp index afa9aba0f..f41ed6900 100644 --- a/libraries/model/src/Port.cpp +++ b/libraries/model/src/Port.cpp @@ -9,8 +9,8 @@ #include "Port.h" #include "Node.h" -#include #include +#include #include diff --git a/libraries/model/src/PortElements.cpp b/libraries/model/src/PortElements.cpp index e7f8ad7df..75bed2f9e 100644 --- a/libraries/model/src/PortElements.cpp +++ b/libraries/model/src/PortElements.cpp @@ -281,25 +281,29 @@ namespace model _referencedPort(&port), _startIndex(0), _sliceSize(port.Size()), - _isFixedSize(false) {} + _isFixedSize(false) + {} PortRange::PortRange(const OutputPortBase& port, size_t index) : _referencedPort(&port), _startIndex(index), _sliceSize(1), - _isFixedSize(true) {} + _isFixedSize(true) + {} PortRange::PortRange(const OutputPortBase& port, size_t startIndex, size_t numValues) : _referencedPort(&port), _startIndex(startIndex), _sliceSize(numValues), - _isFixedSize(true) {} + _isFixedSize(true) + {} PortRange::PortRange(const PortElementBase& element) : _referencedPort(element.ReferencedPort()), _startIndex(element.GetIndex()), _sliceSize(1), - _isFixedSize(true) {} + _isFixedSize(true) + {} size_t PortRange::Size() const { @@ -380,7 +384,8 @@ namespace model // PortElementBase::PortElementBase(const OutputPortBase& port, size_t index) : _referencedPort(&port), - _index(index) {} + _index(index) + {} // // PortElementsBase diff --git a/libraries/model/test/include/CompilableNodesTest.h 
b/libraries/model/test/include/CompilableNodesTest.h index a464acbf3..45ab4f5ea 100644 --- a/libraries/model/test/include/CompilableNodesTest.h +++ b/libraries/model/test/include/CompilableNodesTest.h @@ -66,7 +66,7 @@ void TestMultipleOutputNodes(); void TestShapeFunctionGeneration(); void TestCompilableClockNode(); void TestCompilableFFTNode(); -template +template void TestBufferNode(); // diff --git a/libraries/model/test/src/CompilableCodeNode_test.cpp b/libraries/model/test/src/CompilableCodeNode_test.cpp index 7376ca293..5763ca313 100644 --- a/libraries/model/test/src/CompilableCodeNode_test.cpp +++ b/libraries/model/test/src/CompilableCodeNode_test.cpp @@ -70,7 +70,7 @@ namespace detail void Define(FunctionDeclaration& fn) override { (void)fn.Define([](Vector v1, Vector v2, Scalar s) { - s = Dot(v1, v2); + s = Dot(v1, v2); }); } diff --git a/libraries/model/test/src/CompilableNodesTest.cpp b/libraries/model/test/src/CompilableNodesTest.cpp index 13a2947a1..a69bc0424 100644 --- a/libraries/model/test/src/CompilableNodesTest.cpp +++ b/libraries/model/test/src/CompilableNodesTest.cpp @@ -810,8 +810,7 @@ void TestCompilableBinaryOperationNode() BinaryOperationType::divide, BinaryOperationType::modulo, BinaryOperationType::maximum, - BinaryOperationType::minimum - }) + BinaryOperationType::minimum }) { model::Model model; auto inputNode = model.AddNode>(3); diff --git a/libraries/model/test/src/CompilerTest.cpp b/libraries/model/test/src/CompilerTest.cpp index 764ea3efe..b18e083d0 100644 --- a/libraries/model/test/src/CompilerTest.cpp +++ b/libraries/model/test/src/CompilerTest.cpp @@ -143,7 +143,7 @@ void TestSimpleMap(bool optimize) auto compiledMap = compiler.Compile(map); testing::ProcessTest("Testing IsValid of original map", testing::IsEqual(compiledMap.IsValid(), true)); - std::vector input({ 4,5,6 }); + std::vector input({ 4, 5, 6 }); std::vector output({ 0, 0, 0 }); std::vector expected({ 12, 15, 18 }); @@ -197,28 +197,24 @@ void TestProtoNNPredictorMap() predictors::ProtoNNPredictor protonnPredictor(dim, projectedDim, numPrototypes, numLabels, gamma); // projectedDim * dim - auto W = protonnPredictor.GetProjectionMatrix() = - { + auto W = protonnPredictor.GetProjectionMatrix() = { #include "TestProtoNNPredictorMap_Projection.inc" - }; + }; // projectedDim * numPrototypes - auto B = protonnPredictor.GetPrototypes() = - { + auto B = protonnPredictor.GetPrototypes() = { #include "TestProtoNNPredictorMap_Prototypes.inc" - }; + }; // numLabels * numPrototypes - auto Z = protonnPredictor.GetLabelEmbeddings() = - { + auto Z = protonnPredictor.GetLabelEmbeddings() = { #include "TestProtoNNPredictorMap_LabelEmbeddings.inc" - }; + }; // MNIST training data features - std::vector> features = - { + std::vector> features = { #include "TestProtoNNPredictorMap_features.inc" - }; + }; std::vector> labels{ { 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 } }; @@ -402,16 +398,17 @@ void TestCompiledMapParallelClone() std::vector> futures; for (const auto& mapRef : compiledMaps) { - futures.push_back(std::async(std::launch::async, - [&](int index, model::IRCompiledMap* map) { - auto engine = utilities::GetRandomEngine("123"); - std::uniform_int_distribution dist(0, 500); - std::this_thread::sleep_for(std::chrono::milliseconds(dist(engine))); - VerifyMapOutput(*map, signal, expected, "Parallel map test"); - return true; - }, - mapRef.first, - mapRef.second.get())); + futures.push_back(std::async( + std::launch::async, + [&](int index, 
model::IRCompiledMap* map) { + auto engine = utilities::GetRandomEngine("123"); + std::uniform_int_distribution dist(0, 500); + std::this_thread::sleep_for(std::chrono::milliseconds(dist(engine))); + VerifyMapOutput(*map, signal, expected, "Parallel map test"); + return true; + }, + mapRef.first, + mapRef.second.get())); } // wait on futures @@ -852,15 +849,14 @@ void TestMultiSourceSinkMap(bool expanded, bool optimized) testing::ProcessTest("Testing GetCallbackFunctionNames", found); // Compare output - std::vector> signal = - { - { 0 }, - { interval * 1 + lagThreshold / 2 }, // within threshold - { interval * 2 }, // on time - { interval * 3 + lagThreshold }, // late - { interval * 4 + lagThreshold * 20 }, // really late - { interval * 5 } // on time - }; + std::vector> signal = { + { 0 }, + { interval * 1 + lagThreshold / 2 }, // within threshold + { interval * 2 }, // on time + { interval * 3 + lagThreshold }, // late + { interval * 4 + lagThreshold * 20 }, // really late + { interval * 5 } // on time + }; VerifyCompiledOutput(map, compiledMap, signal, " multi-sink and source map"); } diff --git a/libraries/model/test/src/Map_test.cpp b/libraries/model/test/src/Map_test.cpp index e94231606..6d0448225 100644 --- a/libraries/model/test/src/Map_test.cpp +++ b/libraries/model/test/src/Map_test.cpp @@ -193,15 +193,14 @@ void TestMapClockNode() auto map = model::Map(model, { { "clockInput", in } }, { { "sinkOutput", sink->output } }); TestMapSerialization(map); - std::vector> clockValues = - { - { 0 }, - { interval * 1 + lagThreshold / 2 }, // within threshold - { interval * 2 }, // on time - { interval * 3 + lagThreshold }, // late - { interval * 4 + lagThreshold * 20 }, // really late - { interval * 5 } // on time - }; + std::vector> clockValues = { + { 0 }, + { interval * 1 + lagThreshold / 2 }, // within threshold + { interval * 2 }, // on time + { interval * 3 + lagThreshold }, // late + { interval * 4 + lagThreshold * 20 }, // really late + { interval * 5 } // on time + }; for (const auto& input : clockValues) { diff --git a/libraries/model/test/src/ModelTransformerTest.cpp b/libraries/model/test/src/ModelTransformerTest.cpp index 1551a3b4d..bcfab1c1c 100644 --- a/libraries/model/test/src/ModelTransformerTest.cpp +++ b/libraries/model/test/src/ModelTransformerTest.cpp @@ -90,8 +90,7 @@ class ModifyFirstDebugNode }; // Tests -void -TestCopySubmodel() +void TestCopySubmodel() { // Tests the function: // diff --git a/libraries/model/test/src/main.cpp b/libraries/model/test/src/main.cpp index 4f5e6ab63..149532d9b 100644 --- a/libraries/model/test/src/main.cpp +++ b/libraries/model/test/src/main.cpp @@ -61,7 +61,7 @@ int main() // Metadata tests TestModelMetadata(); - + // ModelBuilder tests // Test passthrough to Model::AddNode diff --git a/libraries/model/test/src/model_compiler_test_main.cpp b/libraries/model/test/src/model_compiler_test_main.cpp index 8352df272..004fbbaeb 100644 --- a/libraries/model/test/src/model_compiler_test_main.cpp +++ b/libraries/model/test/src/model_compiler_test_main.cpp @@ -150,8 +150,8 @@ void TestIRCompiler() TestReorderDataCodeNode1(); TestReorderDataCodeNode2(); TestReorderDataCodeNode3(); - TestReorderDataCodeNode4(); - TestReceptiveFieldMatrixNode(1, true); // new version + TestReorderDataCodeNode4(); + TestReceptiveFieldMatrixNode(1, true); // new version TestReceptiveFieldMatrixNode(1, false); // old (slow) version TestReceptiveFieldMatrixNode(2, true); // new version // TestReceptiveFieldMatrixNode(2, false); // old (slow) version -- Fails @@ -270,8 
+270,8 @@ void TestIRCompiler() TestConvolutionalLayerNode2(ConvolutionMethod::winograd, 1, 0); TestConvolutionalLayerNode3(ConvolutionMethod::winograd, 1, 0); - //BUGBUG: This test currently fails for Compute but passes for Compile. - //TestSpatialConvolutionNode(1, 0); + //BUGBUG: This test currently fails for Compute but passes for Compile. + //TestSpatialConvolutionNode(1, 0); TestFullyConnectedLayerNode(); // TestFullyConnectedLayerNode(0, 1); // Fully-connected layer nodes can't have padding (yet) diff --git a/libraries/nodes/include/BroadcastOperationNodes.h b/libraries/nodes/include/BroadcastOperationNodes.h index 29ec60002..983679376 100644 --- a/libraries/nodes/include/BroadcastOperationNodes.h +++ b/libraries/nodes/include/BroadcastOperationNodes.h @@ -445,15 +445,15 @@ namespace nodes value::Scalar BroadcastOperationNode::CallKernelFunction(FunctionType f, std::vector inputs, std::vector> indices) const { // TODO: if FunctionType was a function that took a vector of inputs, then we could dispense with this `if constexpr` block - if constexpr(std::is_same_v) + if constexpr (std::is_same_v) { return f(inputs[0](indices[0])); } - else if constexpr(std::is_same_v) + else if constexpr (std::is_same_v) { return f(inputs[0](indices[0]), inputs[1](indices[1])); } - else if constexpr(std::is_same_v) + else if constexpr (std::is_same_v) { return f(inputs[0](indices[0]), inputs[1](indices[1]), inputs[2](indices[2])); } diff --git a/libraries/nodes/include/BufferNode.h b/libraries/nodes/include/BufferNode.h index 9736bac46..85e0b6b72 100644 --- a/libraries/nodes/include/BufferNode.h +++ b/libraries/nodes/include/BufferNode.h @@ -8,7 +8,6 @@ #pragma once - #include #include #include @@ -30,8 +29,8 @@ namespace nodes { /// A node that combines one or more input buffers returning a larger window over that input. /// On each new input the buffer is shifted left by the size of that input. For example, if the input size - /// is 8 and the windowSize is 16 and the inputs are given in the sequence i1, i2, i3, i4 then the output - /// of the buffer node will be [0 i1], [i1 i2], [i2, i3], [i3 i4]. So if you think of the input as a + /// is 8 and the windowSize is 16 and the inputs are given in the sequence i1, i2, i3, i4 then the output + /// of the buffer node will be [0 i1], [i1 i2], [i2, i3], [i3 i4]. So if you think of the input as a /// series of values over time (like audio signal) then the BufferNode provides a sliding window over that /// input data. /// diff --git a/libraries/nodes/include/ConstantNode.h b/libraries/nodes/include/ConstantNode.h index b5b16746e..10f6c64b8 100644 --- a/libraries/nodes/include/ConstantNode.h +++ b/libraries/nodes/include/ConstantNode.h @@ -240,7 +240,7 @@ namespace nodes archiver["values"] >> _values; model::PortMemoryLayout layout; archiver["layout"] >> layout; - _output.SetMemoryLayout(layout); + _output.SetMemoryLayout(layout); } template diff --git a/libraries/nodes/include/FilterBankNode.h b/libraries/nodes/include/FilterBankNode.h index ab4a4c679..9bf3a7409 100644 --- a/libraries/nodes/include/FilterBankNode.h +++ b/libraries/nodes/include/FilterBankNode.h @@ -32,7 +32,7 @@ namespace nodes /// /// Base class for nodes that perform elementwise multiply between a set of triangular filters and the input frequency response. /// This can be useful as a way to sample different frequency bands in an FFT output to form a type of spectrogram. 
- /// Each value in the FilterBankNode output is the result of convolving the FFT output with a triangular filter, with some width, + /// Each value in the FilterBankNode output is the result of convolving the FFT output with a triangular filter, with some width, /// centered at some location on the FFT output. As an example, imagine we have a 10-element input, and a filter of width 6 centered /// over the 6th input: /// @@ -49,13 +49,13 @@ namespace nodes /// | / | \ | /// | / | \ | /// |---|---|-.-|-.-|-.-|-.-|-.-|-.-|---| - /// 0 1 2 3 4 5 6 7 8 9 + /// 0 1 2 3 4 5 6 7 8 9 /// /// then the result from this one filter would be the following (where I is the input vector): /// (I[2] * 0) + (I[3] * 0.333...) + (I[4] * 0.666) + (I[5] * 1) + (I[6] * 0.666) + (I[7] * 0.333) + (I[8] * 0) - /// + /// /// the idea then is that the filters can overlap to create smooth samples of each band in the input, and the output then is sized to - /// the number of filters. The implementation is optimized on the assumption that each triangle is a relatively small slice of + /// the number of filters. The implementation is optimized on the assumption that each triangle is a relatively small slice of /// the input such that it is faster to compute each triangle than to do a dot product for each filter against the entire input. /// template diff --git a/libraries/nodes/include/LinearPredictorNode.h b/libraries/nodes/include/LinearPredictorNode.h index 8802bcbde..d85b00a72 100644 --- a/libraries/nodes/include/LinearPredictorNode.h +++ b/libraries/nodes/include/LinearPredictorNode.h @@ -87,7 +87,6 @@ namespace nodes LinearPredictorType _predictor; }; - /// Convenience function to add a linear predictor node. /// /// The fundamental type used by this predictor. @@ -221,7 +220,7 @@ namespace nodes auto node = model->AddNode>(input, predictor); return node->output; } - + template LinearPredictorNode* AddNodeToModelTransformer(const model::PortElements& input, const predictors::LinearPredictor& predictor, model::ModelTransformer& transformer) { diff --git a/libraries/nodes/include/NodeOperations.h b/libraries/nodes/include/NodeOperations.h index eed243bdd..28c716b26 100644 --- a/libraries/nodes/include/NodeOperations.h +++ b/libraries/nodes/include/NodeOperations.h @@ -33,7 +33,7 @@ namespace nodes min, max, sigmoid, - sign, + sign, sin, softmax, sqrt, diff --git a/libraries/nodes/include/ReorderDataCodeNode.h b/libraries/nodes/include/ReorderDataCodeNode.h index 37b3085e7..57e470f08 100644 --- a/libraries/nodes/include/ReorderDataCodeNode.h +++ b/libraries/nodes/include/ReorderDataCodeNode.h @@ -139,10 +139,10 @@ namespace nodes ValueType _paddingValue; - // This is used in the Define function as a workaround for passing in constant Scalar values - // to the kernel - int _kernel_size; -}; + // This is used in the Define function as a workaround for passing in constant Scalar values + // to the kernel + int _kernel_size; + }; /// Convenience function for adding a node to a model.
/// @@ -259,7 +259,7 @@ namespace nodes _inputMemoryLayout(inputMemoryLayout), _outputMemoryLayout(_output.GetMemoryLayout()), _paddingValue(paddingValue), - _kernel_size(1) + _kernel_size(1) { if (inputMemoryLayout.NumDimensions() != outputMemoryLayout.NumDimensions()) { @@ -280,7 +280,7 @@ namespace nodes _inputMemoryLayout(_input.GetMemoryLayout()), _outputMemoryLayout(_output.GetMemoryLayout()), _paddingValue(0), - _kernel_size(1) + _kernel_size(1) { if (_inputMemoryLayout.NumDimensions() != order.NumDimensions()) { @@ -300,7 +300,7 @@ namespace nodes _inputMemoryLayout(_input.GetMemoryLayout()), _outputMemoryLayout(_output.GetMemoryLayout()), _paddingValue(paddingValue), - _kernel_size(1) + _kernel_size(1) { if (_inputMemoryLayout.NumDimensions() != outputMemoryLayout.NumDimensions()) { @@ -321,7 +321,7 @@ namespace nodes _inputMemoryLayout(inputMemoryLayout), _outputMemoryLayout(_output.GetMemoryLayout()), _paddingValue(paddingValue), - _kernel_size(1) + _kernel_size(1) { if (inputMemoryLayout.NumDimensions() != outputMemoryLayout.NumDimensions()) { diff --git a/libraries/nodes/include/UnaryOperationNode.h b/libraries/nodes/include/UnaryOperationNode.h index 43b103617..4e11d3f51 100644 --- a/libraries/nodes/include/UnaryOperationNode.h +++ b/libraries/nodes/include/UnaryOperationNode.h @@ -84,9 +84,9 @@ namespace nodes const model::OutputPort& UnaryOperation(const model::OutputPort& input, UnaryOperationType operation); /// @{ - /// Convenience functions for adding a node to a model. - /// The port to get the input data from - /// The output of the new node. + /// Convenience functions for adding a node to a model. + /// The port to get the input data from + /// The output of the new node. template = true> const model::OutputPort& Abs(const model::OutputPort& input); diff --git a/libraries/nodes/src/ActivationFunctions.cpp b/libraries/nodes/src/ActivationFunctions.cpp index dc9941744..6749e7c64 100644 --- a/libraries/nodes/src/ActivationFunctions.cpp +++ b/libraries/nodes/src/ActivationFunctions.cpp @@ -16,7 +16,6 @@ #include - namespace ell { namespace nodes @@ -34,13 +33,14 @@ namespace nodes constexpr auto bias = static_cast(0.5); auto lowBound = -bias / scale; auto highBound = (1 - bias) / scale; - return x < lowBound ? 0 : x > highBound ? 1 : (scale * x) + bias; + return x < lowBound ? 0 : x > highBound ? 1 + : (scale * x) + bias; } template emitters::LLVMValue HardSigmoidActivationFunction::Compile(emitters::IRFunctionEmitter& function, emitters::LLVMValue xValue) const { - emitters::IRLocalScalar x{ function,xValue }; + emitters::IRLocalScalar x{ function, xValue }; return Compile(x).value; } @@ -66,7 +66,8 @@ namespace nodes template ValueType HardTanhActivationFunction::Compute(ValueType x) const { - return x <= -1 ? -1 : x >= 1 ? 1 : x; + return x <= -1 ? -1 : x >= 1 ? 
1 + : x; } template @@ -136,7 +137,7 @@ namespace nodes template emitters::LLVMValue SigmoidActivationFunction::Compile(emitters::IRFunctionEmitter& function, emitters::LLVMValue xValue) const { - emitters::IRLocalScalar x{ function,xValue }; + emitters::IRLocalScalar x{ function, xValue }; return Compile(x).value; } diff --git a/libraries/nodes/src/ActivationLayerNode.cpp b/libraries/nodes/src/ActivationLayerNode.cpp index e8e3fda54..fdd7ab8f3 100644 --- a/libraries/nodes/src/ActivationLayerNode.cpp +++ b/libraries/nodes/src/ActivationLayerNode.cpp @@ -7,18 +7,18 @@ //////////////////////////////////////////////////////////////////////////////////////////////////// #include "ActivationLayerNode.h" +#include "ActivationFunctions.h" #include "BinaryFunctionNode.h" #include "BroadcastFunctionNode.h" -#include "ActivationFunctions.h" #include "ConstantNode.h" +#include +#include #include #include #include #include -#include #include -#include namespace ell { diff --git a/libraries/nodes/src/BroadcastOperationNodes.cpp b/libraries/nodes/src/BroadcastOperationNodes.cpp index 4c283538d..5f480e9a2 100644 --- a/libraries/nodes/src/BroadcastOperationNodes.cpp +++ b/libraries/nodes/src/BroadcastOperationNodes.cpp @@ -27,7 +27,7 @@ namespace nodes std::vector resultSize(arguments[0].NumDimensions(), 1); for (const auto& layout : arguments) { - // inflatedLayout will hold the layout for this argument, expanded to the same number of + // inflatedLayout will hold the layout for this argument, expanded to the same number of // dimensions as the current result (if necessary). auto inflatedLayout = layout; int extraDimensions = layout.NumDimensions() - inflatedLayout.NumDimensions(); diff --git a/libraries/nodes/src/BufferNode.cpp b/libraries/nodes/src/BufferNode.cpp index 8a46501fb..02cd822a5 100644 --- a/libraries/nodes/src/BufferNode.cpp +++ b/libraries/nodes/src/BufferNode.cpp @@ -10,8 +10,8 @@ #include -#include #include +#include namespace ell { @@ -69,7 +69,7 @@ namespace nodes // copy the input data to the end of the buffer Vector tail = _buffer.SubVector(remainder, inputSize); - + // tail = input, should work, but it is currently broken. (bug 2208) For(data, [&tail, &input](Scalar index) { tail[index] = input[index]; @@ -83,7 +83,7 @@ namespace nodes template void BufferNode::DefineReset(FunctionDeclaration& fn) { - fn.Define([this] { + fn.Define([this] { // bugbug: how to emit a "memset" operation here instead? 
Scalar zero(static_cast(0)); For(_buffer, [this, zero](Scalar index) { diff --git a/libraries/nodes/src/ConvolutionalLayerNode.cpp b/libraries/nodes/src/ConvolutionalLayerNode.cpp index 26d288d62..0d05d136c 100644 --- a/libraries/nodes/src/ConvolutionalLayerNode.cpp +++ b/libraries/nodes/src/ConvolutionalLayerNode.cpp @@ -56,8 +56,7 @@ namespace nodes switch (convParams.method) { - case ConvolutionMethod::simple: - { + case ConvolutionMethod::simple: { if (isDepthwiseSeparable) { auto convNode = transformer.AddNode>(*newInput, this->GetLayer(), convOutputLayout); @@ -70,20 +69,17 @@ namespace nodes } } break; - case ConvolutionMethod::unrolled: - { + case ConvolutionMethod::unrolled: { auto convNode = transformer.AddNode>(*newInput, convInputLayout, convOutputLayout, weights, convParams.stride); convOutput = &convNode->output; } break; - case ConvolutionMethod::diagonal: - { + case ConvolutionMethod::diagonal: { auto convNode = transformer.AddNode>(*newInput, convInputLayout, convOutputLayout, weights, convParams.stride); convOutput = &convNode->output; } break; - case ConvolutionMethod::winograd: - { + case ConvolutionMethod::winograd: { auto convNode = transformer.AddNode>(*newInput, convInputLayout, convOutputLayout, weights, convParams.stride); convOutput = &convNode->output; } diff --git a/libraries/nodes/src/FFTNode.cpp b/libraries/nodes/src/FFTNode.cpp index 48f6bec15..fc4461916 100644 --- a/libraries/nodes/src/FFTNode.cpp +++ b/libraries/nodes/src/FFTNode.cpp @@ -479,9 +479,9 @@ namespace nodes #if (USE_STORED_TWIDDLE_FACTORS) auto w = function.LocalScalar(function.ValueAt(twiddleFactorsVar, k)); #else - // w = e^i(2*pi*k/N) - auto kValue = function.LocalScalar(function.CastValue(k)); - auto w = detail::ImaginaryExp(function.LocalScalar(pi / halfN) * kValue); + // w = e^i(2*pi*k/N) + auto kValue = function.LocalScalar(function.CastValue(k)); + auto w = detail::ImaginaryExp(function.LocalScalar(pi / halfN) * kValue); #endif auto e = function.LocalScalar(function.ValueAt(evens, k)); @@ -573,9 +573,9 @@ namespace nodes #if (USE_STORED_TWIDDLE_FACTORS) auto w = function.LocalScalar(function.ValueAt(twiddleFactorsVar, k)); #else - // w = e^i(2*pi*k/N) - auto kValue = function.LocalScalar(function.CastValue(k)); - auto w = detail::ImaginaryExp(function.LocalScalar(pi / halfN) * kValue); + // w = e^i(2*pi*k/N) + auto kValue = function.LocalScalar(function.CastValue(k)); + auto w = detail::ImaginaryExp(function.LocalScalar(pi / halfN) * kValue); #endif auto e = function.LocalScalar(function.ValueAt(evens, k)); diff --git a/libraries/nodes/src/FilterBankNode.cpp b/libraries/nodes/src/FilterBankNode.cpp index 9fd5c08ec..c0e4c964a 100644 --- a/libraries/nodes/src/FilterBankNode.cpp +++ b/libraries/nodes/src/FilterBankNode.cpp @@ -73,7 +73,7 @@ namespace nodes auto endVar = module.ConstantArray("filterEnd_"s + GetInternalStateIdentifier(), endBins); auto offset = function.LocalScalar(_filters.GetOffset()); - + // Get port variables emitters::LLVMValue pInput = compiler.EnsurePortEmitted(input); emitters::LLVMValue pOutput = compiler.EnsurePortEmitted(output); diff --git a/libraries/nodes/src/MatrixMatrixMultiplyCodeNode.cpp b/libraries/nodes/src/MatrixMatrixMultiplyCodeNode.cpp index bfe311f55..ba88bcd1b 100644 --- a/libraries/nodes/src/MatrixMatrixMultiplyCodeNode.cpp +++ b/libraries/nodes/src/MatrixMatrixMultiplyCodeNode.cpp @@ -24,9 +24,9 @@ #include #include +#include #include #include -#include //using namespace ell::utilities; using namespace ell::value; @@ -209,9 +209,9 @@ namespace 
nodes loop.AddKernel(kernel, loopnests::LoopFragmentType::body); loop.SetLoopOrder({ m, k, n }); - auto outputC = matC.GetValue(); - outputC.SetLayout({ { (int)matC.Size() } }); - // ell::DebugPrintVector(outputC); + auto outputC = matC.GetValue(); + outputC.SetLayout({ { (int)matC.Size() } }); + // ell::DebugPrintVector(outputC); loopnests::CodeGenerator generator; generator.Run(loop); } @@ -251,7 +251,7 @@ namespace nodes const int InnerDimension = (int)(A.Columns()); const int kUnroll = 4; int columnBlock = std::min(64, OutputColumns); - int innerDimensionBlock = std::min(256, InnerDimension); + int innerDimensionBlock = std::min(256, InnerDimension); // Declare indexes loopnests::Index i("i"), j("j"), k("k"); @@ -265,7 +265,7 @@ namespace nodes C_(i_, j_) += B_(k_, j_) * A_(i_, k_); }); auto& schedule = nest.GetSchedule(); - + auto topLevelJ = j; auto topLevelK = k; @@ -280,24 +280,24 @@ namespace nodes // Set the order schedule.SetOrder({ jCache, kCache, iKernelOuter, jKernelOuter2, kBlock, k, i, jKernelOuter, j }); - // Set up caching + // Set up caching if ((OutputColumns > NumColumnsInKernel) && ((OutputColumns % NumColumnsInKernel) == 0)) { auto extraCacheBParams = std::make_tuple(NumColumnsInKernel, jKernelOuter2, BoundaryConditionHandling::ZeroPadding); schedule.template Cache(B, - { topLevelK, topLevelJ }, - { innerDimensionBlock, columnBlock }, - { kCache, jCache }, - std::nullopt, // Order isn't used by BLASTCopy - extraCacheBParams); + { topLevelK, topLevelJ }, + { innerDimensionBlock, columnBlock }, + { kCache, jCache }, + std::nullopt, // Order isn't used by BLASTCopy + extraCacheBParams); } auto extraZeroInputReduceOutputParams = std::make_tuple(vectorSize); schedule.template Cache(C, - { iKernelOuter, jKernelOuter2 }, - { NumRowsInKernel, NumColumnsInKernel }, - { iKernelOuter, jKernelOuter2 }, - utilities::RowMajorMatrixOrder, - extraZeroInputReduceOutputParams); + { iKernelOuter, jKernelOuter2 }, + { NumRowsInKernel, NumColumnsInKernel }, + { iKernelOuter, jKernelOuter2 }, + utilities::RowMajorMatrixOrder, + extraZeroInputReduceOutputParams); // Set unrolling schedule.Unroll(jKernelOuter); @@ -320,83 +320,81 @@ namespace nodes template void MatrixMatrixMultiplyCodeNode::ParallelizeGemmCol(Matrix A, Matrix B, Matrix C, int numThreads) - { - const int columns = B.Columns() / numThreads; + { + const int columns = B.Columns() / numThreads; const int col_spill = B.Columns() % numThreads; Parallelize( numThreads, std::tuple{ A, B, C }, - [=](value::Scalar id, value::Matrix A, value::Matrix B, value::Matrix C) - { - value::Scalar colStart = id * value::Scalar{columns}; + [=](value::Scalar id, value::Matrix A, value::Matrix B, value::Matrix C) { + value::Scalar colStart = id * value::Scalar{ columns }; int thread_seq = 0; EmitterContext::IfContext IfCxt = If(id == thread_seq, - [&] { - GemmFn( - A, - B.SubMatrix(value::Scalar{0}, colStart, (int)B.Rows(), columns), - C.SubMatrix(value::Scalar{0}, colStart, (int)C.Rows(), columns), - thread_seq); - }); - + [&] { + GemmFn( + A, + B.SubMatrix(value::Scalar{ 0 }, colStart, (int)B.Rows(), columns), + C.SubMatrix(value::Scalar{ 0 }, colStart, (int)C.Rows(), columns), + thread_seq); + }); + thread_seq++; - for(int i = thread_seq; i < numThreads; i++) + for (int i = thread_seq; i < numThreads; i++) { IfCxt.ElseIf(id == i, - [&] { - int actualColumns = i==(numThreads-1) ? 
columns + col_spill : columns; - - GemmFn( - A, - B.SubMatrix(value::Scalar{0}, colStart, (int)B.Rows(), actualColumns), - C.SubMatrix(value::Scalar{0}, colStart, (int)C.Rows(), actualColumns), - i); - }); + [&] { + int actualColumns = i == (numThreads - 1) ? columns + col_spill : columns; + + GemmFn( + A, + B.SubMatrix(value::Scalar{ 0 }, colStart, (int)B.Rows(), actualColumns), + C.SubMatrix(value::Scalar{ 0 }, colStart, (int)C.Rows(), actualColumns), + i); + }); } - }); + }); } template void MatrixMatrixMultiplyCodeNode::ParallelizeGemmRow(Matrix A, Matrix B, Matrix C, int numThreads) - { + { const int rows = A.Rows() / numThreads; const int row_spill = A.Rows() % numThreads; Parallelize( numThreads, std::tuple{ A, B, C }, - [=](value::Scalar id, value::Matrix A, value::Matrix B, value::Matrix C) - { - value::Scalar rowStart = id * value::Scalar{rows}; + [=](value::Scalar id, value::Matrix A, value::Matrix B, value::Matrix C) { + value::Scalar rowStart = id * value::Scalar{ rows }; int thread_seq = 0; EmitterContext::IfContext IfCxt = If(id == thread_seq, - [&] { - GemmFn( - A.SubMatrix(rowStart, value::Scalar{0}, rows, (int)A.Columns()), - B, - C.SubMatrix(rowStart, value::Scalar{0}, rows, (int)C.Columns()), - thread_seq); - }); - + [&] { + GemmFn( + A.SubMatrix(rowStart, value::Scalar{ 0 }, rows, (int)A.Columns()), + B, + C.SubMatrix(rowStart, value::Scalar{ 0 }, rows, (int)C.Columns()), + thread_seq); + }); + thread_seq++; - for(int i = thread_seq; i < numThreads; i++) + for (int i = thread_seq; i < numThreads; i++) { IfCxt.ElseIf(id == i, - [&] { - int actualRows = i==(numThreads-1) ? rows + row_spill : rows; - GemmFn( - A.SubMatrix(rowStart, value::Scalar{0}, actualRows, (int)A.Columns()), - B, - C.SubMatrix(rowStart, value::Scalar{0}, actualRows, (int)C.Columns()), - i); - }); + [&] { + int actualRows = i == (numThreads - 1) ? 
rows + row_spill : rows; + GemmFn( + A.SubMatrix(rowStart, value::Scalar{ 0 }, actualRows, (int)A.Columns()), + B, + C.SubMatrix(rowStart, value::Scalar{ 0 }, actualRows, (int)C.Columns()), + i); + }); } - }); + }); } template @@ -408,7 +406,7 @@ namespace nodes const size_t maxThreads = 4; size_t numThreads = maxThreads; - if (computationSize < double(minThreadLoad * maxThreads)) + if (computationSize < double(minThreadLoad * maxThreads)) { numThreads = std::min(int(computationSize / double(minThreadLoad)) + 1, int(maxThreads)); } @@ -440,12 +438,12 @@ namespace nodes auto tempC = valueC; if (_transposeOutput) { - tempC.SetLayout(utilities::MemoryLayout({ _n, _m }, utilities::DimensionOrder{1, 0})); + tempC.SetLayout(utilities::MemoryLayout({ _n, _m }, utilities::DimensionOrder{ 1, 0 })); } else { tempC.SetLayout(utilities::MemoryLayout({ _m, _n })); - } + } auto matA = value::Matrix(tempA); auto matB = value::Matrix(tempB); diff --git a/libraries/nodes/src/MatrixMatrixMultiplyNode.cpp b/libraries/nodes/src/MatrixMatrixMultiplyNode.cpp index 421758158..38a1c3c0a 100644 --- a/libraries/nodes/src/MatrixMatrixMultiplyNode.cpp +++ b/libraries/nodes/src/MatrixMatrixMultiplyNode.cpp @@ -333,16 +333,12 @@ namespace nodes // Explicitly instantiate versions template class MatrixMatrixMultiplyNode; - template - const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2); - template - const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2, const model::PortMemoryLayout& outputMemoryLayout); + template const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2); + template const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2, const model::PortMemoryLayout& outputMemoryLayout); template class MatrixMatrixMultiplyNode; - template - const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2); - template - const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2, const model::PortMemoryLayout& outputMemoryLayout); + template const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2); + template const model::OutputPort& MatrixMatrixMultiply(const model::OutputPort& input1, const model::OutputPort& input2, const model::PortMemoryLayout& outputMemoryLayout); } // namespace nodes } // namespace ell diff --git a/libraries/nodes/src/MatrixVectorMultiplyNode.cpp b/libraries/nodes/src/MatrixVectorMultiplyNode.cpp index 08542818f..6e03fe963 100644 --- a/libraries/nodes/src/MatrixVectorMultiplyNode.cpp +++ b/libraries/nodes/src/MatrixVectorMultiplyNode.cpp @@ -134,11 +134,9 @@ namespace nodes template class MatrixVectorMultiplyNode; template class MatrixVectorMultiplyNode; - template - const model::OutputPort& MatrixVectorMultiply(const model::OutputPort& inputMatrix, size_t m, size_t n, size_t matrixStride, const model::OutputPort& inputVector); + template const model::OutputPort& MatrixVectorMultiply(const model::OutputPort& inputMatrix, size_t m, size_t n, size_t matrixStride, const model::OutputPort& inputVector); - template - const model::OutputPort& MatrixVectorMultiply(const model::OutputPort& inputMatrix, size_t m, size_t n, size_t matrixStride, const model::OutputPort& inputVector); + template const model::OutputPort& 
MatrixVectorMultiply(const model::OutputPort& inputMatrix, size_t m, size_t n, size_t matrixStride, const model::OutputPort& inputVector); } // namespace nodes } // namespace ell diff --git a/libraries/nodes/src/RegionDetectionLayerNode.cpp b/libraries/nodes/src/RegionDetectionLayerNode.cpp index 68720d5d2..a3dd4d61f 100644 --- a/libraries/nodes/src/RegionDetectionLayerNode.cpp +++ b/libraries/nodes/src/RegionDetectionLayerNode.cpp @@ -7,8 +7,8 @@ /////////////////////////////////////////////////////////////////////////////// #include "RegionDetectionLayerNode.h" -#include "ActivationLayerNode.h" #include "ActivationFunctions.h" +#include "ActivationLayerNode.h" #include diff --git a/libraries/nodes/src/ScalingNode.cpp b/libraries/nodes/src/ScalingNode.cpp index 1b9910ae0..1dc2c4960 100644 --- a/libraries/nodes/src/ScalingNode.cpp +++ b/libraries/nodes/src/ScalingNode.cpp @@ -43,8 +43,8 @@ namespace nodes fn.Define([this](const Value data, Value result) { // flatten the MemoryLayout so we can accept any shaped input data and produce any shape result. Vector input = ToVector(data); - Vector output = ToVector(result); - + Vector output = ToVector(result); + For(input, [&input, &output, this](Scalar index) { output[index] = _scale * input[index]; }); diff --git a/libraries/nodes/src/UnrolledConvolutionNode.cpp b/libraries/nodes/src/UnrolledConvolutionNode.cpp index d02495f86..6ba95741c 100644 --- a/libraries/nodes/src/UnrolledConvolutionNode.cpp +++ b/libraries/nodes/src/UnrolledConvolutionNode.cpp @@ -8,8 +8,8 @@ #include "UnrolledConvolutionNode.h" #include "ConstantNode.h" -#include "MatrixMatrixMultiplyNode.h" #include "MatrixMatrixMultiplyCodeNode.h" +#include "MatrixMatrixMultiplyNode.h" #include "ReceptiveFieldMatrixNode.h" #include "ReorderDataCodeNode.h" #include @@ -96,16 +96,16 @@ namespace nodes bool UnrolledConvolutionNode::IsELLCodeTarget(model::ModelTransformer& transformer) const { auto compiler = dynamic_cast(transformer.GetContext().GetCompiler()); - if(compiler != nullptr) + if (compiler != nullptr) { auto device_name = compiler->GetCompilerOptions().targetDevice.deviceName; bool skip_ELLCode = compiler->GetCompilerOptions().skip_ellcode; if (device_name.compare("pi3") == 0 && !skip_ELLCode) { - return true; + return true; } } - + return false; } @@ -175,7 +175,7 @@ namespace nodes if (dataOrder == rcdOrder) // don't reorder input -- use old method { auto receptiveFieldMatrixNode = transformer.AddNode>(newInput, inputLayout, filterSize, _stride, inputPadding, dataOrder, outputImageWidth, outputImageHeight); - if(isELLCodeTarget) + if (isELLCodeTarget) { auto matrixMultNode = transformer.AddNode>(weights, m, n, k, lda, false, receptiveFieldMatrixNode->output, ldb, false, ldc, true); if (outputPadding != 0) @@ -207,7 +207,6 @@ namespace nodes transformer.MapNodeOutput(this->output, matrixMultNode->output); } } - } else // reorder input to be channels x rows x columns (drc) (then we can use the 'new' receptive field matrix generation) { @@ -219,7 +218,7 @@ namespace nodes const auto& reorderedInput = ReorderDataWithCodeNode(newInput, inputLayout, transposedInputLayout); auto receptiveFieldMatrixNode = transformer.AddNode>(reorderedInput, reorderedInput.GetMemoryLayout(), _filterSize, _stride, inputPadding, dataOrder, outputImageWidth, outputImageHeight); - if(isELLCodeTarget) + if (isELLCodeTarget) { auto matrixMultNode = transformer.AddNode>(weights, m, n, k, lda, false, receptiveFieldMatrixNode->output, ldb, false, ldc, true); if (outputPadding != 0) @@ -237,7 +236,7 @@ 
namespace nodes } else { - auto matrixMultNode = transformer.AddNode>(weights, m, n, k, lda, false, receptiveFieldMatrixNode->output, ldb, false, ldc, true); + auto matrixMultNode = transformer.AddNode>(weights, m, n, k, lda, false, receptiveFieldMatrixNode->output, ldb, false, ldc, true); if (outputPadding != 0) { // Add padding @@ -250,7 +249,7 @@ namespace nodes { transformer.MapNodeOutput(this->output, matrixMultNode->output); } - } + } } return true; } diff --git a/libraries/nodes/src/WinogradConvolutionNode.cpp b/libraries/nodes/src/WinogradConvolutionNode.cpp index 189ff6147..f2e746da4 100644 --- a/libraries/nodes/src/WinogradConvolutionNode.cpp +++ b/libraries/nodes/src/WinogradConvolutionNode.cpp @@ -1162,15 +1162,13 @@ namespace nodes model::MemoryShape weightsShape; switch (_order) { - case FilterOrder::tilesFirst: - { + case FilterOrder::tilesFirst: { // 'tilesFirst': (windowRows * windowColumns) x (numFilters) x (numChannels) auto numFilterChannels = static_cast(_filterWeights.NumChannels()); weightsShape = model::MemoryShape({ windowSize, windowSize, numFilters, numFilterChannels }); } break; - case FilterOrder::filtersFirst: - { + case FilterOrder::filtersFirst: { // 'filtersFirst': (numFilters) x (numChannels) x (windowRows * windowColumns) auto numFilterChannels = static_cast(_filterWeights.NumColumns()); weightsShape = model::MemoryShape({ numFilters, numFilterChannels, windowSize, windowSize }); diff --git a/libraries/nodes/test/include/DSPCodeNodesTests.h b/libraries/nodes/test/include/DSPCodeNodesTests.h index 421ff40a8..41f2ef505 100644 --- a/libraries/nodes/test/include/DSPCodeNodesTests.h +++ b/libraries/nodes/test/include/DSPCodeNodesTests.h @@ -11,4 +11,3 @@ #include void TestDSPCodeNodes(const std::string& path); - diff --git a/libraries/nodes/test/src/BasicMathNodesTests.cpp b/libraries/nodes/test/src/BasicMathNodesTests.cpp index 52ded78ed..73d2aac0d 100644 --- a/libraries/nodes/test/src/BasicMathNodesTests.cpp +++ b/libraries/nodes/test/src/BasicMathNodesTests.cpp @@ -89,7 +89,7 @@ void TestUnaryOperationNodeCompute(UnaryOperationType op, double (*expectedTrans ComputeContext context("TestUnaryOperationNodeCompute"); ContextGuard<> guard(context); - std::vector> data = { { 0 , 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 } }; + std::vector> data = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 } }; model::Model model; auto inputNode = model.AddNode>(data[0].size()); diff --git a/libraries/nodes/test/src/DSPNodesTest_main.cpp b/libraries/nodes/test/src/DSPNodesTest_main.cpp index 4cad82011..569692141 100644 --- a/libraries/nodes/test/src/DSPNodesTest_main.cpp +++ b/libraries/nodes/test/src/DSPNodesTest_main.cpp @@ -6,8 +6,8 @@ // //////////////////////////////////////////////////////////////////////////////////////////////////// -#include "DSPNodesTests.h" #include "DSPCodeNodesTests.h" +#include "DSPNodesTests.h" #include diff --git a/libraries/nodes/test/src/DSPNodesTests.cpp b/libraries/nodes/test/src/DSPNodesTests.cpp index 4458aa989..c6ab9e88d 100644 --- a/libraries/nodes/test/src/DSPNodesTests.cpp +++ b/libraries/nodes/test/src/DSPNodesTests.cpp @@ -603,26 +603,22 @@ static void TestConvolutionNodeCompileVsReference(ImageShape inputShape, Filters case dsp::ConvolutionMethodOption::automatic: std::cout << "Testing 'automatic' method --- using 'simple' instead" << std::endl; // fallthrough - case dsp::ConvolutionMethodOption::simple: - { + case dsp::ConvolutionMethodOption::simple: { auto convNode = model.AddNode>(*newInput, convInputLayout, convOutputLayout, filterWeights, 
stride); convOutput = convNode->output; break; } - case dsp::ConvolutionMethodOption::diagonal: - { + case dsp::ConvolutionMethodOption::diagonal: { auto convNode = model.AddNode>(*newInput, convInputLayout, convOutputLayout, filterWeights, stride); convOutput = convNode->output; break; } - case dsp::ConvolutionMethodOption::unrolled: - { + case dsp::ConvolutionMethodOption::unrolled: { auto convNode = model.AddNode>(*newInput, convInputLayout, convOutputLayout, filterWeights, stride); convOutput = convNode->output; break; } - case dsp::ConvolutionMethodOption::winograd: - { + case dsp::ConvolutionMethodOption::winograd: { auto convNode = model.AddNode>(*newInput, convInputLayout, convOutputLayout, filterWeights, stride, options.winogradOptions.tileSize, options.winogradOptions.filterOrder); convOutput = convNode->output; break; diff --git a/libraries/nodes/test/src/NodesTests.cpp b/libraries/nodes/test/src/NodesTests.cpp index 3564e28bb..381db4662 100644 --- a/libraries/nodes/test/src/NodesTests.cpp +++ b/libraries/nodes/test/src/NodesTests.cpp @@ -749,36 +749,33 @@ static void TestClockNodeCompute() lagNotificationCallbackCount++; }); - std::vector> signal = - { - { start }, - { start + interval * 1 + lagThreshold / 2 }, // within threshold - { start + interval * 2 }, // on time - { start + interval * 3 + lagThreshold }, // late (expect notification) - { start + interval * 4 + lagThreshold * 20 }, // really late (expect notification) - { start + interval * 5 } // on time - }; - - std::vector> expectedResults = - { - // lastIntervalTime, currentTime - { start, start }, - { start + interval * 1, start + interval * 1 + lagThreshold / 2 }, - { start + interval * 2, start + interval * 2 }, - { start + interval * 3, start + interval * 3 + lagThreshold }, - { start + interval * 4, start + interval * 4 + lagThreshold * 20 }, - { start + interval * 5, start + interval * 5 } - }; - - std::vector expectedGetTicksResults = - { - interval, - interval - lagThreshold / 2, - interval, - interval - lagThreshold, - interval - lagThreshold * 20, - interval - }; + std::vector> signal = { + { start }, + { start + interval * 1 + lagThreshold / 2 }, // within threshold + { start + interval * 2 }, // on time + { start + interval * 3 + lagThreshold }, // late (expect notification) + { start + interval * 4 + lagThreshold * 20 }, // really late (expect notification) + { start + interval * 5 } // on time + }; + + std::vector> expectedResults = { + // lastIntervalTime, currentTime + { start, start }, + { start + interval * 1, start + interval * 1 + lagThreshold / 2 }, + { start + interval * 2, start + interval * 2 }, + { start + interval * 3, start + interval * 3 + lagThreshold }, + { start + interval * 4, start + interval * 4 + lagThreshold * 20 }, + { start + interval * 5, start + interval * 5 } + }; + + std::vector expectedGetTicksResults = { + interval, + interval - lagThreshold / 2, + interval, + interval - lagThreshold, + interval - lagThreshold * 20, + interval + }; std::vector> results; std::vector getTicksResults; diff --git a/libraries/nodes/test/src/main.cpp b/libraries/nodes/test/src/main.cpp index 8027ddfc5..4642d23d5 100644 --- a/libraries/nodes/test/src/main.cpp +++ b/libraries/nodes/test/src/main.cpp @@ -6,8 +6,8 @@ // //////////////////////////////////////////////////////////////////////////////////////////////////// -#include "BasicMathNodesTests.h" #include "AppendNodesTests.h" +#include "BasicMathNodesTests.h" #include "NodesTests.h" #include diff --git 
a/libraries/optimization/include/VectorSolution.h b/libraries/optimization/include/VectorSolution.h index 04e833b12..248391a1a 100644 --- a/libraries/optimization/include/VectorSolution.h +++ b/libraries/optimization/include/VectorSolution.h @@ -32,7 +32,8 @@ namespace optimization using DatasetType = IndexedContainer; /// Solutions are expected to have a ParameterType. Empty here because this solution type doesn't need any parameters. - struct ParametersType {}; + struct ParametersType + {}; /// Default constructor. VectorSolution() = default; diff --git a/libraries/optimization/test/include/Solution_test.h b/libraries/optimization/test/include/Solution_test.h index 9137d0c5c..8fb7121a0 100644 --- a/libraries/optimization/test/include/Solution_test.h +++ b/libraries/optimization/test/include/Solution_test.h @@ -243,9 +243,11 @@ void TestMaskedMatrixSolution() testing::ProcessTest("TestMaskedMatrixSolution2 frozen weights", testing::IsEqual(frozenSolutionWeights2(0, 0), frozenWeights(0, 0))); testing::ProcessTest("TestMaskedMatrixSolution2 frozen weights", testing::IsEqual(frozenSolutionWeights2(0, 0), maskedWeights2(0, 0))); - std::cout << "Original solution weights:\n" << weights << std::endl; + std::cout << "Original solution weights:\n" + << weights << std::endl; std::cout << "Original solution duality gap: " << optimizer.GetSolutionInfo().DualityGap() << std::endl; - std::cout << "Masked solution weights:\n" << maskedWeights2 << std::endl; + std::cout << "Masked solution weights:\n" + << maskedWeights2 << std::endl; std::cout << "Masked solution duality gap: " << maskedOptimizer2.GetSolutionInfo().DualityGap() << std::endl; }; diff --git a/libraries/optimization/test/src/main.cpp b/libraries/optimization/test/src/main.cpp index cf9f68cc4..9b96b6674 100644 --- a/libraries/optimization/test/src/main.cpp +++ b/libraries/optimization/test/src/main.cpp @@ -194,7 +194,7 @@ int main() TestSolutionEquivalenceSDCA(10); TestSolutionEquivalenceSDCA(10); - + TestMaskedMatrixSolution(); TestMaskedMatrixSolution(); TestMaskedMatrixSolution(); diff --git a/libraries/passes/include/DetectLowPrecisionConvolutionTransformation.h b/libraries/passes/include/DetectLowPrecisionConvolutionTransformation.h index 9c6c08f53..b15b2c255 100644 --- a/libraries/passes/include/DetectLowPrecisionConvolutionTransformation.h +++ b/libraries/passes/include/DetectLowPrecisionConvolutionTransformation.h @@ -14,7 +14,7 @@ namespace ell { namespace passes { - /// A transformation that detects when a ConvolutionalLayerNode node can be replaced with a + /// A transformation that detects when a ConvolutionalLayerNode node can be replaced with a /// reduced bit version such as BinaryConvolutionalLayerNode. 
class DetectLowPrecisionConvolutionTransformation : public model::Transformation { diff --git a/libraries/passes/src/StandardTransformations.cpp b/libraries/passes/src/StandardTransformations.cpp index 1e237f2e2..1b0c76f23 100644 --- a/libraries/passes/src/StandardTransformations.cpp +++ b/libraries/passes/src/StandardTransformations.cpp @@ -6,8 +6,8 @@ // //////////////////////////////////////////////////////////////////////////////////////////////////// -#include "DetectLowPrecisionConvolutionTransformation.h" #include "StandardTransformations.h" +#include "StandardTransformations.h" +#include "DetectLowPrecisionConvolutionTransformation.h" #include "FuseLinearOperationsTransformation.h" #include "OptimizeReorderDataNodesTransformation.h" #include "SetConvolutionMethodTransformation.h" diff --git a/libraries/passes/test/src/TransformationTest.cpp b/libraries/passes/test/src/TransformationTest.cpp index 3e3f5f24b..5430a0d35 100644 --- a/libraries/passes/test/src/TransformationTest.cpp +++ b/libraries/passes/test/src/TransformationTest.cpp @@ -41,7 +41,8 @@ using namespace ell::utilities; namespace { -[[maybe_unused]] void PrintModel(const model::Model& model) { +[[maybe_unused]] void PrintModel(const model::Model& model) +{ std::cout << "------ Model start ------" << std::endl; model.Print(std::cout); std::cout << "------ Model end ------" << std::endl; diff --git a/libraries/predictors/neural/include/ConvolutionalLayer.h b/libraries/predictors/neural/include/ConvolutionalLayer.h index 7874bb695..9f72a010b 100644 --- a/libraries/predictors/neural/include/ConvolutionalLayer.h +++ b/libraries/predictors/neural/include/ConvolutionalLayer.h @@ -303,20 +303,17 @@ namespace predictors switch (_convolutionalParameters.method) { - case ConvolutionMethod::simple: - { + case ConvolutionMethod::simple: { auto result = dsp::Convolve2DSimpleDepthwiseSeparable(inputChannelTensor, weights, numFilters, stride); outputChannelTensor.CopyFrom(result); } break; - case ConvolutionMethod::unrolled: - { + case ConvolutionMethod::unrolled: { auto result = dsp::Convolve2DUnrolled(inputChannelTensor, weights, numFilters, stride); outputChannelTensor.CopyFrom(result); } break; - case ConvolutionMethod::winograd: - { + case ConvolutionMethod::winograd: { auto result = dsp::Convolve2DWinogradDepthwiseSeparable(inputChannelTensor, weights, numFilters); // Stride of 1 is assumed outputChannelTensor.CopyFrom(result); } diff --git a/libraries/predictors/neural/include/HardTanhActivation.h b/libraries/predictors/neural/include/HardTanhActivation.h index a74f603c1..ccfea964f 100644 --- a/libraries/predictors/neural/include/HardTanhActivation.h +++ b/libraries/predictors/neural/include/HardTanhActivation.h @@ -70,7 +70,8 @@ namespace predictors template ElementType HardTanhActivation::Apply(const ElementType input) const { - return ((input < -1) ? -1 : (input > 1) ? 1 : input); + return ((input < -1) ? -1 : (input > 1) ? 1 + : input); } template @@ -80,8 +81,8 @@ namespace predictors If(input < -1, [&] { result = value::Cast(-1); }).ElseIf(input > 1, [&] { - result = value::Cast(1); - }).Else([&] { + result = value::Cast(1); + }).Else([&] { result = input; }); return result; diff --git a/libraries/predictors/neural/include/Layer.h b/libraries/predictors/neural/include/Layer.h index 7e14c6063..f07bdbf85 100644 --- a/libraries/predictors/neural/include/Layer.h +++ b/libraries/predictors/neural/include/Layer.h @@ -55,10 +55,9 @@ namespace predictors /// Vector of strings that contains the names of the neural network layer types.
#define ADD_LAYER_TYPE(a, b) b, - static const std::string LayerNames[] = - { - LAYER_TYPES_LIST - }; + static const std::string LayerNames[] = { + LAYER_TYPES_LIST + }; #undef ADD_LAYER_TYPE /// Enum that represents the type of padding values in a neural network layer. @@ -417,8 +416,7 @@ namespace predictors case PaddingScheme::randomZeroAndOnes: output.Generate([] { return static_cast(std::rand() % 2); }); break; - case PaddingScheme::alternatingZeroAndOnes: - { + case PaddingScheme::alternatingZeroAndOnes: { for (size_t row = 0; row < output.NumRows(); row++) { for (size_t column = 0; column < output.NumColumns(); column++) diff --git a/libraries/predictors/neural/include/LeakyReLUActivation.h b/libraries/predictors/neural/include/LeakyReLUActivation.h index 23595b51c..f41d53291 100644 --- a/libraries/predictors/neural/include/LeakyReLUActivation.h +++ b/libraries/predictors/neural/include/LeakyReLUActivation.h @@ -111,7 +111,6 @@ namespace predictors return result; } - template void LeakyReLUActivation::WriteToArchive(utilities::Archiver& archiver) const { diff --git a/libraries/testing/include/testing.h b/libraries/testing/include/testing.h index 79ad85d6e..3c982edd6 100644 --- a/libraries/testing/include/testing.h +++ b/libraries/testing/include/testing.h @@ -8,10 +8,10 @@ #pragma once +#include #include #include #include -#include namespace ell { diff --git a/libraries/trainers/include/KMeansTrainer.h b/libraries/trainers/include/KMeansTrainer.h index 233da6b36..c0d0a637d 100644 --- a/libraries/trainers/include/KMeansTrainer.h +++ b/libraries/trainers/include/KMeansTrainer.h @@ -23,7 +23,6 @@ namespace trainers class KMeansTrainer { public: - /// Constructs an instance of KMeansTrainer trainer /// /// The input dimension. diff --git a/libraries/trainers/src/KMeansTrainer.cpp b/libraries/trainers/src/KMeansTrainer.cpp index 65fff37e4..37c3a46a5 100644 --- a/libraries/trainers/src/KMeansTrainer.cpp +++ b/libraries/trainers/src/KMeansTrainer.cpp @@ -20,13 +20,15 @@ namespace trainers _means(dim, numClusters), _isInitialized(false), _iterations(iterations), - _numClusters(numClusters) {} + _numClusters(numClusters) + {} KMeansTrainer::KMeansTrainer(size_t numClusters, size_t iters, math::ColumnMatrix means) : _means(means), _isInitialized(true), _iterations(iters), - _numClusters(numClusters) {} + _numClusters(numClusters) + {} void KMeansTrainer::RunKMeans(math::ConstMatrixReference X) { diff --git a/libraries/trainers/src/ProtoNNInit.cpp b/libraries/trainers/src/ProtoNNInit.cpp index 96a4420f5..7f0773e16 100644 --- a/libraries/trainers/src/ProtoNNInit.cpp +++ b/libraries/trainers/src/ProtoNNInit.cpp @@ -23,7 +23,8 @@ namespace trainers _dim(dim), _numPrototypesPerLabel(numPrototypesPerLabel), _B(dim, numLabels * numPrototypesPerLabel), - _Z(numLabels, numLabels * numPrototypesPerLabel) {} + _Z(numLabels, numLabels * numPrototypesPerLabel) + {} void ProtoNNInit::Initialize(math::ConstMatrixReference WX, math::ConstMatrixReference Y) { diff --git a/libraries/trainers/src/ProtoNNTrainer.cpp b/libraries/trainers/src/ProtoNNTrainer.cpp index 9c1b7d3c6..547e37f85 100644 --- a/libraries/trainers/src/ProtoNNTrainer.cpp +++ b/libraries/trainers/src/ProtoNNTrainer.cpp @@ -476,7 +476,8 @@ namespace trainers paramStepSize = _stepSize[parameterIndex] * etaVector[4]; // Call the accelerated proximal gradient_paramS method for optimizing this parameter - AcceleratedProximalGradient(parameterIndex, [&](ConstColumnMatrixReference /*W*/, const size_t begin, const size_t end) -> math::ColumnMatrix { 
return _modelMap[parameterIndex]->gradient(_modelMap, X, Y, WX, SimilarityKernel(X, WX, gamma, begin, end, _recomputeWX[parameterIndex]), gamma, begin, end, _parameters.lossFunction); }, [&](auto arg) { ProtoNNTrainerUtils::HardThresholding(arg, _sparsity[parameterIndex]); }, parameterMatrix, epochs, n, sgdBatchSize, paramStepSize, eta_update); + AcceleratedProximalGradient( + parameterIndex, [&](ConstColumnMatrixReference /*W*/, const size_t begin, const size_t end) -> math::ColumnMatrix { return _modelMap[parameterIndex]->gradient(_modelMap, X, Y, WX, SimilarityKernel(X, WX, gamma, begin, end, _recomputeWX[parameterIndex]), gamma, begin, end, _parameters.lossFunction); }, [&](auto arg) { ProtoNNTrainerUtils::HardThresholding(arg, _sparsity[parameterIndex]); }, parameterMatrix, epochs, n, sgdBatchSize, paramStepSize, eta_update); math::MultiplyScaleAddUpdate(1.0, _modelMap[m_projectionIndex]->GetData(), X, 0.0, WX); fOld = fCur; diff --git a/libraries/utilities/include/Archiver.h b/libraries/utilities/include/Archiver.h index 31f7e6fd7..fd3808b06 100644 --- a/libraries/utilities/include/Archiver.h +++ b/libraries/utilities/include/Archiver.h @@ -582,16 +582,28 @@ namespace utilities std::string GetArchivedTypeName(const T& value); /// Macros to make repetitive boilerplate code in archiver implementations easier to implement. -#define IMPLEMENT_ARCHIVE_VALUE(base, type) \ - void base::ArchiveValue(const char* name, type value, IsFundamental) { WriteScalar(name, value); } -#define IMPLEMENT_ARCHIVE_ARRAY(base, type) \ - void base::ArchiveArray(const char* name, const std::vector& value, IsFundamental) { WriteArray(name, value); } +#define IMPLEMENT_ARCHIVE_VALUE(base, type) \ + void base::ArchiveValue(const char* name, type value, IsFundamental) \ + { \ + WriteScalar(name, value); \ + } +#define IMPLEMENT_ARCHIVE_ARRAY(base, type) \ + void base::ArchiveArray(const char* name, const std::vector& value, IsFundamental) \ + { \ + WriteArray(name, value); \ + } /// Macros to make repetitive boilerplate code in unarchiver implementations easier to implement. -#define IMPLEMENT_UNARCHIVE_VALUE(base, type) \ - void base::UnarchiveValue(const char* name, type& value, IsFundamental) { ReadScalar(name, value); } -#define IMPLEMENT_UNARCHIVE_ARRAY(base, type) \ - void base::UnarchiveArray(const char* name, std::vector& value, IsFundamental) { ReadArray(name, value); } +#define IMPLEMENT_UNARCHIVE_VALUE(base, type) \ + void base::UnarchiveValue(const char* name, type& value, IsFundamental) \ + { \ + ReadScalar(name, value); \ + } +#define IMPLEMENT_UNARCHIVE_ARRAY(base, type) \ + void base::UnarchiveArray(const char* name, std::vector& value, IsFundamental) \ + { \ + ReadArray(name, value); \ + } } // namespace utilities } // namespace ell diff --git a/libraries/utilities/include/CallbackRegistry.h b/libraries/utilities/include/CallbackRegistry.h index af9ebf324..c17528d29 100644 --- a/libraries/utilities/include/CallbackRegistry.h +++ b/libraries/utilities/include/CallbackRegistry.h @@ -115,7 +115,7 @@ namespace utilities return !_sinkCallbacks.empty() || !_sourceCallbacks.empty(); } -} // namespace model +} // namespace utilities } // namespace ell #pragma endregion implementation diff --git a/libraries/utilities/include/FunctionUtils.h b/libraries/utilities/include/FunctionUtils.h index c51f6caea..3c8dddeee 100644 --- a/libraries/utilities/include/FunctionUtils.h +++ b/libraries/utilities/include/FunctionUtils.h @@ -31,7 +31,8 @@ namespace utilities /// /// Recursive base case with zero functions. 
Does nothing. - inline void InOrderFunctionEvaluator() {} + inline void InOrderFunctionEvaluator() + {} /// Invokes a series of zero-argument functions. /// @@ -93,7 +94,8 @@ namespace utilities /// FunctionTraits: A type-traits-like way to get the return type and argument types of a function /// template - struct FunctionTraits : public FunctionTraits { }; // generic base template + struct FunctionTraits : public FunctionTraits + {}; // generic base template // Function pointers template diff --git a/libraries/utilities/include/Hash.h b/libraries/utilities/include/Hash.h index 7fc006b07..d6b43b6aa 100644 --- a/libraries/utilities/include/Hash.h +++ b/libraries/utilities/include/Hash.h @@ -91,7 +91,8 @@ namespace utilities } template - [[nodiscard]] size_t HashTuple(const Tuple& tuple) { + [[nodiscard]] size_t HashTuple(const Tuple& tuple) + { size_t seed = 0; HashTupleImpl<0>(seed, tuple); return seed; diff --git a/libraries/utilities/include/MemoryLayout.h b/libraries/utilities/include/MemoryLayout.h index ca35301d7..54a1291c8 100644 --- a/libraries/utilities/include/MemoryLayout.h +++ b/libraries/utilities/include/MemoryLayout.h @@ -205,7 +205,7 @@ namespace utilities /// A class representing layout of a block of data in memory where the block can also /// contain padding such that a certain offset is required to access the "active" memory inside the - /// padded block. Coordinates that can only see the "active" region are called "logical" coordinates, and + /// padded block. Coordinates that can only see the "active" region are called "logical" coordinates, and /// coordinates that include the padding are called "physical" coordinates. class MemoryLayout : public utilities::IArchivable { @@ -555,7 +555,7 @@ namespace utilities /// The name of this type. std::string GetRuntimeTypeName() const override { return GetTypeName(); } - /// If the layout is contiguous, return a new layout that interprets this block as + /// If the layout is contiguous, return a new layout that interprets this block as /// a simple one dimensional vector, otherwise throws an exception. MemoryLayout Flatten() const; diff --git a/libraries/utilities/include/TypeName.h b/libraries/utilities/include/TypeName.h index 0e49a95e3..0db875f78 100644 --- a/libraries/utilities/include/TypeName.h +++ b/libraries/utilities/include/TypeName.h @@ -8,8 +8,8 @@ #pragma once -#include "TypeTraits.h" #include "Boolean.h" +#include "TypeTraits.h" #include #include diff --git a/libraries/utilities/include/TypeTraits.h b/libraries/utilities/include/TypeTraits.h index 5bfbe43c7..d65670c14 100644 --- a/libraries/utilities/include/TypeTraits.h +++ b/libraries/utilities/include/TypeTraits.h @@ -135,7 +135,7 @@ namespace utilities // deduction guide template - VariantVisitor(T...)->VariantVisitor; + VariantVisitor(T...) 
-> VariantVisitor; namespace detail { diff --git a/libraries/utilities/src/Boolean.cpp b/libraries/utilities/src/Boolean.cpp index 4e56029b1..c0b8c0cde 100644 --- a/libraries/utilities/src/Boolean.cpp +++ b/libraries/utilities/src/Boolean.cpp @@ -16,7 +16,8 @@ namespace utilities Boolean::Boolean() = default; Boolean::Boolean(bool value) : - value(value) {} + value(value) + {} bool operator==(Boolean b1, Boolean b2) { diff --git a/libraries/utilities/src/CStringParser.cpp b/libraries/utilities/src/CStringParser.cpp index e4b32de78..eed4051f4 100644 --- a/libraries/utilities/src/CStringParser.cpp +++ b/libraries/utilities/src/CStringParser.cpp @@ -62,7 +62,7 @@ namespace utilities } errno = tmp; - return ParseResult::success; + return ParseResult::success; } template @@ -96,7 +96,7 @@ namespace utilities errno = tmp; return ParseResult::success; } - + // wrapper for std::strtof inline ParseResult cParse(const char* pStr, char*& pEnd, float& value) { @@ -132,7 +132,7 @@ namespace utilities { return ParseInt(pStr, pEnd, std::strtol, value); } - + // wrapper for strtoul inline ParseResult cParse(const char* pStr, char*& pEnd, unsigned short& value) { @@ -212,7 +212,7 @@ namespace utilities return parseResult; } - // explicit instantiation + // explicit instantiation template ParseResult Parse(const char*& pStr, float& value); template ParseResult Parse(const char*& pStr, double& value); diff --git a/libraries/utilities/src/CommandLineParser.cpp b/libraries/utilities/src/CommandLineParser.cpp index ffee4d69c..fa19cd76e 100644 --- a/libraries/utilities/src/CommandLineParser.cpp +++ b/libraries/utilities/src/CommandLineParser.cpp @@ -512,8 +512,7 @@ namespace utilities { switch (entry.EntryType) { - case DocumentationEntry::Type::option: - { + case DocumentationEntry::Type::option: { const OptionInfo& info = _options[entry.EntryString]; if (info.enabled) { diff --git a/libraries/utilities/src/Files.cpp b/libraries/utilities/src/Files.cpp index 9631c8075..28a107f43 100644 --- a/libraries/utilities/src/Files.cpp +++ b/libraries/utilities/src/Files.cpp @@ -36,7 +36,7 @@ namespace utilities const auto& path = filepath; #endif // open file - if(!FileExists(filepath)) + if (!FileExists(filepath)) { throw utilities::InputException(InputExceptionErrors::invalidArgument, "file " + filepath + " doesn't exist"); } diff --git a/libraries/utilities/src/MemoryLayout.cpp b/libraries/utilities/src/MemoryLayout.cpp index fc7ca98c5..a8803867f 100644 --- a/libraries/utilities/src/MemoryLayout.cpp +++ b/libraries/utilities/src/MemoryLayout.cpp @@ -141,7 +141,10 @@ namespace utilities } } - int DimensionOrder::operator[](int index) const { return DimensionVector::operator[](index); } + int DimensionOrder::operator[](int index) const + { + return DimensionVector::operator[](index); + } bool DimensionOrder::IsCanonicalOrder() const { @@ -263,7 +266,10 @@ namespace utilities } } - bool MemoryLayout::HasPadding() const { return _size != _extent; } + bool MemoryLayout::HasPadding() const + { + return _size != _extent; + } int MemoryLayout::GetActiveSize(size_t index) const { @@ -374,11 +380,20 @@ namespace utilities return ReversePermute(physicalCoordinates, _dimensionOrder); } - MemoryShape MemoryLayout::GetLogicalDimensionActiveSize() const { return ReversePermute(_size, _dimensionOrder); } + MemoryShape MemoryLayout::GetLogicalDimensionActiveSize() const + { + return ReversePermute(_size, _dimensionOrder); + } - MemoryShape MemoryLayout::GetLogicalDimensionExtent() const { return ReversePermute(_extent, 
_dimensionOrder); } + MemoryShape MemoryLayout::GetLogicalDimensionExtent() const + { + return ReversePermute(_extent, _dimensionOrder); + } - MemoryShape MemoryLayout::GetLogicalDimensionOffset() const { return ReversePermute(_offset, _dimensionOrder); } + MemoryShape MemoryLayout::GetLogicalDimensionOffset() const + { + return ReversePermute(_offset, _dimensionOrder); + } MemoryShape MemoryLayout::GetLogicalDimensionIncrement() const { @@ -452,9 +467,15 @@ namespace utilities return false; } - bool MemoryLayout::IsContiguous() const { return _size == _extent && IsCanonicalOrder(); } + bool MemoryLayout::IsContiguous() const + { + return _size == _extent && IsCanonicalOrder(); + } - bool MemoryLayout::IsCanonicalOrder() const { return _dimensionOrder.IsCanonicalOrder(); } + bool MemoryLayout::IsCanonicalOrder() const + { + return _dimensionOrder.IsCanonicalOrder(); + } MemoryLayout MemoryLayout::ReorderedCopy(const DimensionOrder& newOrder) const { @@ -629,17 +650,35 @@ namespace utilities return true; } - bool operator==(const DimensionOrder& order1, const DimensionOrder& order2) { return Equal(order1, order2); } + bool operator==(const DimensionOrder& order1, const DimensionOrder& order2) + { + return Equal(order1, order2); + } - bool operator!=(const DimensionOrder& order1, const DimensionOrder& order2) { return !Equal(order1, order2); } + bool operator!=(const DimensionOrder& order1, const DimensionOrder& order2) + { + return !Equal(order1, order2); + } - bool operator==(const MemoryShape& shape1, const MemoryShape& shape2) { return Equal(shape1, shape2); } + bool operator==(const MemoryShape& shape1, const MemoryShape& shape2) + { + return Equal(shape1, shape2); + } - bool operator!=(const MemoryShape& shape1, const MemoryShape& shape2) { return !Equal(shape1, shape2); } + bool operator!=(const MemoryShape& shape1, const MemoryShape& shape2) + { + return !Equal(shape1, shape2); + } - bool operator==(const MemoryCoordinates& shape1, const MemoryCoordinates& shape2) { return Equal(shape1, shape2); } + bool operator==(const MemoryCoordinates& shape1, const MemoryCoordinates& shape2) + { + return Equal(shape1, shape2); + } - bool operator!=(const MemoryCoordinates& shape1, const MemoryCoordinates& shape2) { return !Equal(shape1, shape2); } + bool operator!=(const MemoryCoordinates& shape1, const MemoryCoordinates& shape2) + { + return !Equal(shape1, shape2); + } bool MemoryLayoutsEqual(const MemoryLayout& layout1, const MemoryLayout& layout2) { diff --git a/libraries/utilities/src/MillisecondTimer.cpp b/libraries/utilities/src/MillisecondTimer.cpp index 59a1e3933..92974aa3d 100644 --- a/libraries/utilities/src/MillisecondTimer.cpp +++ b/libraries/utilities/src/MillisecondTimer.cpp @@ -15,7 +15,8 @@ namespace utilities MillisecondTimer::MillisecondTimer() : _start(std::chrono::system_clock::now()), _elapsedTime(std::chrono::system_clock::duration::zero()), - _running(true) {} + _running(true) + {} void MillisecondTimer::Start() { diff --git a/libraries/utilities/src/ObjectArchiver.cpp b/libraries/utilities/src/ObjectArchiver.cpp index 037c6cd42..fa4ce432f 100644 --- a/libraries/utilities/src/ObjectArchiver.cpp +++ b/libraries/utilities/src/ObjectArchiver.cpp @@ -112,7 +112,7 @@ namespace utilities #define ARCHIVE_TYPE_OP(t) IMPLEMENT_UNARCHIVE_VALUE(ObjectArchiver, t); ARCHIVABLE_TYPES_LIST #undef ARCHIVE_TYPE_OP - + // strings void ObjectArchiver::UnarchiveValue(const char* name, std::string& value) { diff --git a/libraries/utilities/src/OutputStreamImpostor.cpp 
b/libraries/utilities/src/OutputStreamImpostor.cpp index 530f3fcd0..7161d4953 100644 --- a/libraries/utilities/src/OutputStreamImpostor.cpp +++ b/libraries/utilities/src/OutputStreamImpostor.cpp @@ -23,7 +23,8 @@ namespace utilities static std::ofstream nullStreamBuf; OutputStreamImpostor::OutputStreamImpostor() : - _outputStream(nullStreamBuf) {} + _outputStream(nullStreamBuf) + {} OutputStreamImpostor::OutputStreamImpostor(StreamType streamType) : OutputStreamImpostor() @@ -42,7 +43,8 @@ namespace utilities } OutputStreamImpostor::OutputStreamImpostor(std::ostream& stream) : - _outputStream(stream) {} + _outputStream(stream) + {} OutputStreamImpostor::OutputStreamImpostor(const std::string& filename) : _fileStream(std::make_shared(OpenOfstream(filename))), diff --git a/libraries/utilities/test/include/PropertyBag_test.h b/libraries/utilities/test/include/PropertyBag_test.h index d7d7b5681..2e6ff422d 100644 --- a/libraries/utilities/test/include/PropertyBag_test.h +++ b/libraries/utilities/test/include/PropertyBag_test.h @@ -12,4 +12,4 @@ namespace ell { void TestPropertyBag(); void TestRecursivePropertyBag(); -} +} // namespace ell diff --git a/libraries/utilities/test/include/RingBuffer_test.h b/libraries/utilities/test/include/RingBuffer_test.h index 504f22f17..0b8ec82e6 100644 --- a/libraries/utilities/test/include/RingBuffer_test.h +++ b/libraries/utilities/test/include/RingBuffer_test.h @@ -10,5 +10,5 @@ namespace ell { - void TestRingBuffer(); +void TestRingBuffer(); } diff --git a/libraries/utilities/test/src/MemoryLayout_test.cpp b/libraries/utilities/test/src/MemoryLayout_test.cpp index 5bd6b5f02..7a9280c9f 100644 --- a/libraries/utilities/test/src/MemoryLayout_test.cpp +++ b/libraries/utilities/test/src/MemoryLayout_test.cpp @@ -48,7 +48,7 @@ void TestMemoryLayoutCtors() { return ok; } - + try { if (rowPadding > 0 || columnPadding > 0) @@ -59,7 +59,7 @@ void TestMemoryLayoutCtors() } catch (InputException&) {} - + if (!ok) { return ok; diff --git a/libraries/utilities/test/src/RingBuffer_test.cpp b/libraries/utilities/test/src/RingBuffer_test.cpp index 3180d1582..5baefc647 100644 --- a/libraries/utilities/test/src/RingBuffer_test.cpp +++ b/libraries/utilities/test/src/RingBuffer_test.cpp @@ -13,42 +13,40 @@ #include - namespace ell { - using namespace utilities; +using namespace utilities; - template - std::vector ToArray(RingBuffer buffer) +template +std::vector ToArray(RingBuffer buffer) +{ + std::vector result; + for (size_t i = 0, n = buffer.Size(); i < n; i++) { - std::vector result; - for (size_t i = 0, n = buffer.Size(); i < n; i++) - { - result.push_back(buffer[i]); - } - return result; + result.push_back(buffer[i]); } + return result; +} - void TestRingBuffer() - { - RingBuffer buffer(5); - - testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector(5))); +void TestRingBuffer() +{ + RingBuffer buffer(5); - buffer.Append(1); + testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector(5))); - testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector({ 1, 0, 0, 0, 0 }))); + buffer.Append(1); - buffer.Append(2); + testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector({ 1, 0, 0, 0, 0 }))); - testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector({ 2, 1, 0, 0, 0 }))); + buffer.Append(2); - buffer.Append(3); - buffer.Append(4); - buffer.Append(5); - buffer.Append(6); // overflows the ring 
buffer + testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector({ 2, 1, 0, 0, 0 }))); - testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector({ 6, 5, 4, 3, 2 }))); + buffer.Append(3); + buffer.Append(4); + buffer.Append(5); + buffer.Append(6); // overflows the ring buffer - } + testing::ProcessTest("TestRingBuffer is empty", testing::IsEqual(ToArray(buffer), std::vector({ 6, 5, 4, 3, 2 }))); +} } // namespace ell diff --git a/libraries/value/include/CachingProvider.h b/libraries/value/include/CachingProvider.h index fa3eb95ae..081816b9a 100644 --- a/libraries/value/include/CachingProvider.h +++ b/libraries/value/include/CachingProvider.h @@ -43,7 +43,7 @@ namespace value std::vector _kernelIndices; std::vector _atIndices; std::any _extra; - + private: virtual void HandleCachingImpl(LoopNest&) = 0; }; diff --git a/libraries/value/include/ComputeContext.h b/libraries/value/include/ComputeContext.h index 844cc97c4..a1c2cf5b2 100644 --- a/libraries/value/include/ComputeContext.h +++ b/libraries/value/include/ComputeContext.h @@ -15,8 +15,8 @@ #include #include #include -#include #include +#include #include #include diff --git a/libraries/value/include/FunctionDeclaration.h b/libraries/value/include/FunctionDeclaration.h index d53a86fed..deb67b229 100644 --- a/libraries/value/include/FunctionDeclaration.h +++ b/libraries/value/include/FunctionDeclaration.h @@ -215,7 +215,7 @@ namespace value }; template - struct StdFunctionDeductionGuideHelper + struct StdFunctionDeductionGuideHelper { using Type = ReturnT(Args...); }; @@ -232,12 +232,12 @@ namespace value // Function pointer template - Function(ReturnT (*)(Args...))->Function; + Function(ReturnT (*)(Args...)) -> Function; // Functor template ::Type> - Function(Functor)->Function; + Function(Functor) -> Function; #endif // defined(__APPLE__) } // namespace detail diff --git a/libraries/value/include/LoopNests.h b/libraries/value/include/LoopNests.h index 1b9d94018..a0a7da2e1 100644 --- a/libraries/value/include/LoopNests.h +++ b/libraries/value/include/LoopNests.h @@ -191,8 +191,8 @@ namespace value std::tuple tupleArgs = utilities::VectorToTuple(args); std::apply(fn, tupleArgs); } }, - kernelOuterIndices, - kernelId); + kernelOuterIndices, + kernelId); } template diff --git a/libraries/value/include/Matrix.h b/libraries/value/include/Matrix.h index ef4e56f60..d7b511482 100644 --- a/libraries/value/include/Matrix.h +++ b/libraries/value/include/Matrix.h @@ -157,7 +157,6 @@ namespace value return Matrix(Allocate(utilities::MemoryLayout({ rows, columns })), name); } - /// Constructs a statically-allocated instance with the specified dimensions /// The number of rows of the allocated matrix /// The number of columns of the allocated matrix @@ -167,7 +166,7 @@ namespace value { return Matrix(StaticAllocate(name, type, utilities::MemoryLayout({ rows, columns }))); } - + /// Constructs a statically-allocated instance with the specified dimensions /// Any fundamental type accepted by Value /// The number of rows of the allocated matrix diff --git a/libraries/value/include/Value.h b/libraries/value/include/Value.h index 55a07fd9a..56dbc8212 100644 --- a/libraries/value/include/Value.h +++ b/libraries/value/include/Value.h @@ -439,7 +439,7 @@ namespace value inline operator Value&() { return _value; } inline Value& GetValue() { return _value; } - inline const Value& GetValue() const { return _value; } + inline const Value& GetValue() const { return _value; } private: Value 
_value; diff --git a/libraries/value/include/loopnests/Kernel.h b/libraries/value/include/loopnests/Kernel.h index 80403d110..a316b5ad5 100644 --- a/libraries/value/include/loopnests/Kernel.h +++ b/libraries/value/include/loopnests/Kernel.h @@ -69,8 +69,14 @@ namespace value std::function arguments, std::vector indices)> _kernel; }; - inline bool operator==(const Kernel& i1, const Kernel& i2) { return i1.GetId() == i2.GetId(); } - inline bool operator!=(const Kernel& i1, const Kernel& i2) { return !(i1 == i2); } + inline bool operator==(const Kernel& i1, const Kernel& i2) + { + return i1.GetId() == i2.GetId(); + } + inline bool operator!=(const Kernel& i1, const Kernel& i2) + { + return !(i1 == i2); + } } // namespace loopnests } // namespace value } // namespace ell diff --git a/libraries/value/include/loopnests/SplitIndexRange.h b/libraries/value/include/loopnests/SplitIndexRange.h index 4148c506b..a6cff0b37 100644 --- a/libraries/value/include/loopnests/SplitIndexRange.h +++ b/libraries/value/include/loopnests/SplitIndexRange.h @@ -11,8 +11,8 @@ #include "Index.h" #include "IndexRange.h" -#include #include +#include #include #include @@ -59,9 +59,9 @@ namespace value std::vector GetComputedIndices() const; std::vector GetDependentIndices(const Index& index, bool includeSelf = false) const; std::vector GetDependentLoopIndices(const Index& index, bool includeSelf = false) const; - + bool HasParentIndex(const Index& parent) const; - + /// Get the index that was split in order to create the given index Index GetParentIndex(const Index& parent) const; @@ -69,19 +69,19 @@ namespace value bool IsInnerSplitIndex(const Index& index) const; Index GetOuterSplitIndex(const Index& parent) const; Index GetInnerSplitIndex(const Index& parent) const; - + std::vector GetAllParentIndices(const Index& index) const; std::vector GetChildIndices(const Index& index) const; void Print(std::ostream& os) const; - + private: friend class SplitIterationDomain; SplitIndex Split(int size); // add a split --- must be smaller than last split SplitIndex Split(Index index, int size); // split the given index SplitIndex SplitNode(int node, int size); // split the given index - + int GetNode(const Index& index) const; // returns the offset (index into a vector) for the given index int GetParent(int node) const; int GetLeftChild(int node) const; diff --git a/libraries/value/src/Array.cpp b/libraries/value/src/Array.cpp index d8f2505a1..aa8cbe928 100644 --- a/libraries/value/src/Array.cpp +++ b/libraries/value/src/Array.cpp @@ -62,7 +62,10 @@ namespace value return *this; } - Value Array::GetValue() const { return _value; } + Value Array::GetValue() const + { + return _value; + } Array Array::Copy() const { @@ -95,13 +98,25 @@ namespace value return Scalar(indexedValue).Copy(); } - size_t Array::Size() const { return _value.GetLayout().NumElements(); } + size_t Array::Size() const + { + return _value.GetLayout().NumElements(); + } - ValueType Array::Type() const { return _value.GetBaseType(); } + ValueType Array::Type() const + { + return _value.GetBaseType(); + } - void Array::SetName(const std::string& name) { _value.SetName(name); } + void Array::SetName(const std::string& name) + { + _value.SetName(name); + } - std::string Array::GetName() const { return _value.GetName(); } + std::string Array::GetName() const + { + return _value.GetName(); + } void For(Array array, std::function&)> fn) { diff --git a/libraries/value/src/CachingStrategies.cpp b/libraries/value/src/CachingStrategies.cpp index cb51732e3..83f8a1caf 100644 
--- a/libraries/value/src/CachingStrategies.cpp +++ b/libraries/value/src/CachingStrategies.cpp @@ -592,11 +592,7 @@ namespace value underlyingNest.RenameVariable(_value, cacheRef, _atIndices, { cacheFillKernel, viewInitKernel }); } - // namespace value - - - - + // namespace value // Helper class to hold a binary tree with a MemoryLayout at each leaf node corresponding to a different // boundary condition and with a number of levels equal to the number of dimensions in a cache layout diff --git a/libraries/value/src/ComputeContext.cpp b/libraries/value/src/ComputeContext.cpp index 04eb411ca..425d9478b 100644 --- a/libraries/value/src/ComputeContext.cpp +++ b/libraries/value/src/ComputeContext.cpp @@ -337,7 +337,6 @@ namespace value } }; - struct InitializeVectorFunctionIntrinsic { auto operator()(std::vector args) const -> Value @@ -1585,7 +1584,10 @@ namespace value return {}; } - void ComputeContext::ImportCodeFileImpl(std::string) { throw LogicException(LogicExceptionErrors::notImplemented); } + void ComputeContext::ImportCodeFileImpl(std::string) + { + throw LogicException(LogicExceptionErrors::notImplemented); + } Scalar ComputeContext::GetFunctionAddressImpl(const FunctionDeclaration& fn) { @@ -1691,7 +1693,10 @@ namespace value throw LogicException(LogicExceptionErrors::illegalState); } - std::string ComputeContext::GetGlobalScopedName(std::string name) const { return _moduleName + "_" + name; } + std::string ComputeContext::GetGlobalScopedName(std::string name) const + { + return _moduleName + "_" + name; + } std::string ComputeContext::GetCurrentFunctionScopedName(std::string name) const { @@ -1702,9 +1707,15 @@ namespace value return GetGlobalScopedName(GetTopFrame().first + "_" + name); } - ComputeContext::Frame& ComputeContext::GetTopFrame() { return _stack.top(); } + ComputeContext::Frame& ComputeContext::GetTopFrame() + { + return _stack.top(); + } - const ComputeContext::Frame& ComputeContext::GetTopFrame() const { return _stack.top(); } + const ComputeContext::Frame& ComputeContext::GetTopFrame() const + { + return _stack.top(); + } void swap(ComputeContext& l, ComputeContext& r) noexcept { diff --git a/libraries/value/src/CppEmitterContext.cpp b/libraries/value/src/CppEmitterContext.cpp index e3b8ce406..6fb679214 100644 --- a/libraries/value/src/CppEmitterContext.cpp +++ b/libraries/value/src/CppEmitterContext.cpp @@ -213,10 +213,16 @@ namespace value { \ } #define ADD_TYPE_TO_CTYPE_STRING_STRING(TYPE, STR) \ - else if constexpr (std::is_same_v) { return #STR; } + else if constexpr (std::is_same_v) \ + { \ + return #STR; \ + } #define ADD_TYPE_TO_CTYPE_STRING(TYPE) ADD_TYPE_TO_CTYPE_STRING_STRING(TYPE, TYPE) -#define END_TYPE_TO_CTYPE_STRING_MAP \ - else { static_assert(utilities::FalseType{}, "Unknown type"); } +#define END_TYPE_TO_CTYPE_STRING_MAP \ + else \ + { \ + static_assert(utilities::FalseType{}, "Unknown type"); \ + } BEGIN_TYPE_TO_CTYPE_STRING_MAP ADD_TYPE_TO_CTYPE_STRING(bool) @@ -1520,9 +1526,15 @@ namespace value return value.IsConstant() ? 
_computeContext.GetName(value) : value.Get().GetDataAs()->name; } - void CppEmitterContext::ImportCodeFileImpl(std::string) { throw LogicException(LogicExceptionErrors::notImplemented); } + void CppEmitterContext::ImportCodeFileImpl(std::string) + { + throw LogicException(LogicExceptionErrors::notImplemented); + } - Scalar CppEmitterContext::GetFunctionAddressImpl(const FunctionDeclaration& fn) { throw LogicException(LogicExceptionErrors::notImplemented); } + Scalar CppEmitterContext::GetFunctionAddressImpl(const FunctionDeclaration& fn) + { + throw LogicException(LogicExceptionErrors::notImplemented); + } std::string CppEmitterContext::GetScopeAdjustedName(GlobalAllocationScope scope, std::string name) const { @@ -1562,7 +1574,10 @@ namespace value return _globalStream; } - std::ostream& CppEmitterContext::FnDecl() { return _fnDeclStream; } + std::ostream& CppEmitterContext::FnDecl() + { + return _fnDeclStream; + } Value CppEmitterContext::PromoteConstantData(Value value) { diff --git a/libraries/value/src/Emittable.cpp b/libraries/value/src/Emittable.cpp index 9660614eb..9314e4ae1 100644 --- a/libraries/value/src/Emittable.cpp +++ b/libraries/value/src/Emittable.cpp @@ -16,7 +16,8 @@ namespace value Emittable::Emittable() = default; Emittable::Emittable(void* data) : - _data(data) {} + _data(data) + {} } // namespace value } // namespace ell \ No newline at end of file diff --git a/libraries/value/src/EmitterContext.cpp b/libraries/value/src/EmitterContext.cpp index a7c810bb6..cb84b15a5 100644 --- a/libraries/value/src/EmitterContext.cpp +++ b/libraries/value/src/EmitterContext.cpp @@ -141,7 +141,10 @@ namespace value return std::nullopt; } - detail::ValueTypeDescription EmitterContext::GetType(Emittable emittable) { return GetTypeImpl(emittable); } + detail::ValueTypeDescription EmitterContext::GetType(Emittable emittable) + { + return GetTypeImpl(emittable); + } EmitterContext::DefinedFunction EmitterContext::CreateFunction(FunctionDeclaration decl, EmitterContext::DefinedFunction fn) { @@ -153,7 +156,10 @@ namespace value return IsFunctionDefinedImpl(decl); } - Value EmitterContext::StoreConstantData(ConstantData data) { return StoreConstantDataImpl(data); } + Value EmitterContext::StoreConstantData(ConstantData data) + { + return StoreConstantDataImpl(data); + } void EmitterContext::For(MemoryLayout layout, std::function)> fn, const std::string& name) { @@ -180,11 +186,20 @@ namespace value return ForImpl(start, stop, step, fn, name); } - void EmitterContext::MoveData(Value& source, Value& destination) { return MoveDataImpl(source, destination); } + void EmitterContext::MoveData(Value& source, Value& destination) + { + return MoveDataImpl(source, destination); + } - void EmitterContext::CopyData(const Value& source, Value& destination) { return CopyDataImpl(source, destination); } + void EmitterContext::CopyData(const Value& source, Value& destination) + { + return CopyDataImpl(source, destination); + } - Value EmitterContext::Reference(Value source) { return ReferenceImpl(source); } + Value EmitterContext::Reference(Value source) + { + return ReferenceImpl(source); + } Value EmitterContext::Dereference(Value source) { @@ -200,7 +215,10 @@ namespace value return DereferenceImpl(source); } - Value EmitterContext::Offset(Value begin, Value index) { return OffsetImpl(begin, index); } + Value EmitterContext::Offset(Value begin, Value index) + { + return OffsetImpl(begin, index); + } Value EmitterContext::Offset(Value begin, std::vector coordinates) { @@ -210,7 +228,10 @@ namespace value 
return Offset(begin, result.GetValue()); } - Value EmitterContext::UnaryOperation(ValueUnaryOperation op, Value value) { return UnaryOperationImpl(op, value); } + Value EmitterContext::UnaryOperation(ValueUnaryOperation op, Value value) + { + return UnaryOperationImpl(op, value); + } Value EmitterContext::BinaryOperation(ValueBinaryOperation op, Value destination, Value source) { @@ -430,9 +451,15 @@ namespace value return *s_context; } - void SetContext(EmitterContext& context) { s_context = &context; } + void SetContext(EmitterContext& context) + { + s_context = &context; + } - void ClearContext() noexcept { s_context = nullptr; } + void ClearContext() noexcept + { + s_context = nullptr; + } ContextGuard<>::ContextGuard(EmitterContext& context) : _oldContext(s_context) @@ -440,7 +467,10 @@ namespace value SetContext(context); } - ContextGuard<>::~ContextGuard() { _oldContext ? SetContext(*_oldContext) : ClearContext(); } + ContextGuard<>::~ContextGuard() + { + _oldContext ? SetContext(*_oldContext) : ClearContext(); + } Value Allocate(ValueType type, size_t size, size_t align, AllocateFlags flags) { @@ -462,7 +492,10 @@ namespace value return GetContext().GlobalAllocate(name, type, layout, flags); } - EmitterContext::IfContext If(Scalar test, std::function fn) { return GetContext().If(test, fn); } + EmitterContext::IfContext If(Scalar test, std::function fn) + { + return GetContext().If(test, fn); + } void While(Scalar test, std::function fn) { @@ -710,8 +743,7 @@ namespace value { If(v == Cast(0, v.GetType()), [&] { r = Cast(1, v.GetType()); - }) - .Else([&] { + }).Else([&] { r = Cast(0, v.GetType()); }); } diff --git a/libraries/value/src/FunctionDeclaration.cpp b/libraries/value/src/FunctionDeclaration.cpp index ad21470c3..3c6f6b6b9 100644 --- a/libraries/value/src/FunctionDeclaration.cpp +++ b/libraries/value/src/FunctionDeclaration.cpp @@ -95,7 +95,7 @@ namespace value if (!_decoratedFunctionName) { size_t hash = 0; - if(_returnType) + if (_returnType) { HashCombine(hash, static_cast(_returnType->GetBaseType())); HashCombine(hash, _returnType->PointerLevel()); @@ -104,7 +104,7 @@ namespace value HashCombine(hash, _returnType->GetLayout()); } } - for(auto p: _paramTypes) + for (auto p : _paramTypes) { HashCombine(hash, static_cast(p.GetBaseType())); HashCombine(hash, p.PointerLevel()); @@ -158,7 +158,10 @@ namespace value return !_importedSource.empty(); } - bool FunctionDeclaration::IsEmpty() const { return _isEmpty; } + bool FunctionDeclaration::IsEmpty() const + { + return _isEmpty; + } FunctionInlining FunctionDeclaration::InlineState() const { diff --git a/libraries/value/src/LLVMContext.cpp b/libraries/value/src/LLVMContext.cpp index 9d860dbea..e15c0c48e 100644 --- a/libraries/value/src/LLVMContext.cpp +++ b/libraries/value/src/LLVMContext.cpp @@ -56,16 +56,14 @@ namespace value default: break; } - case llvm::Type::TypeID::PointerTyID: - { + case llvm::Type::TypeID::PointerTyID: { auto elementType = type->getPointerElementType(); auto underlyingType = LLVMTypeToValueType(elementType); underlyingType.second += 1; return underlyingType; } - case llvm::Type::TypeID::ArrayTyID: - { + case llvm::Type::TypeID::ArrayTyID: { auto elementType = type->getArrayElementType(); auto underlyingType = LLVMTypeToValueType(elementType); @@ -979,7 +977,10 @@ namespace value return _emitter.HasFunction(decl.GetFunctionName()); } - Value LLVMContext::StoreConstantDataImpl(ConstantData data) { return _computeContext.StoreConstantData(data); } + Value LLVMContext::StoreConstantDataImpl(ConstantData 
data) + { + return _computeContext.StoreConstantData(data); + } void LLVMContext::ForImpl(MemoryLayout layout, std::function)> fn, const std::string& name) { @@ -1152,8 +1153,7 @@ namespace value case CopyType::DirectScalarPassThrough: destination.SetData(Emittable{ srcValue }); break; - case CopyType::DirectScalarCopy: - { + case CopyType::DirectScalarCopy: { auto destAtOffset = irEmitter.PointerOffset(destValue, irEmitter.Zero(VariableType::Int32)); irEmitter.Store(destAtOffset, srcValue); break; @@ -1180,8 +1180,7 @@ namespace value ""); } break; - case CopyType::Reference: - { + case CopyType::Reference: { auto srcAtOffset = irEmitter.Load(srcValue); irEmitter.Store(destValue, srcAtOffset); destination.SetLayout(source.GetLayout()); diff --git a/libraries/value/src/LoopNests.cpp b/libraries/value/src/LoopNests.cpp index 9a39f2c53..ac077c808 100644 --- a/libraries/value/src/LoopNests.cpp +++ b/libraries/value/src/LoopNests.cpp @@ -165,13 +165,16 @@ namespace value LoopNest::LoopNest() : _impl(std::make_unique()), - _schedule(*this) {} + _schedule(*this) + {} LoopNest::LoopNest(const LoopNest& other) : _impl(std::make_unique(*other._impl)), - _schedule(*this) {} + _schedule(*this) + {} LoopNest::LoopNest(LoopNest&& other) noexcept : _impl(std::move(other._impl)), - _schedule(*this) {} + _schedule(*this) + {} LoopNest& LoopNest::operator=(const LoopNest& other) { diff --git a/libraries/value/src/Matrix.cpp b/libraries/value/src/Matrix.cpp index ba00f16ae..9e16b7b36 100644 --- a/libraries/value/src/Matrix.cpp +++ b/libraries/value/src/Matrix.cpp @@ -62,7 +62,7 @@ namespace value Scalar Matrix::operator()(Scalar rowIndex, Scalar columnIndex) { - Value indexedValue = GetContext().Offset(_value, { rowIndex, columnIndex }); + Value indexedValue = GetContext().Offset(_value, { rowIndex, columnIndex }); indexedValue.SetLayout(ScalarLayout); return indexedValue; @@ -76,7 +76,10 @@ namespace value return Scalar(indexedValue).Copy(); } - Value Matrix::GetValue() const { return _value; } + Value Matrix::GetValue() const + { + return _value; + } Matrix Matrix::SubMatrix(Scalar row, Scalar column, int numRows, int numColumns) const { @@ -111,7 +114,10 @@ namespace value return newValue; } - size_t Matrix::Size() const { return _value.GetLayout().NumElements(); } + size_t Matrix::Size() const + { + return _value.GetLayout().NumElements(); + } Vector Matrix::Row(Scalar index) const { @@ -131,20 +137,35 @@ namespace value return indexedValue; } - size_t Matrix::Rows() const { return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(0)); } + size_t Matrix::Rows() const + { + return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(0)); + } - size_t Matrix::Columns() const { return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(1)); } + size_t Matrix::Columns() const + { + return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(1)); + } Matrix::MatrixLayout Matrix::GetMatrixLayout() const { return _value.GetLayout().IsCanonicalOrder() ? 
MatrixLayout::rowMajor : MatrixLayout::columnMajor; } - ValueType Matrix::Type() const { return _value.GetBaseType(); } + ValueType Matrix::Type() const + { + return _value.GetBaseType(); + } - void Matrix::SetName(const std::string& name) { _value.SetName(name); } + void Matrix::SetName(const std::string& name) + { + _value.SetName(name); + } - std::string Matrix::GetName() const { return _value.GetName(); } + std::string Matrix::GetName() const + { + return _value.GetName(); + } Matrix& Matrix::operator+=(Matrix m) { diff --git a/libraries/value/src/MatrixOperations.cpp b/libraries/value/src/MatrixOperations.cpp index c90150ad2..cef13cad6 100644 --- a/libraries/value/src/MatrixOperations.cpp +++ b/libraries/value/src/MatrixOperations.cpp @@ -66,7 +66,10 @@ namespace value name); } - Matrix GEMM(Matrix m1, Matrix m2) { throw LogicException(LogicExceptionErrors::notImplemented); } + Matrix GEMM(Matrix m1, Matrix m2) + { + throw LogicException(LogicExceptionErrors::notImplemented); + } Vector GEMV(Matrix m, Vector v) { diff --git a/libraries/value/src/Reference.cpp b/libraries/value/src/Reference.cpp index ac92419ed..25620e484 100644 --- a/libraries/value/src/Reference.cpp +++ b/libraries/value/src/Reference.cpp @@ -13,10 +13,12 @@ namespace ell namespace value { Ref::Ref(Value value) : - _value(value.Reference()) {} + _value(value.Reference()) + {} Ref::Ref(std::in_place_t, Value value) : - _value(value) {} + _value(value) + {} Ref::Ref(const Ref& other) : _value(other._value) @@ -46,13 +48,25 @@ namespace value Ref::~Ref() = default; - Value Ref::operator*() const { return _value.Dereference(); } + Value Ref::operator*() const + { + return _value.Dereference(); + } - Value Ref::GetValue() const { return _value; } + Value Ref::GetValue() const + { + return _value; + } - void Ref::SetName(const std::string& name) { _value.SetName(name); } + void Ref::SetName(const std::string& name) + { + _value.SetName(name); + } - std::string Ref::GetName() const { return _value.GetName(); } + std::string Ref::GetName() const + { + return _value.GetName(); + } } // namespace value } // namespace ell diff --git a/libraries/value/src/Scalar.cpp b/libraries/value/src/Scalar.cpp index c5cfa7530..771ca4b80 100644 --- a/libraries/value/src/Scalar.cpp +++ b/libraries/value/src/Scalar.cpp @@ -55,7 +55,10 @@ namespace value return *this; } - Value Scalar::GetValue() const { return _value; } + Value Scalar::GetValue() const + { + return _value; + } Scalar Scalar::Copy() const { @@ -64,11 +67,20 @@ namespace value return s; } - ValueType Scalar::GetType() const { return _value.GetBaseType(); } + ValueType Scalar::GetType() const + { + return _value.GetBaseType(); + } - void Scalar::SetName(const std::string& name) { _value.SetName(name); } + void Scalar::SetName(const std::string& name) + { + _value.SetName(name); + } - std::string Scalar::GetName() const { return _value.GetName(); } + std::string Scalar::GetName() const + { + return _value.GetName(); + } Scalar& Scalar::operator+=(Scalar s) { diff --git a/libraries/value/src/Tensor.cpp b/libraries/value/src/Tensor.cpp index e7028d55d..3ba264134 100644 --- a/libraries/value/src/Tensor.cpp +++ b/libraries/value/src/Tensor.cpp @@ -75,7 +75,10 @@ namespace value return Scalar(indexedValue).Copy(); } - Value Tensor::GetValue() const { return _value; } + Value Tensor::GetValue() const + { + return _value; + } Tensor Tensor::SubTensor(Scalar row, Scalar column, Scalar channel, int numRows, int numColumns, int numChannels) const { @@ -111,7 +114,10 @@ namespace value 
return newValue; } - size_t Tensor::Size() const { return _value.GetLayout().NumElements(); } + size_t Tensor::Size() const + { + return _value.GetLayout().NumElements(); + } Matrix Tensor::Slice(Scalar row, value::Slice mode1, value::Slice mode2) const { @@ -179,17 +185,35 @@ namespace value return indexedValue; } - size_t Tensor::Rows() const { return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(0)); } + size_t Tensor::Rows() const + { + return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(0)); + } - size_t Tensor::Columns() const { return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(1)); } + size_t Tensor::Columns() const + { + return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(1)); + } - size_t Tensor::Channels() const { return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(2)); } + size_t Tensor::Channels() const + { + return static_cast(_value.GetLayout().GetLogicalDimensionActiveSize(2)); + } - ValueType Tensor::Type() const { return _value.GetBaseType(); } + ValueType Tensor::Type() const + { + return _value.GetBaseType(); + } - void Tensor::SetName(const std::string& name) { _value.SetName(name); } + void Tensor::SetName(const std::string& name) + { + _value.SetName(name); + } - std::string Tensor::GetName() const { return _value.GetName(); } + std::string Tensor::GetName() const + { + return _value.GetName(); + } Tensor& Tensor::operator+=(Scalar s) { diff --git a/libraries/value/src/Value.cpp b/libraries/value/src/Value.cpp index 4b60ed4f5..5f3433bbb 100644 --- a/libraries/value/src/Value.cpp +++ b/libraries/value/src/Value.cpp @@ -260,7 +260,10 @@ namespace value value._data); } - bool Value::IsDefined() const { return _type.first != ValueType::Undefined; } + bool Value::IsDefined() const + { + return _type.first != ValueType::Undefined; + } bool Value::IsEmpty() const { @@ -271,7 +274,10 @@ namespace value _data); } - bool Value::IsConstant() const { return !std::holds_alternative(_data); } + bool Value::IsConstant() const + { + return !std::holds_alternative(_data); + } bool Value::IsIntegral() const { @@ -299,24 +305,45 @@ namespace value } } - bool Value::IsBoolean() const { return _type.first == ValueType::Boolean && !IsReference(); } + bool Value::IsBoolean() const + { + return _type.first == ValueType::Boolean && !IsReference(); + } - bool Value::IsInt16() const { return _type.first == ValueType::Int16 && !IsReference(); } + bool Value::IsInt16() const + { + return _type.first == ValueType::Int16 && !IsReference(); + } - bool Value::IsInt32() const { return _type.first == ValueType::Int32 && !IsReference(); } + bool Value::IsInt32() const + { + return _type.first == ValueType::Int32 && !IsReference(); + } - bool Value::IsInt64() const { return _type.first == ValueType::Int64 && !IsReference(); } + bool Value::IsInt64() const + { + return _type.first == ValueType::Int64 && !IsReference(); + } bool Value::IsFloatingPoint() const { return (_type.first == ValueType::Float || _type.first == ValueType::Double) && !IsReference(); } - bool Value::IsFloat32() const { return _type.first == ValueType::Float && !IsReference(); } + bool Value::IsFloat32() const + { + return _type.first == ValueType::Float && !IsReference(); + } - bool Value::IsDouble() const { return _type.first == ValueType::Double && !IsReference(); } + bool Value::IsDouble() const + { + return _type.first == ValueType::Double && !IsReference(); + } - bool Value::IsReference() const { return _type.second > 1; } + bool Value::IsReference() 
const + { + return _type.second > 1; + } bool Value::IsIntegralReference() const { @@ -344,34 +371,70 @@ namespace value } } - bool Value::IsBooleanReference() const { return _type.first == ValueType::Boolean && IsReference(); } + bool Value::IsBooleanReference() const + { + return _type.first == ValueType::Boolean && IsReference(); + } - bool Value::IsShortReference() const { return _type.first == ValueType::Int16 && IsReference(); } + bool Value::IsShortReference() const + { + return _type.first == ValueType::Int16 && IsReference(); + } - bool Value::IsInt32Reference() const { return _type.first == ValueType::Int32 && IsReference(); } + bool Value::IsInt32Reference() const + { + return _type.first == ValueType::Int32 && IsReference(); + } - bool Value::IsInt64Reference() const { return _type.first == ValueType::Int64 && IsReference(); } + bool Value::IsInt64Reference() const + { + return _type.first == ValueType::Int64 && IsReference(); + } bool Value::IsFloatingPointReference() const { return (_type.first == ValueType::Float || _type.first == ValueType::Double) && IsReference(); } - bool Value::IsFloat32Reference() const { return _type.first == ValueType::Float && IsReference(); } + bool Value::IsFloat32Reference() const + { + return _type.first == ValueType::Float && IsReference(); + } - bool Value::IsDoubleReference() const { return _type.first == ValueType::Double && IsReference(); } + bool Value::IsDoubleReference() const + { + return _type.first == ValueType::Double && IsReference(); + } - bool Value::IsConstrained() const { return _layout.has_value(); } + bool Value::IsConstrained() const + { + return _layout.has_value(); + } - const MemoryLayout& Value::GetLayout() const { return _layout.value(); } + const MemoryLayout& Value::GetLayout() const + { + return _layout.value(); + } - Value Value::Reference() const { return GetContext().Reference(*this); } + Value Value::Reference() const + { + return GetContext().Reference(*this); + } - Value Value::Dereference() const { return GetContext().Dereference(*this); } + Value Value::Dereference() const + { + return GetContext().Dereference(*this); + } - Value Value::Offset(Value index) const { return GetContext().Offset(*this, index); } + Value Value::Offset(Value index) const + { + return GetContext().Offset(*this, index); + } - Value Value::Offset(Scalar index) const { return Offset(index.GetValue()); } + Value Value::Offset(Scalar index) const + { + return Offset(index.GetValue()); + } Value Value::Offset(const std::vector& indices) const { @@ -382,19 +445,40 @@ namespace value return GetContext().Offset(*this, indices); } - ValueType Value::GetBaseType() const { return _type.first; } + ValueType Value::GetBaseType() const + { + return _type.first; + } - void Value::SetLayout(MemoryLayout layout) { _layout = layout; } + void Value::SetLayout(MemoryLayout layout) + { + _layout = layout; + } - void Value::ClearLayout() { _layout.reset(); } + void Value::ClearLayout() + { + _layout.reset(); + } - void Value::ClearData() { _data = Emittable{ nullptr }; } + void Value::ClearData() + { + _data = Emittable{ nullptr }; + } - int Value::PointerLevel() const { return _type.second; } + int Value::PointerLevel() const + { + return _type.second; + } - Value::UnderlyingDataType& Value::GetUnderlyingData() { return _data; } + Value::UnderlyingDataType& Value::GetUnderlyingData() + { + return _data; + } - const Value::UnderlyingDataType& Value::GetUnderlyingData() const { return _data; } + const Value::UnderlyingDataType& Value::GetUnderlyingData() 
const + { + return _data; + } void Value::SetName(const std::string& name) { @@ -407,11 +491,17 @@ namespace value return GetContext().GetName(*this); } - bool Value::HasCustomName() const { return _hasName; } + bool Value::HasCustomName() const + { + return _hasName; + } namespace detail { - Value StoreConstantData(ConstantData data) { return GetContext().StoreConstantData(data); } + Value StoreConstantData(ConstantData data) + { + return GetContext().StoreConstantData(data); + } } // namespace detail #define ADD_TO_STRING_ENTRY(NAMESPACE, OPERATOR) \ diff --git a/libraries/value/src/Vector.cpp b/libraries/value/src/Vector.cpp index 315e52ce2..138be828a 100644 --- a/libraries/value/src/Vector.cpp +++ b/libraries/value/src/Vector.cpp @@ -53,7 +53,10 @@ namespace value return *this; } - Scalar Vector::operator[](Scalar index) { return (*this)(index); } + Scalar Vector::operator[](Scalar index) + { + return (*this)(index); + } Scalar Vector::operator()(Scalar index) { @@ -63,7 +66,10 @@ namespace value return indexedValue; } - Scalar Vector::operator[](Scalar index) const { return (*this)(index); } + Scalar Vector::operator[](Scalar index) const + { + return (*this)(index); + } Scalar Vector::operator()(Scalar index) const { @@ -74,7 +80,10 @@ namespace value return Scalar(indexedValue).Copy(); } - Value Vector::GetValue() const { return _value; } + Value Vector::GetValue() const + { + return _value; + } Vector Vector::SubVector(Scalar offset, int size) const { @@ -95,13 +104,25 @@ namespace value return newValue; } - size_t Vector::Size() const { return _value.GetLayout().NumElements(); } + size_t Vector::Size() const + { + return _value.GetLayout().NumElements(); + } - ValueType Vector::GetType() const { return _value.GetBaseType(); } + ValueType Vector::GetType() const + { + return _value.GetBaseType(); + } - void Vector::SetName(const std::string& name) { _value.SetName(name); } + void Vector::SetName(const std::string& name) + { + _value.SetName(name); + } - std::string Vector::GetName() const { return _value.GetName(); } + std::string Vector::GetName() const + { + return _value.GetName(); + } Vector& Vector::operator+=(Scalar s) { diff --git a/libraries/value/src/VectorOperations.cpp b/libraries/value/src/VectorOperations.cpp index c61cadd1c..3afd4efc8 100644 --- a/libraries/value/src/VectorOperations.cpp +++ b/libraries/value/src/VectorOperations.cpp @@ -86,7 +86,6 @@ namespace value #else return defaultImpl(v1, v2); #endif - }); if (result) diff --git a/libraries/value/src/loopnests/Kernel.cpp b/libraries/value/src/loopnests/Kernel.cpp index 649f27cb2..524f87983 100644 --- a/libraries/value/src/loopnests/Kernel.cpp +++ b/libraries/value/src/loopnests/Kernel.cpp @@ -16,11 +16,13 @@ namespace value { Kernel::Kernel(std::string name) : _id(name), - _kernelName(name) {} + _kernelName(name) + {} Kernel::Kernel(std::string name, Id id) : _id(id.empty() ? 
name : id), - _kernelName(name) {} + _kernelName(name) + {} Kernel& Kernel::Inputs(const std::vector& inputs) { diff --git a/libraries/value/src/loopnests/KernelPredicate.cpp b/libraries/value/src/loopnests/KernelPredicate.cpp index ba3ac4db2..19c3c9a6d 100644 --- a/libraries/value/src/loopnests/KernelPredicate.cpp +++ b/libraries/value/src/loopnests/KernelPredicate.cpp @@ -445,25 +445,32 @@ namespace value // KernelPredicate // KernelPredicate::KernelPredicate(const EmptyPredicate& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate::KernelPredicate(const ConstantPredicate& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate::KernelPredicate(const FragmentTypePredicate& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate::KernelPredicate(const PlacementPredicate& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate::KernelPredicate(const IndexDefinedPredicate& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate::KernelPredicate(const KernelPredicateConjunction& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate::KernelPredicate(const KernelPredicateDisjunction& predicate) : - _expr(predicate) {} + _expr(predicate) + {} KernelPredicate KernelPredicate::Simplify() const { diff --git a/libraries/value/src/loopnests/LoopNest.cpp b/libraries/value/src/loopnests/LoopNest.cpp index 67a412df0..291b3264f 100644 --- a/libraries/value/src/loopnests/LoopNest.cpp +++ b/libraries/value/src/loopnests/LoopNest.cpp @@ -738,7 +738,10 @@ namespace value return _domain; } - Index LoopNest::GetBaseIndex(const Index& index) const { return _domain.GetBaseIndex(index); } + Index LoopNest::GetBaseIndex(const Index& index) const + { + return _domain.GetBaseIndex(index); + } bool LoopNest::IsLoopIndex(const Index& index) const { diff --git a/libraries/value/src/loopnests/LoopNestVisitor.cpp b/libraries/value/src/loopnests/LoopNestVisitor.cpp index 46708b358..a5885d256 100644 --- a/libraries/value/src/loopnests/LoopNestVisitor.cpp +++ b/libraries/value/src/loopnests/LoopNestVisitor.cpp @@ -538,8 +538,7 @@ namespace value case Fragment::first: splitVal = loopRange.Begin() + loopRange.Increment(); break; - case Fragment::last: - { + case Fragment::last: { // take into account last range being a boundary condition auto extra = loopRange.End() % loopRange.Increment(); if (extra == 0) diff --git a/libraries/value/src/loopnests/Range.cpp b/libraries/value/src/loopnests/Range.cpp index 4960efc8f..0ef8bf53b 100644 --- a/libraries/value/src/loopnests/Range.cpp +++ b/libraries/value/src/loopnests/Range.cpp @@ -19,15 +19,28 @@ namespace value Range::Range(int begin, int end, int increment) : _begin(begin), _end(end), - _increment(increment) {} + _increment(increment) + {} - int Range::Begin() const { return _begin; } + int Range::Begin() const + { + return _begin; + } - int Range::End() const { return _end; } + int Range::End() const + { + return _end; + } - int Range::Size() const { return _end - _begin; } + int Range::Size() const + { + return _end - _begin; + } - int Range::Increment() const { return _increment; } + int Range::Increment() const + { + return _increment; + } std::ostream& operator<<(std::ostream& os, const Range& r) { diff --git a/libraries/value/test/src/Functions_test.cpp b/libraries/value/test/src/Functions_test.cpp index b28ecb1fc..8a068212d 100644 --- a/libraries/value/test/src/Functions_test.cpp +++ b/libraries/value/test/src/Functions_test.cpp @@ -34,10 +34,10 @@ 
Scalar FunctionArgType_test() { auto fn = DeclareFunction("FunctionArgType_test") .Parameters( - Value(ValueType::Float, ScalarLayout), - Value({ValueType::Float, 0}, ScalarLayout), - Value(ValueType::Int32, ScalarLayout), - Value({ValueType::Int32, 0}, ScalarLayout)) + Value(ValueType::Float, ScalarLayout), + Value({ ValueType::Float, 0 }, ScalarLayout), + Value(ValueType::Int32, ScalarLayout), + Value({ ValueType::Int32, 0 }, ScalarLayout)) .Returns(Value(ValueType::Int32, ScalarLayout)) .Define([](Scalar f, Scalar f0, Scalar i, Scalar i0) { auto ff = MakeScalar(); diff --git a/libraries/value/test/src/Scalar_test.cpp b/libraries/value/test/src/Scalar_test.cpp index 4dbdb9de3..17e320f82 100644 --- a/libraries/value/test/src/Scalar_test.cpp +++ b/libraries/value/test/src/Scalar_test.cpp @@ -211,21 +211,21 @@ Scalar SequenceLogicalAndTest() Scalar ok = Allocate(ScalarLayout); ok = 1; If((fourGTTwo && fourGTFour), - [&]() { - DebugPrint("Error! 4 > 2 && 4 > 4\n"); - }) - .ElseIf(fourGTTwo, - [&]() { - ok = 0; - }) - .ElseIf(fourGTFour, - [&]() { - DebugPrint("Error! 4 <= 2 && 4 > 4\n"); - }) - .Else( - [&]() { - DebugPrint("Error! 4 <= 2 && 4 <= 4\n"); - }); + [&]() { + DebugPrint("Error! 4 > 2 && 4 > 4\n"); + }) + .ElseIf(fourGTTwo, + [&]() { + ok = 0; + }) + .ElseIf(fourGTFour, + [&]() { + DebugPrint("Error! 4 <= 2 && 4 > 4\n"); + }) + .Else( + [&]() { + DebugPrint("Error! 4 <= 2 && 4 <= 4\n"); + }); return ok; } @@ -243,21 +243,21 @@ Scalar SequenceLogicalAndTestWithCopy() Scalar ok = Allocate(ScalarLayout); ok = 1; If((fourGTTwo && fourGTFour), - [&]() { - DebugPrint("Error! 4 > 2 && 4 > 4\n"); - }) - .ElseIf(copyFourGTTwo, - [&]() { - ok = 0; - }) - .ElseIf(copyFourGTFour, - [&]() { - DebugPrint("Error! 4 <= 2 && 4 > 4\n"); - }) - .Else( - [&]() { - DebugPrint("Error! 4 <= 2 && 4 <= 4\n"); - }); + [&]() { + DebugPrint("Error! 4 > 2 && 4 > 4\n"); + }) + .ElseIf(copyFourGTTwo, + [&]() { + ok = 0; + }) + .ElseIf(copyFourGTFour, + [&]() { + DebugPrint("Error! 4 <= 2 && 4 > 4\n"); + }) + .Else( + [&]() { + DebugPrint("Error! 
4 <= 2 && 4 <= 4\n"); + }); return ok; } diff --git a/libraries/value/test/src/TestUtil.cpp b/libraries/value/test/src/TestUtil.cpp index 935b2e2a1..54597379e 100644 --- a/libraries/value/test/src/TestUtil.cpp +++ b/libraries/value/test/src/TestUtil.cpp @@ -153,13 +153,12 @@ Scalar EqualEpsilon(Scalar x, Scalar y, double epsilon) DebugPrint("\n"); #endif // 0 result = 1; - }) - .Else([&] { - if (auto type = x.GetType(); type == ValueType::Float || type == ValueType::Double) - { - auto tolerance = Cast(epsilon, type); - If((x - y) <= tolerance, [&] { - If((y - x) <= tolerance, [&] { + }).Else([&] { + if (auto type = x.GetType(); type == ValueType::Float || type == ValueType::Double) + { + auto tolerance = Cast(epsilon, type); + If((x - y) <= tolerance, [&] { + If((y - x) <= tolerance, [&] { #if 0 // Useful for debugging DebugPrint("## Scalar compare passed\n"); DebugPrint(" Expected: "); @@ -169,15 +168,15 @@ Scalar EqualEpsilon(Scalar x, Scalar y, double epsilon) DebugPrintVector(AsVector(y)); DebugPrint("\n"); #endif // 0 - result = 1; - }); + result = 1; }); - } - else - { - result = 0; - } - }); + }); + } + else + { + result = 0; + } + }); return result; } diff --git a/tools/trainers/linearTrainer/src/main.cpp b/tools/trainers/linearTrainer/src/main.cpp index 15138ad62..21515c38a 100644 --- a/tools/trainers/linearTrainer/src/main.cpp +++ b/tools/trainers/linearTrainer/src/main.cpp @@ -147,14 +147,12 @@ int main(int argc, char* argv[]) case LinearTrainerArguments::Algorithm::SparseDataSGD: trainer = common::MakeSparseDataSGDTrainer(trainerArguments.lossFunctionArguments, { linearTrainerArguments.regularization, linearTrainerArguments.randomSeedString }); break; - case LinearTrainerArguments::Algorithm::SparseDataCenteredSGD: - { + case LinearTrainerArguments::Algorithm::SparseDataCenteredSGD: { auto mean = trainers::CalculateMean(mappedDataset.GetAnyDataset()); trainer = common::MakeSparseDataCenteredSGDTrainer(trainerArguments.lossFunctionArguments, mean, { linearTrainerArguments.regularization, linearTrainerArguments.randomSeedString }); break; } - case LinearTrainerArguments::Algorithm::SDCA: - { + case LinearTrainerArguments::Algorithm::SDCA: { trainer = common::MakeSDCATrainer(trainerArguments.lossFunctionArguments, { linearTrainerArguments.regularization, linearTrainerArguments.desiredPrecision, linearTrainerArguments.maxEpochs, linearTrainerArguments.permute, linearTrainerArguments.randomSeedString }); break; } @@ -192,14 +190,12 @@ int main(int argc, char* argv[]) // Create a new map with the linear predictor appended. switch (map.GetOutputType()) { - case model::Port::PortType::smallReal: - { + case model::Port::PortType::smallReal: { auto outputMap = AppendTrainedLinearPredictorToMap(trainer->GetPredictor(), map, mappedDatasetDimension); common::SaveMap(outputMap, modelSaveArguments.outputModelFilename); } break; - case model::Port::PortType::real: - { + case model::Port::PortType::real: { auto outputMap = AppendTrainedLinearPredictorToMap(trainer->GetPredictor(), map, mappedDatasetDimension); common::SaveMap(outputMap, modelSaveArguments.outputModelFilename); } diff --git a/tools/trainers/retargetTrainer/src/main.cpp b/tools/trainers/retargetTrainer/src/main.cpp index 9671bf80d..9b3b8918c 100644 --- a/tools/trainers/retargetTrainer/src/main.cpp +++ b/tools/trainers/retargetTrainer/src/main.cpp @@ -315,13 +315,11 @@ model::Map GetRetargetedModel(std::vector& binaryPredictors, mode // Create a new map with the output of the combined linear predictors appended. 
switch (map.GetOutputType()) { - case model::Port::PortType::smallReal: - { + case model::Port::PortType::smallReal: { result = GetMultiClassMapFromBinaryPredictors(binaryPredictors, map); break; } - case model::Port::PortType::real: - { + case model::Port::PortType::real: { result = GetMultiClassMapFromBinaryPredictors(binaryPredictors, map); break; } @@ -339,13 +337,11 @@ model::Map GetRetargetedModel(const PredictorType& trainedPredictor, model::Map& // Create a new map with the output of the linear predictor appended. switch (map.GetOutputType()) { - case model::Port::PortType::smallReal: - { + case model::Port::PortType::smallReal: { result = AppendTrainedLinearPredictorToMap(trainedPredictor, map, mappedDatasetDimension); break; } - case model::Port::PortType::real: - { + case model::Port::PortType::real: { result = AppendTrainedLinearPredictorToMap(trainedPredictor, map, mappedDatasetDimension); break; } diff --git a/tools/utilities/debugCompiler/src/ModelComparison.cpp b/tools/utilities/debugCompiler/src/ModelComparison.cpp index 2d40c2934..828dfee33 100644 --- a/tools/utilities/debugCompiler/src/ModelComparison.cpp +++ b/tools/utilities/debugCompiler/src/ModelComparison.cpp @@ -16,9 +16,9 @@ #include -#include #include #include +#include #include #include diff --git a/tools/utilities/finetune/include/DataStatistics.h b/tools/utilities/finetune/include/DataStatistics.h index f753b953f..376261e13 100644 --- a/tools/utilities/finetune/include/DataStatistics.h +++ b/tools/utilities/finetune/include/DataStatistics.h @@ -27,10 +27,10 @@ struct Sparsity double GetSparsity() const { return static_cast<double>(numZeros) / static_cast<double>(numValues); } }; -/// +/// /// A struct to hold basic statistics about a block of data, including the overall sparsity as well as -/// the mean, variance, and standard deviation. -/// The `mean`, `variance`, and `stdDev` staistics are vectors so that they can represent the statistics +/// the mean, variance, and standard deviation. +/// The `mean`, `variance`, and `stdDev` statistics are vectors so that they can represent the statistics /// either of the data as a whole (in which case, they have a size of 1), or along one dimension of a matrix. 
/// struct DataStatistics diff --git a/tools/utilities/finetune/include/PredictorUtils.h b/tools/utilities/finetune/include/PredictorUtils.h index dfa10826d..eb9864299 100644 --- a/tools/utilities/finetune/include/PredictorUtils.h +++ b/tools/utilities/finetune/include/PredictorUtils.h @@ -31,4 +31,4 @@ WeightsAndBias GetWeightsAndBias(const ScalarPredictor& predictor); template WeightsAndBias GetWeightsAndBias(const VectorPredictor& predictor); -} \ No newline at end of file +} // namespace ell \ No newline at end of file diff --git a/tools/utilities/finetune/include/TransformData.h b/tools/utilities/finetune/include/TransformData.h index cbbe3ba40..93a4f2d14 100644 --- a/tools/utilities/finetune/include/TransformData.h +++ b/tools/utilities/finetune/include/TransformData.h @@ -39,4 +39,4 @@ double GetModelAccuracy(const ell::model::OutputPortBase& output, const VectorLa template DataVectorType RemovePadding(const DataVectorType& data, const ell::utilities::MemoryLayout& layout); -} \ No newline at end of file +} // namespace ell \ No newline at end of file diff --git a/tools/utilities/finetune/src/DataStatistics.cpp b/tools/utilities/finetune/src/DataStatistics.cpp index aab347709..01735d958 100644 --- a/tools/utilities/finetune/src/DataStatistics.cpp +++ b/tools/utilities/finetune/src/DataStatistics.cpp @@ -28,183 +28,183 @@ namespace ell // Utilities namespace { -void ThrowIfEmpty(const UnlabeledDataContainer& dataset) -{ - if (dataset.Size() == 0) + void ThrowIfEmpty(const UnlabeledDataContainer& dataset) { - throw utilities::InputException(utilities::InputExceptionErrors::badData, "Empty dataset"); + if (dataset.Size() == 0) + { + throw utilities::InputException(utilities::InputExceptionErrors::badData, "Empty dataset"); + } } -} -template -void ThrowIfNotSameSize(const std::vector& a, const std::vector& b) -{ - if (a.size() != b.size()) + template + void ThrowIfNotSameSize(const std::vector& a, const std::vector& b) { - throw utilities::InputException(utilities::InputExceptionErrors::badData, "Sizes don't match"); + if (a.size() != b.size()) + { + throw utilities::InputException(utilities::InputExceptionErrors::badData, "Sizes don't match"); + } } -} -template -void ThrowIfNotSameSize(const math::RowVector& a, const math::RowVector& b) -{ - if (a.Size() != b.Size()) + template + void ThrowIfNotSameSize(const math::RowVector& a, const math::RowVector& b) { - throw utilities::InputException(utilities::InputExceptionErrors::badData, "Sizes don't match"); + if (a.Size() != b.Size()) + { + throw utilities::InputException(utilities::InputExceptionErrors::badData, "Sizes don't match"); + } } -} -template -math::RowVector operator-(const math::RowVector& a, const math::RowVector& b) -{ - ThrowIfNotSameSize(a, b); - auto v = a; - v -= b; - return v; -} - -template -math::RowVector& operator*=(math::RowVector& a, const math::RowVector& b) -{ - ThrowIfNotSameSize(a, b); - auto size = a.Size(); - for (size_t i = 0; i < size; ++i) + template + math::RowVector operator-(const math::RowVector& a, const math::RowVector& b) { - a[i] *= b[i]; + ThrowIfNotSameSize(a, b); + auto v = a; + v -= b; + return v; } - return a; -} -template -math::RowVector operator*(const math::RowVector& a, const math::RowVector& b) -{ - ThrowIfNotSameSize(a, b); - auto v = a; - v *= b; - return v; -} - -template -math::RowVector operator/(const math::RowVector& a, T denom) -{ - auto v = a; - v /= denom; - return v; -} - -template -math::RowVector& operator/=(math::RowVector& a, const math::RowVector& b) -{ - 
ThrowIfNotSameSize(a, b); - auto size = a.Size(); - for (size_t i = 0; i < size; ++i) + template + math::RowVector& operator*=(math::RowVector& a, const math::RowVector& b) { - a[i] /= b[i]; + ThrowIfNotSameSize(a, b); + auto size = a.Size(); + for (size_t i = 0; i < size; ++i) + { + a[i] *= b[i]; + } + return a; } - return a; -} -template -math::RowVector Sqrt(const math::RowVector& a) -{ - auto v = a; - auto xform = math::SquareRootTransformation; - v.Transform(xform); - return v; -} + template + math::RowVector operator*(const math::RowVector& a, const math::RowVector& b) + { + ThrowIfNotSameSize(a, b); + auto v = a; + v *= b; + return v; + } -int GetNumRows(const UnlabeledDataContainer& dataset) -{ - return dataset.Size(); -} + template + math::RowVector operator/(const math::RowVector& a, T denom) + { + auto v = a; + v /= denom; + return v; + } -int GetNumColumns(const UnlabeledDataContainer& dataset) -{ - ThrowIfEmpty(dataset); - return dataset[0].Size(); -} + template + math::RowVector& operator/=(math::RowVector& a, const math::RowVector& b) + { + ThrowIfNotSameSize(a, b); + auto size = a.Size(); + for (size_t i = 0; i < size; ++i) + { + a[i] /= b[i]; + } + return a; + } -struct BasicDataStatistics -{ - int numRows; - std::vector numZeros; - math::RowVector sumElements; - math::RowVector sumSquaredElements; -}; + template + math::RowVector Sqrt(const math::RowVector& a) + { + auto v = a; + auto xform = math::SquareRootTransformation; + v.Transform(xform); + return v; + } -BasicDataStatistics GetBasicDataStatistics(const UnlabeledDataContainer& dataset, const utilities::MemoryLayout& layout) -{ - ThrowIfEmpty(dataset); + int GetNumRows(const UnlabeledDataContainer& dataset) + { + return dataset.Size(); + } - BasicDataStatistics result; - auto columns = GetNumColumns(dataset); - auto numZeros = std::vector(columns); - auto sum = math::RowVector(columns); - auto sumSquares = math::RowVector(columns); - for (const auto& row : dataset) + int GetNumColumns(const UnlabeledDataContainer& dataset) { - auto x = CastVector(row); - for (int i = 0; i < columns; ++i) - { - if (x[i] == 0.0) - ++numZeros[i]; - } - sum += x; - sumSquares += math::Square(x); + ThrowIfEmpty(dataset); + return dataset[0].Size(); } - return { - GetNumRows(dataset), - numZeros, - sum, - sumSquares, + struct BasicDataStatistics + { + int numRows; + std::vector numZeros; + math::RowVector sumElements; + math::RowVector sumSquaredElements; }; -} - -BasicDataStatistics GetBasicDataStatistics(const UnlabeledDataContainer& dataset) -{ - auto columns = GetNumColumns(dataset); - utilities::MemoryLayout layout({ columns }); - return GetBasicDataStatistics(dataset, layout); -} -template -Sparsity GetWeightsSparsity(const WeightsType& weights) // TODO: add layout -{ - auto weightsVec = weights.ToArray(); - auto numZeros = std::count_if(weightsVec.begin(), weightsVec.end(), [](auto a) { return a == 0; }); - return { static_cast(weightsVec.size()), numZeros }; -} + BasicDataStatistics GetBasicDataStatistics(const UnlabeledDataContainer& dataset, const utilities::MemoryLayout& layout) + { + ThrowIfEmpty(dataset); + + BasicDataStatistics result; + auto columns = GetNumColumns(dataset); + auto numZeros = std::vector(columns); + auto sum = math::RowVector(columns); + auto sumSquares = math::RowVector(columns); + for (const auto& row : dataset) + { + auto x = CastVector(row); + for (int i = 0; i < columns; ++i) + { + if (x[i] == 0.0) + ++numZeros[i]; + } + sum += x; + sumSquares += math::Square(x); + } -template -Sparsity 
-Sparsity GetNodeWeightsSparsity(const NodeType& node)
-{
-    return GetWeightsSparsity(node.GetLayer().GetWeights());
-}
+        return {
+            GetNumRows(dataset),
+            numZeros,
+            sum,
+            sumSquares,
+        };
+    }
 
-Sparsity GetNodeWeightsSparsity(const model::Node& node)
-{
-    if (IsConvolutionalLayerNode(&node))
+    BasicDataStatistics GetBasicDataStatistics(const UnlabeledDataContainer& dataset)
     {
-        return GetNodeWeightsSparsity(static_cast&>(node));
+        auto columns = GetNumColumns(dataset);
+        utilities::MemoryLayout layout({ columns });
+        return GetBasicDataStatistics(dataset, layout);
     }
 
-    if (IsConvolutionalLayerNode(&node))
+    template 
+    Sparsity GetWeightsSparsity(const WeightsType& weights) // TODO: add layout
     {
-        return GetNodeWeightsSparsity(static_cast&>(node));
+        auto weightsVec = weights.ToArray();
+        auto numZeros = std::count_if(weightsVec.begin(), weightsVec.end(), [](auto a) { return a == 0; });
+        return { static_cast(weightsVec.size()), numZeros };
     }
 
-    if (IsFullyConnectedLayerNode(&node))
+    template 
+    Sparsity GetNodeWeightsSparsity(const NodeType& node)
     {
-        return GetNodeWeightsSparsity(static_cast&>(node));
+        return GetWeightsSparsity(node.GetLayer().GetWeights());
     }
 
-    if (IsFullyConnectedLayerNode(&node))
+    Sparsity GetNodeWeightsSparsity(const model::Node& node)
     {
-        return GetNodeWeightsSparsity(static_cast&>(node));
+        if (IsConvolutionalLayerNode(&node))
+        {
+            return GetNodeWeightsSparsity(static_cast&>(node));
+        }
+
+        if (IsConvolutionalLayerNode(&node))
+        {
+            return GetNodeWeightsSparsity(static_cast&>(node));
+        }
+
+        if (IsFullyConnectedLayerNode(&node))
+        {
+            return GetNodeWeightsSparsity(static_cast&>(node));
+        }
+
+        if (IsFullyConnectedLayerNode(&node))
+        {
+            return GetNodeWeightsSparsity(static_cast&>(node));
+        }
+        return { 0, 0 };
     }
-    return { 0, 0 };
-}
 } // namespace
 
 DataStatistics GetScalarDataStatistics(const UnlabeledDataContainer& dataset)
@@ -337,7 +337,7 @@ UnlabeledDataContainer GetNormalizedData(const UnlabeledDataContainer& dataset,
     auto newRow = CastVector(row).ToArray();
     MultidimArray newRowArray(newRow, layout);
     int size = layout.GetMemorySize();
-    for(int i = 0; i < size; ++i)
+    for (int i = 0; i < size; ++i)
     {
         auto coords = layout.GetPhysicalCoordinatesFromOffset(i);
         newRowArray[coords] -= stats.mean[coords[dimension]];
@@ -399,4 +399,4 @@ Sparsity GetSubmodelWeightsSparsity(const model::Submodel& submodel)
     });
     return sparsity;
 }
-}
\ No newline at end of file
+} // namespace ell
\ No newline at end of file
diff --git a/tools/utilities/finetune/src/FineTuneArguments.cpp b/tools/utilities/finetune/src/FineTuneArguments.cpp
index d7335902b..54ace1229 100644
--- a/tools/utilities/finetune/src/FineTuneArguments.cpp
+++ b/tools/utilities/finetune/src/FineTuneArguments.cpp
@@ -397,4 +397,4 @@ ParsedFineTuneArguments ParsedFineTuneArguments::ParseCommandLine(int argc, char
     }
 }
 
-}
\ No newline at end of file
+} // namespace ell
\ No newline at end of file
diff --git a/tools/utilities/finetune/src/OptimizationUtils.cpp b/tools/utilities/finetune/src/OptimizationUtils.cpp
index cd1f2b4fc..6addab8f2 100644
--- a/tools/utilities/finetune/src/OptimizationUtils.cpp
+++ b/tools/utilities/finetune/src/OptimizationUtils.cpp
@@ -59,28 +59,23 @@ ScalarOptimizerResult TrainScalarPredictor(BinaryLabelDataContainer dataset, con
 
     switch (optimizerParameters.lossFunction)
     {
-    case LossFunction::hinge:
-    {
+    case LossFunction::hinge: {
         optimization::HingeLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::huber:
-    {
+    case LossFunction::huber: {
         optimization::HuberLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::logistic:
-    {
+    case LossFunction::logistic: {
         optimization::LogisticLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::smoothedHinge:
-    {
+    case LossFunction::smoothedHinge: {
         optimization::SmoothedHingeLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::square:
-    {
+    case LossFunction::square: {
         optimization::SquareLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
@@ -170,28 +165,23 @@ VectorOptimizerResult TrainVectorPredictor(VectorLabelDataContainer dataset, con
 
     switch (optimizerParameters.lossFunction)
     {
-    case LossFunction::hinge:
-    {
+    case LossFunction::hinge: {
         optimization::MultivariateLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::huber:
-    {
+    case LossFunction::huber: {
         optimization::MultivariateLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::logistic:
-    {
+    case LossFunction::logistic: {
         optimization::MultivariateLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::smoothedHinge:
-    {
+    case LossFunction::smoothedHinge: {
         optimization::MultivariateLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
-    case LossFunction::square:
-    {
+    case LossFunction::square: {
         optimization::MultivariateLoss loss;
         return TrainPredictor(examples, loss, optimizerParameters);
     }
@@ -215,28 +205,23 @@ VectorOptimizerResult ReoptimizeSparsePredictor(VectorOptimizerResult& sparseSol
 
     switch (optimizerParameters.lossFunction)
    {
-    case LossFunction::hinge:
-    {
+    case LossFunction::hinge: {
         optimization::MultivariateLoss loss;
         return ReoptimizeSparsePredictor(sparseSolution, examples, loss, optimizerParameters);
     }
-    case LossFunction::huber:
-    {
+    case LossFunction::huber: {
         optimization::MultivariateLoss loss;
         return ReoptimizeSparsePredictor(sparseSolution, examples, loss, optimizerParameters);
     }
-    case LossFunction::logistic:
-    {
+    case LossFunction::logistic: {
         optimization::MultivariateLoss loss;
         return ReoptimizeSparsePredictor(sparseSolution, examples, loss, optimizerParameters);
     }
-    case LossFunction::smoothedHinge:
-    {
+    case LossFunction::smoothedHinge: {
         optimization::MultivariateLoss loss;
         return ReoptimizeSparsePredictor(sparseSolution, examples, loss, optimizerParameters);
     }
-    case LossFunction::square:
-    {
+    case LossFunction::square: {
         optimization::MultivariateLoss loss;
         return ReoptimizeSparsePredictor(sparseSolution, examples, loss, optimizerParameters);
     }
diff --git a/tools/utilities/finetune/src/TransformData.cpp b/tools/utilities/finetune/src/TransformData.cpp
index fd6379667..d021d768e 100644
--- a/tools/utilities/finetune/src/TransformData.cpp
+++ b/tools/utilities/finetune/src/TransformData.cpp
@@ -117,13 +117,11 @@ namespace
 {
     switch (type)
     {
-    case Port::PortType::smallReal:
-    {
+    case Port::PortType::smallReal: {
         return model.AddNode>(layout)->output;
         break;
     }
-    case Port::PortType::real:
-    {
+    case Port::PortType::real: {
         return model.AddNode>(layout)->output;
         break;
     }
@@ -259,13 +257,11 @@ UnlabeledDataContainer TransformDataWithSubmodel(const UnlabeledDataContainer& d
 {
     switch (submodel.GetOutputs()[0]->GetType())
     {
-    case Port::PortType::smallReal:
-    {
+    case Port::PortType::smallReal: {
         return TransformDataWithSubmodelImpl(dataset, submodel);
         break;
     }
-    case Port::PortType::real:
-    {
+    case Port::PortType::real: {
         return TransformDataWithSubmodelImpl(dataset, submodel);
         break;
     }
@@ -402,13 +398,11 @@ double GetModelAccuracyImpl(const OutputPortBase& output, const DatasetType& tes
 {
     switch (output.GetType())
     {
-    case Port::PortType::smallReal:
-    {
+    case Port::PortType::smallReal: {
         return GetModelAccuracyImpl(static_cast&>(output), testDataset);
         break;
     }
-    case Port::PortType::real:
-    {
+    case Port::PortType::real: {
         return GetModelAccuracyImpl(static_cast&>(output), testDataset);
         break;
     }
diff --git a/tools/utilities/makeExamples/src/main.cpp b/tools/utilities/makeExamples/src/main.cpp
index 48d077023..88e4b9dde 100644
--- a/tools/utilities/makeExamples/src/main.cpp
+++ b/tools/utilities/makeExamples/src/main.cpp
@@ -24,7 +24,7 @@ void SaveModels(const std::string& ext, std::string outputPath)
     {
         outputPath = ".";
     }
-    
+
     ell::utilities::EnsureDirectoryExists(outputPath);
 
     common::SaveModel(GenerateIdentityModel(3), outputPath + "/identity." + ext);
diff --git a/tools/utilities/print/src/PrintGraph.cpp b/tools/utilities/print/src/PrintGraph.cpp
index de8b0bedd..2c572a10b 100644
--- a/tools/utilities/print/src/PrintGraph.cpp
+++ b/tools/utilities/print/src/PrintGraph.cpp
@@ -336,11 +336,11 @@ void PrintGraph(const Model& model, const std::string& outputFormat, std::ostrea
     }
 
     auto outputs = node.GetOutputPorts();
-    for(auto output: outputs)
+    for (auto output : outputs)
     {
         auto outputName = output->GetName();
         auto dependencies = output->GetReferences();
-        for(auto dependentInput: dependencies)
+        for (auto dependentInput : dependencies)
         {
             auto inputName = dependentInput->GetName();
             auto dependentNode = dependentInput->GetNode();
diff --git a/tools/utilities/profile/src/CompiledProfile_main.cpp b/tools/utilities/profile/src/CompiledProfile_main.cpp
index 8001822a1..cdaca0463 100644
--- a/tools/utilities/profile/src/CompiledProfile_main.cpp
+++ b/tools/utilities/profile/src/CompiledProfile_main.cpp
@@ -17,7 +17,6 @@
 #include 
 #include 
 #include 
-#include 
 
 struct ProfileArguments
 {