Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
151 changes: 132 additions & 19 deletions README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion examples/CaiOptimizedDenseNet/CaiOptimizedDenseNet.lpr
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ TTestCNNAlgo = class(TCustomApplication)
NN.AddLayer( TNNetDropout.Create(0.5) );
NN.AddLayer( TNNetMaxChannel.Create() );
NN.AddLayer( TNNetFullConnectLinear.Create(NumClasses) );
NN.AddLayer( TNNetSoftMax.Create() );
NN.AddLayer( TNNetSoftMax.Create({SkipBackpropDerivative=}1) );
NN.Layers[ NN.GetFirstImageNeuronalLayerIdx() ].InitBasicPatterns();
(*
// First block shouldn't be separable.
Expand Down
2 changes: 1 addition & 1 deletion examples/CaiOptimizedDenseNet/CaiOptimizedDenseNet48.lpr
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ TTestCNNAlgo = class(TCustomApplication)
NN.AddLayer( TNNetDropout.Create(0.25) );
NN.AddLayer( TNNetMaxChannel.Create() );
NN.AddLayer( TNNetFullConnectLinear.Create(NumClasses) );
NN.AddLayer( TNNetSoftMax.Create() );
NN.AddLayer( TNNetSoftMax.Create({SkipBackpropDerivative=}1) );
NN.Layers[ NN.GetFirstImageNeuronalLayerIdx() ].InitBasicPatterns();

WriteLn('Learning rate set to: [',fLearningRate:7:5,']');
Expand Down
2 changes: 1 addition & 1 deletion examples/CaiOptimizedDenseNet/kOptimizedDenseNet.lpr
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ TTestCNNAlgo = class(TCustomApplication)
NN.AddLayer( TNNetMaxChannel.Create() );
NN.AddLayer( TNNetReLU6.Create() );
NN.AddLayer( TNNetFullConnectLinear.Create(NumClasses) );
NN.AddLayer( TNNetSoftMax.Create() );
NN.AddLayer( TNNetSoftMax.Create({SkipBackpropDerivative=}1) );
NN.Layers[ NN.GetFirstImageNeuronalLayerIdx() ].InitBasicPatterns();

CreateCifar10Volumes(ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
///This file has an implementation to classify
// the Colorectal Cancer Dataset:
// https://zenodo.org/record/53169/
// https://zenodo.org/record/53169/files/Kather_texture_2016_image_tiles_5000.zip?download=1
// https://www.tensorflow.org/datasets/catalog/colorectal_histology

// Change ProportionToLoad to a smaller number if you don't have available 4GB of RAM.

program ColorectalImageClassification;
(*
Coded by Joao Paulo Schwarz Schuler.
https://github.com/joaopauloschuler/neural-api
*)
{$mode objfpc}{$H+}

uses {$IFDEF UNIX} {$IFDEF UseCThreads}
cthreads, {$ENDIF} {$ENDIF}
Classes, SysUtils, CustApp, neuralnetwork, neuralvolume, Math, neuraldatasets,
neuralfit;

type
TTestCNNAlgo = class(TCustomApplication)
protected
procedure DoRun; override;
end;

procedure TTestCNNAlgo.DoRun;
var
  NN: TNNet;                      // The convolutional neural network being trained.
  NeuralFit: TNeuralImageFit;     // Training / validation / testing driver.
  ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes: TNNetVolumeList;
  ProportionToLoad: Single;       // Fraction of the dataset loaded into RAM (0..1).
begin
  // Builds, trains and tests a CNN for the Colorectal Cancer histology dataset
  // (Kather et al., 8 texture classes, 128x128 RGB tiles).
  WriteLn('Creating Neural Network...');
  NN := TNNet.Create();
  NN.AddLayer([
    TNNetInput.Create(128, 128, 3),
    TNNetConvolutionLinear.Create({Features=}64, {FeatureSize=}5, {Padding=}4, {Stride=}2),
    TNNetMaxPool.Create(2),
    TNNetMovingStdNormalization.Create(),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1),
    TNNetMaxPool.Create(2),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}2),
    TNNetDropout.Create(0.5),
    TNNetMaxPool.Create(2),
    TNNetFullConnectLinear.Create(8),
    // SkipBackpropDerivative=1 matches the other examples updated in this
    // change set (CaiOptimizedDenseNet, ImageClassifierSELU, ...).
    TNNetSoftMax.Create({SkipBackpropDerivative=}1)
  ]);
  NN.DebugStructure();
  // Change ProportionToLoad to a smaller number if you don't have enough free RAM.
  ProportionToLoad := 1;
  // Fixed copy/paste error: this example loads the colorectal cancer dataset,
  // not the plant leaf disease dataset.
  WriteLn('Loading ', Round(ProportionToLoad*100), '% of the Colorectal Cancer dataset into memory.');
  // 90% training / 5% validation / 5% test split, resized to 128x128.
  CreateVolumesFromImagesFromFolder
  (
    ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes,
    {FolderName=}'Kather_texture_2016_image_tiles_5000', {pImageSubFolder=}'',
    {color_encoding=}0{RGB},
    {TrainingProp=}0.9*ProportionToLoad,
    {ValidationProp=}0.05*ProportionToLoad,
    {TestProp=}0.05*ProportionToLoad,
    {NewSizeX=}128, {NewSizeY=}128
  );

  WriteLn
  (
    'Training Images:', ImgTrainingVolumes.Count,
    ' Validation Images:', ImgValidationVolumes.Count,
    ' Test Images:', ImgTestVolumes.Count
  );

  NeuralFit := TNeuralImageFit.Create;
  NeuralFit.FileNameBase := 'Colorectal';
  NeuralFit.InitialLearningRate := 0.001;
  NeuralFit.LearningRateDecay := 0.01;
  NeuralFit.CyclicalLearningRateLen := 10;
  NeuralFit.StaircaseEpochs := 10;
  NeuralFit.Inertia := 0.9;
  NeuralFit.L2Decay := 0.00001;
  NeuralFit.Fit(NN, ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes, {NumClasses=}8, {batchsize=}64, {epochs=}250);
  NeuralFit.Free;

  NN.Free;
  ImgTestVolumes.Free;
  ImgValidationVolumes.Free;
  ImgTrainingVolumes.Free;
  Terminate;
end;

var
Application: TTestCNNAlgo; // Console application instance; all work happens in DoRun.
begin
Application := TTestCNNAlgo.Create(nil);
Application.Title:='Colorectal Cancer Image Classification';
Application.Run; // Invokes TTestCNNAlgo.DoRun defined above.
Application.Free;
end.
184 changes: 184 additions & 0 deletions examples/DelphiTemplate/Unit1.pas
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
unit Unit1;

interface

uses
System.SysUtils, System.Types, System.UITypes, System.Classes, System.Variants,
FMX.Types, FMX.Controls, FMX.Forms, FMX.Graphics, FMX.Dialogs,
FMX.Controls.Presentation, FMX.StdCtrls,
// Neural specific files.
neuralnetwork, neuralvolume, neuraldatasets, neuralfit, neuralthread;

// In Delphi, in project options:
// * At compiler, search path (-U), you'll add the "neural" folder: ..\..\neural\
// * Still at the compiler, set the final output directory (-E) to: ..\..\bin\x86_64-win64\bin\
// * In "generate console application", set it to true.

// In your "uses" section, include:
// neuralnetwork, neuralvolume, neuraldatasets, neuralfit, neuralthread;

type
// Minimal FMX form with two buttons: Button1 runs the CIFAR-10 image
// classifier demo, Button2 runs the simple OR-gate learning demo.
TForm1 = class(TForm)
Button1: TButton; // Triggers RunNeuralNetwork (CIFAR-10 training).
Button2: TButton; // Triggers RunSimpleLearning (OR gate).
procedure Button1Click(Sender: TObject);
procedure Button2Click(Sender: TObject);
private
{ Private declarations }
public
{ Public declarations }
end;

var
Form1: TForm1;

implementation


type
// Define the input and output types for training data.
// Four samples, each with two input values and one expected output value.
TBackInput = array[0..3] of array[0..1] of TNeuralFloat; // Input data for OR operation
TBackOutput = array[0..3] of array[0..0] of TNeuralFloat; // Expected output for OR operation

const
cs_false = 0.1; // Encoding for "false" value
cs_true = 0.8; // Encoding for "true" value
cs_threshold = (cs_false + cs_true) / 2; // Threshold for neuron activation (midpoint of the two encodings)

const
cs_inputs : TBackInput =
(
// Input data for OR operation: all four input combinations.
(cs_false, cs_false),
(cs_false, cs_true),
(cs_true, cs_false),
(cs_true, cs_true)
);

const
cs_outputs : TBackOutput =
(
// Expected outputs for OR operation (true unless both inputs are false).
(cs_false),
(cs_true),
(cs_true),
(cs_true)
);

// Trains a single linear neuron to learn the OR function by plain
// feed-forward / backpropagation over the four samples in cs_inputs,
// logging progress every 100 epochs.
procedure RunSimpleLearning();
var
  Net: TNNet;
  Epoch: integer;
  Sample: integer;
  Predicted: TNNetVolume;
  Inputs: TBackInput;
  Targets: TBackOutput;
  LogThisEpoch: boolean;
begin
  Net := TNNet.Create();

  // Two input neurons feeding one fully-connected linear output neuron.
  Net.AddLayer(TNNetInput.Create(2));
  Net.AddLayer(TNNetFullConnectLinear.Create(1));

  // Learning rate 0.01 with momentum 0.9.
  Net.SetLearningRate(0.01, 0.9);

  Inputs := cs_inputs;
  Targets := cs_outputs;
  // Single-element volume that receives the network's output.
  Predicted := TNNetVolume.Create(1, 1, 1, 1);

  WriteLn('Value encoding FALSE is: ', cs_false:4:2);
  WriteLn('Value encoding TRUE is: ', cs_true:4:2);
  WriteLn('Threshold is: ', cs_threshold:4:2);
  WriteLn;

  for Epoch := 1 to 1200 do
  begin
    // Only every 100th epoch is printed.
    LogThisEpoch := Epoch mod 100 = 0;
    for Sample := Low(cs_inputs) to High(cs_inputs) do
    begin
      // Forward pass, read the prediction, then adjust the weights.
      Net.Compute(Inputs[Sample]);
      Net.GetOutput(Predicted);
      Net.Backpropagate(Targets[Sample]);

      if LogThisEpoch then
        WriteLn(
          Epoch:7, 'x', Sample,
          ' Inputs: ', cs_inputs[Sample][0]:3:1,', ' ,cs_inputs[Sample][1]:3:1,
          ' Output:', Predicted.Raw[0]:5:2,' ',
          ' - Training/Desired Output: ', Targets[Sample][0]:5:2,' '
        );
    end;

    if LogThisEpoch then
    begin
      WriteLn('');
    end;
  end;

  // Show the learned weights, then release everything.
  Net.DebugWeights();

  Predicted.Free;
  Net.Free;
end;

// Builds, trains and tests a CNN on CIFAR-10 (10 classes, 32x32 RGB images).
// Returns early if the CIFAR-10 data files are not found.
procedure RunNeuralNetwork;
var
  NN: TNNet;                  // The convolutional neural network.
  NeuralFit: TNeuralImageFit; // Training / validation / testing driver.
  ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes: TNNetVolumeList;
begin
  if not CheckCIFARFile() then
  begin
    exit;
  end;
  WriteLn('Creating Neural Network...');
  NN := TNNet.Create();
  NN.AddLayer([
    TNNetInput.Create(32, 32, 3),
    TNNetConvolutionLinear.Create({Features=}64, {FeatureSize=}5, {Padding=}2, {Stride=}1, {SuppressBias=}1),
    TNNetMaxPool.Create(4),
    TNNetMovingStdNormalization.Create(),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1, {SuppressBias=}1),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1, {SuppressBias=}1),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1, {SuppressBias=}1),
    TNNetConvolutionReLU.Create({Features=}64, {FeatureSize=}3, {Padding=}1, {Stride=}1, {SuppressBias=}1),
    TNNetDropout.Create(0.5),
    TNNetMaxPool.Create(2),
    TNNetFullConnectLinear.Create(10),
    // SkipBackpropDerivative=1 matches the other examples updated in this
    // change set (CaiOptimizedDenseNet, ImageClassifierSELU, ...).
    TNNetSoftMax.Create({SkipBackpropDerivative=}1)
  ]);
  NN.DebugStructure();
  CreateCifar10Volumes(ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes);

  NeuralFit := TNeuralImageFit.Create;
  NeuralFit.FileNameBase := 'SimpleImageClassifier-'+IntToStr(GetProcessId());
  NeuralFit.InitialLearningRate := 0.001;
  NeuralFit.LearningRateDecay := 0.01;
  NeuralFit.StaircaseEpochs := 10;
  NeuralFit.Inertia := 0.9;
  NeuralFit.L2Decay := 0;
  NeuralFit.Fit(NN, ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes, {NumClasses=}10, {batchsize=}64, {epochs=}50);
  NeuralFit.Free;

  NN.Free;
  ImgTestVolumes.Free;
  ImgValidationVolumes.Free;
  ImgTrainingVolumes.Free;
end;


{$R *.fmx}

// Runs the CIFAR-10 image classifier demo (RunNeuralNetwork above).
procedure TForm1.Button1Click(Sender: TObject);
begin
RunNeuralNetwork;
end;

// Runs the simple OR-gate learning demo (RunSimpleLearning above).
procedure TForm1.Button2Click(Sender: TObject);
begin
RunSimpleLearning;
end;

end.
3 changes: 3 additions & 0 deletions examples/Hypotenuse/README.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
# Learning Hypotenuse Function

## 2 Minutes Intro Video
[![Watch the video](https://img.youtube.com/vi/PdNTgI_qSyo/0.jpg)](https://youtu.be/PdNTgI_qSyo)

This example has these main steps:
* Preparing training data
* Creating the neural network
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ TTestCNNAlgo = class(TCustomApplication)
TNNetDropout.Create(0.5),
TNNetMaxPool.Create(4),
TNNetFullConnectLinear.Create(10),
TNNetSoftMax.Create()
TNNetSoftMax.Create({SkipBackpropDerivative=}1)
]);
NN.DebugWeights();
NN.DebugStructure();
Expand Down
6 changes: 3 additions & 3 deletions examples/IdentityShortcutConnection/README.md
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# Identity Shortcut Connection
The **identity shortcut connection** is a connection that skips few layers and then is summed to the output of a following
The **identity shortcut connection** is a connection that skips a few layers (usually 2 layers) and is then summed with the output of a following
layer. You can find more about it in the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
and [here](https://towardsdatascience.com/an-overview-of-resnet-and-its-variants-5281e2f56035).

The main point of attention is the **summation** of outputs. In CAI, this is done via the `TNNetSum` class. This class gets an array
of layers as an input and sums all inputs. For this summation to work, the shape of each input must be the same otherwise you'll
The main point of attention is the **summation** of outputs. In CAI, this is done via the `TNNetSum` class. `TNNetSum` sums
an array of input layers. For this summation to work, the shape of each input must be the same otherwise you'll
get a run time error. The current example shows this:
```
GlueLayer := NN.AddLayer(TNNetReLU.Create());
Expand Down
2 changes: 1 addition & 1 deletion examples/ImageClassifierSELU/ImageClassifierSELU.lpr
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ TTestCNNAlgo = class(TCustomApplication)
NN.AddLayer( TNNetMaxPool.Create(2) );
NN.AddLayer( TNNetSELU.Create() );
NN.AddLayer( TNNetFullConnectLinear.Create(10) );
NN.AddLayer( TNNetSoftMax.Create() );
NN.AddLayer( TNNetSoftMax.Create({SkipBackpropDerivative=}1) );

CreateCifar10Volumes(ImgTrainingVolumes, ImgValidationVolumes, ImgTestVolumes);

Expand Down
Loading