Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/clang_linux.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ on:
push:
branches:
- master
- develop

jobs:
build:
Expand Down
14 changes: 11 additions & 3 deletions .github/workflows/gcc_linux.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ on:
push:
branches:
- master
- develop

jobs:
build:
Expand Down Expand Up @@ -74,10 +75,17 @@ jobs:
./build/tests/dataset_tests/DatasetTests --gtest_filter=DailyMinTemperaturesTest*
- name: MNIST
run: |
./build/tests/dataset_tests/DatasetTests --gtest_filter=MnistTest*
./build/tests/dataset_tests/DatasetTests --gtest_filter=MnistTest*:-*evaluateBestNeuralNetwork
- name: Fashion MNIST
run: |
./build/tests/dataset_tests/DatasetTests --gtest_filter=FashionMnistTest*
./build/tests/dataset_tests/DatasetTests --gtest_filter=FashionMnistTest*:-*evaluateBestNeuralNetwork
- name: CIFAR-10
run: |
./build/tests/dataset_tests/DatasetTests --gtest_filter=Cifar10Test*
./build/tests/dataset_tests/DatasetTests --gtest_filter=Cifar10Test*:-*evaluateBestNeuralNetwork
- name: AudioCatsAndDogs
run: |
./build/tests/dataset_tests/DatasetTests --gtest_filter=AudioCatsAndDogs*:-*evaluateBestNeuralNetwork
- name: Evaluate best models
if: github.ref_name == 'master'
run: |
./build/tests/dataset_tests/DatasetTests --gtest_filter=*evaluateBestNeuralNetwork
1 change: 1 addition & 0 deletions .github/workflows/msvc_windows.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ on:
push:
branches:
- master
- develop

jobs:
build:
Expand Down
6 changes: 0 additions & 6 deletions include/snn/data/Dataset.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,6 @@ class Dataset
std::vector<std::vector<float>>& trainingLabels, std::vector<std::vector<float>>& testingInputs,
std::vector<std::vector<float>>& testingLabels, nature typeOfTemporal = nature::nonTemporal,
int numberOfRecurrences = 0);
Dataset(const Dataset&) = delete;
Dataset(Dataset&&) = delete;
auto operator=(const Dataset&) -> Dataset& = delete;
auto operator=(Dataset&&) -> Dataset& = delete;

Dataset(problem typeOfProblem, std::vector<std::vector<float>>& inputs, std::vector<std::vector<float>>& labels,
nature temporal = nature::nonTemporal, int numberOfRecurrences = 0);
Expand All @@ -74,8 +70,6 @@ class Dataset

internal::Data data;

virtual ~Dataset() = default;

void normalize(float min, float max);

void shuffle();
Expand Down
9 changes: 5 additions & 4 deletions include/snn/neural_network/layer/neuron/Circular.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,17 +7,17 @@
namespace snn::internal
{
template <typename T>
class Circular final
class Circular
{
private:
protected:
friend class Circular<std::vector<float>>;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& archive, uint32_t version);

std::vector<T> queue;
size_t indexPush = 0;
size_t indexGet = 0;
size_t indexPush{};
size_t indexGet{};
float divider = 1.0F;

public:
Expand All @@ -30,6 +30,7 @@ class Circular final

void initialize(size_t queueSize, size_t dataSize = 1); // Should be call after the ctor.

[[nodiscard]] auto popFront() -> const T*;
[[nodiscard]] auto getBack() -> const T*;
[[nodiscard]] auto getSum() const -> T;
[[nodiscard]] auto getAverage() const -> T;
Expand Down
17 changes: 12 additions & 5 deletions include/snn/neural_network/layer/neuron/Circular.tpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,31 +7,38 @@

namespace snn::internal
{

template <typename T>
auto Circular<T>::getBack() -> const T*
{
    // Returns a pointer to the most recently pushed element: pushBack leaves
    // indexPush pointing at the last slot it wrote.
    // Fix: the assert previously checked indexGet (copy-paste from popFront)
    // with a non-strict bound, but this function indexes with indexPush, and
    // indexPush == queue.size() would read out of bounds.
    assert(this->indexPush < this->queue.size());
    return &this->queue[this->indexPush];
}

template <typename T>
auto Circular<T>::popFront() -> const T*
{
    // Advance the read cursor, wrapping around the ring, then return a
    // pointer to the element at the new position.
    // Fix: this span contained both the pre-change post-increment return
    // (`&this->queue[this->indexGet++]`) and the post-change plain return;
    // keeping both would advance the cursor twice. Keep the post-change form.
    this->indexGet++;
    if (this->indexGet >= this->queue.size())
    {
        this->indexGet = 0;
    }
    assert(this->indexGet < this->queue.size()); // consistent with pushBack's guard
    return &this->queue[this->indexGet];
}

template <typename T>
void Circular<T>::pushBack(const T& data)
{
    // Advance the write cursor, wrapping around the ring, then store `data`
    // at the new position (indexPush is left pointing at the written slot,
    // which getBack relies on).
    // Fix: this span contained both the pre-change post-increment store
    // (`this->queue[this->indexPush++] = data`) and the post-change plain
    // store; keeping both would write twice and skip a slot. Keep the
    // post-change form.
    assert(this->indexPush <= this->queue.size());
    this->indexPush++;
    if (this->indexPush >= this->queue.size())
    {
        this->indexPush = 0;
    }
    this->queue[this->indexPush] = data;
}

// Generic fallback: element-wise multiply-accumulate is only meaningful for
// specialized element types, so the primary template always throws.
template <typename T>
auto Circular<T>::MultiplyAndAccumulate([[maybe_unused]] const Circular<float>& multiplier) const -> T
{
    throw NotImplementedException();
}
} // namespace snn::internal
} // namespace snn::internal
38 changes: 38 additions & 0 deletions include/snn/neural_network/layer/neuron/InputCircular.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#pragma once
#include <algorithm>
#include <cassert>
#include <concepts>
#include <vector>

#include "Circular.hpp"

namespace snn::internal
{
/// Circular buffer of input vectors that can append extra scalar values
/// (e.g. recurrent outputs) after the copied input in the same slot.
class InputCircular final : public Circular<std::vector<float>>
{
private:
    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive& archive, uint32_t version);

public:
    /// Pushes `data` plus optional extra float values into the next ring slot.
    /// Precondition: each slot must have been sized (via initialize) to hold
    /// at least data.size() + sizeof...(extraValues) floats — now asserted.
    template <std::same_as<float>... Values>
    void pushBack(const std::vector<float>& data, [[maybe_unused]] Values... extraValues)
    {
        // Advance the write cursor, wrapping around the ring (mirrors
        // Circular<T>::pushBack, which this overload shadows).
        this->indexPush++;
        if (this->indexPush >= this->queue.size())
        {
            this->indexPush = 0;
        }
        auto size = data.size();
        auto& inputs = this->queue[this->indexPush];
        // Guard the unchecked copy + fold-writes below against an undersized slot.
        assert(inputs.size() >= size + sizeof...(extraValues));
        std::ranges::copy(data, inputs.begin());
        // Append each extra value immediately after the copied input data.
        ((inputs[size++] = extraValues), ...);
    }
};

// Boost.Serialization hook: registers the derived-to-base cast so archived
// base-class pointers can be restored as InputCircular, then serializes the
// inherited Circular state (InputCircular adds no data members of its own).
template <class Archive>
void InputCircular::serialize(Archive& archive, [[maybe_unused]] const uint32_t version)
{
    boost::serialization::void_cast_register<InputCircular, Circular<std::vector<float>>>();
    archive& boost::serialization::base_object<Circular<std::vector<float>>>(*this);
}
} // namespace snn::internal
14 changes: 8 additions & 6 deletions include/snn/neural_network/layer/neuron/Neuron.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

#include "../../optimizer/StochasticGradientDescent.hpp"
#include "Circular.hpp"
#include "InputCircular.hpp"
#include "NeuronModel.hpp"
#include "activation_function/ActivationFunction.hpp"

Expand All @@ -12,6 +13,7 @@ namespace snn::internal
class Neuron
{
private:
friend class StochasticGradientDescent;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& archive, uint32_t version);
Expand All @@ -23,7 +25,7 @@ class Neuron
float bias{};

std::vector<float> deltaWeights;
Circular<std::vector<float>> lastInputs;
InputCircular lastInputs;
std::vector<float> errors;
Circular<float> lastError;
Circular<float> lastSum;
Expand All @@ -35,15 +37,15 @@ class Neuron

public:
Neuron() = default; // use restricted to Boost library only
Neuron(Neuron&&) = delete;
auto operator=(const Neuron&) -> Neuron& = delete;
auto operator=(Neuron&&) -> Neuron& = delete;
Neuron(NeuronModel model, std::shared_ptr<NeuralNetworkOptimizer> optimizer);
Neuron(const Neuron& neuron) = default;
~Neuron() = default;

std::shared_ptr<ActivationFunction> outputFunction;

[[nodiscard]] auto computeOutput() -> float;
[[nodiscard]] auto backOutput(float error) -> std::vector<float>&;
void back(float error);
void train();

[[nodiscard]] auto isValid() const -> errorType;

[[nodiscard]] auto getWeights() const -> std::vector<float>;
Expand Down
7 changes: 0 additions & 7 deletions include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,11 @@ class RecurrentNeuron final : public Neuron
{
private:
friend class GatedRecurrentUnit;
friend class StochasticGradientDescent;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& archive, uint32_t version);

float lastOutput = 0;
float previousOutput = 0;
float recurrentError = 0;
float previousSum = 0;

Expand All @@ -25,12 +23,8 @@ class RecurrentNeuron final : public Neuron
public:
RecurrentNeuron() = default; // use restricted to Boost library only
RecurrentNeuron(NeuronModel model, std::shared_ptr<NeuralNetworkOptimizer> optimizer);
RecurrentNeuron(const RecurrentNeuron& recurrentNeuron) = default;
~RecurrentNeuron() = default;

[[nodiscard]] auto output(const std::vector<float>& inputs, bool reset) -> float;
[[nodiscard]] auto backOutput(float error) -> std::vector<float>&;
void back(float error);
void train();

[[nodiscard]] auto isValid() const -> errorType;
Expand All @@ -45,7 +39,6 @@ void RecurrentNeuron::serialize(Archive& archive, [[maybe_unused]] const uint32_
boost::serialization::void_cast_register<RecurrentNeuron, Neuron>();
archive& boost::serialization::base_object<Neuron>(*this);
archive& this->lastOutput;
archive& this->previousOutput;
archive& this->recurrentError;
archive& this->previousSum;
}
Expand Down
5 changes: 0 additions & 5 deletions include/snn/neural_network/layer/neuron/SimpleNeuron.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,20 +9,15 @@ namespace snn::internal
class SimpleNeuron final : public Neuron
{
private:
friend class StochasticGradientDescent;
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive& archive, uint32_t version);

public:
SimpleNeuron() = default; // use restricted to Boost library only
SimpleNeuron(NeuronModel model, std::shared_ptr<NeuralNetworkOptimizer> optimizer);
SimpleNeuron(const SimpleNeuron& neuron) = default;
~SimpleNeuron() = default;

[[nodiscard]] auto output(const std::vector<float>& inputs) -> float;
[[nodiscard]] auto backOutput(float error) -> std::vector<float>&;
void back(float error);
void train();

[[nodiscard]] auto isValid() const -> errorType;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@ class ActivationFunction

static std::vector<std::shared_ptr<ActivationFunction>> activationFunctions;

protected:
static constexpr float largeFloat = 1e4;

public:
const float min;
const float max;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,14 @@ class GaussianErrorLinearUnit final : public ActivationFunction

public:
GaussianErrorLinearUnit()
: ActivationFunction(0, std::numeric_limits<float>::infinity())
: ActivationFunction(0, largeFloat)
{
}

[[nodiscard]] auto function(const float x) const -> float final
{
    // GELU sigmoid approximation: x * sigmoid(1.702 * x), written via tanh,
    // capped at the activation's declared max to avoid unbounded outputs.
    // Fix: this span retained the pre-change uncapped `return x * ...` line
    // next to the post-change capped version; keep only the capped form.
    const float y = x * (std::tanhf(1.702F * x / 2.0F) + 1.0F) / 2.0F; // NOLINT(*magic-numbers)
    return std::min(y, this->max);
}

[[nodiscard]] auto derivative(const float x) const -> float final
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#pragma once
#include <algorithm>
#include <limits>

#include "ActivationFunction.hpp"
Expand All @@ -14,12 +15,12 @@ class Identity final : public ActivationFunction

public:
Identity()
: ActivationFunction(-std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity())
: ActivationFunction(-largeFloat, largeFloat)
{
}

// Identity activation, clamped to [min, max] (finite largeFloat bounds).
// Fix: this span retained the pre-change unclamped `return x;` line next to
// the post-change clamped version; keep only the clamped form.
[[nodiscard]] auto function(const float x) const -> float final { return std::clamp(x, this->min, this->max); }

[[nodiscard]] auto derivative([[maybe_unused]] const float x) const -> float final { return 1.0F; }
};
} // namespace snn::internal
} // namespace snn::internal
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <limits>

#include "ActivationFunction.hpp"

Expand All @@ -15,18 +15,19 @@ class ImprovedSigmoid final : public ActivationFunction

public:
ImprovedSigmoid()
: ActivationFunction(-std::numeric_limits<float>::infinity(), +std::numeric_limits<float>::infinity())
: ActivationFunction(-largeFloat, largeFloat)
{
}

[[nodiscard]] auto function(const float x) const -> float final
{
    // Sigmoid plus a small linear term (0.05 * x) so the gradient never fully
    // vanishes, clamped to the activation's finite [min, max] range.
    // Fix: this span retained the pre-change unclamped return line next to
    // the post-change clamped version; keep only the clamped form.
    const float y = (1.0F / (1.0F + expf(-x))) + (x * 0.05F); // NOLINT(*magic-numbers)
    return std::clamp(y, this->min, this->max);
}

[[nodiscard]] auto derivative(const float x) const -> float final
{
    // Sigmoid derivative: e^x / (e^x + 1)^2. Compute expf once and square by
    // multiplication instead of calling expf twice and powf(…, 2).
    // NOTE(review): the 0.05 linear term of function() is not reflected here
    // (derivative would be + 0.05F) — presumably deliberate; confirm upstream.
    const float e = expf(x);
    const float d = e + 1.0F;
    return e / (d * d);
}
};
} // namespace snn::internal
} // namespace snn::internal
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#pragma once
#include <algorithm>
#include <limits>

#include "ActivationFunction.hpp"
Expand All @@ -14,15 +15,16 @@ class LeakyRectifiedLinearUnit final : public ActivationFunction

public:
LeakyRectifiedLinearUnit()
: ActivationFunction(0, std::numeric_limits<float>::infinity())
: ActivationFunction(0, largeFloat)
{
}

static constexpr float negativeSlopeAngle = 0.001F;

[[nodiscard]] auto function(const float x) const -> float final
{
    // Leaky ReLU: identity for positive inputs, small negative slope
    // otherwise, capped at the activation's declared max.
    // Fix: this span retained the pre-change uncapped return line next to
    // the post-change capped version; keep only the capped form.
    const float y = (x > 0.0F) ? x : negativeSlopeAngle * x;
    return std::min(y, this->max);
}

[[nodiscard]] auto derivative(const float x) const -> float final
Expand Down
Loading