From 15af5c91108d04d46e04c4df7b859ba60343baba Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Sun, 16 Nov 2025 22:45:28 +0100 Subject: [PATCH 01/10] Apply rule of zero on Dataset --- include/snn/data/Dataset.hpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/include/snn/data/Dataset.hpp b/include/snn/data/Dataset.hpp index 8c61e9a9..5b6829ef 100644 --- a/include/snn/data/Dataset.hpp +++ b/include/snn/data/Dataset.hpp @@ -50,10 +50,6 @@ class Dataset std::vector>& trainingLabels, std::vector>& testingInputs, std::vector>& testingLabels, nature typeOfTemporal = nature::nonTemporal, int numberOfRecurrences = 0); - Dataset(const Dataset&) = delete; - Dataset(Dataset&&) = delete; - auto operator=(const Dataset&) -> Dataset& = delete; - auto operator=(Dataset&&) -> Dataset& = delete; Dataset(problem typeOfProblem, std::vector>& inputs, std::vector>& labels, nature temporal = nature::nonTemporal, int numberOfRecurrences = 0); @@ -74,8 +70,6 @@ class Dataset internal::Data data; - virtual ~Dataset() = default; - void normalize(float min, float max); void shuffle(); From 34a65640a40d47c320f86bc4a02eee8b2038e308 Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Sun, 16 Nov 2025 22:47:27 +0100 Subject: [PATCH 02/10] Clamp ReLU and leaky ReLU --- .../layer/neuron/activation_function/LeakyReLU.hpp | 4 +++- .../neural_network/layer/neuron/activation_function/ReLU.hpp | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp b/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp index 6f6ca7b3..8a5b8801 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp @@ -1,4 +1,5 @@ #pragma once +#include #include #include "ActivationFunction.hpp" @@ -22,7 +23,8 @@ class LeakyRectifiedLinearUnit final : public ActivationFunction [[nodiscard]] auto 
function(const float x) const -> float final { - return (x > 0.0F) ? x : negativeSlopeAngle * x; + return (x > 0.0F) ? std::min(x, 1.0e4F) + : std::max(negativeSlopeAngle * x, -1.0e4F); } [[nodiscard]] auto derivative(const float x) const -> float final diff --git a/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp b/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp index 6471a402..4657d377 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp @@ -1,4 +1,5 @@ #pragma once +#include #include #include "ActivationFunction.hpp" @@ -18,7 +19,7 @@ class RectifiedLinearUnit final : public ActivationFunction { } - [[nodiscard]] auto function(const float x) const -> float final { return (x > 0.0F) ? x : 0.0F; } + [[nodiscard]] auto function(const float x) const -> float final { return std::clamp(x, 0.0F, 1.0e4F); } [[nodiscard]] auto derivative(const float x) const -> float final { return (x > 0.0F) ?
1.0F : 0.0F; } }; From 210d9d53d4f047092a207c0e04c56536ffb7deae Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Sun, 16 Nov 2025 23:30:50 +0100 Subject: [PATCH 03/10] Simplify RecurrentNeuron --- src/neural_network/layer/neuron/RecurrentNeuron.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/neural_network/layer/neuron/RecurrentNeuron.cpp b/src/neural_network/layer/neuron/RecurrentNeuron.cpp index 5c4c9496..037abf6d 100644 --- a/src/neural_network/layer/neuron/RecurrentNeuron.cpp +++ b/src/neural_network/layer/neuron/RecurrentNeuron.cpp @@ -86,4 +86,4 @@ auto RecurrentNeuron::operator==(const RecurrentNeuron& neuron) const -> bool } auto RecurrentNeuron::operator!=(const RecurrentNeuron& neuron) const -> bool { return !(*this == neuron); } -} // namespace snn::internal \ No newline at end of file +} // namespace snn::internal From 21a8f956e7f6fa71c9ee67fc6b797ea74a6ead47 Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Tue, 18 Nov 2025 01:03:04 +0100 Subject: [PATCH 04/10] Create InputCircular and simplify Neuron --- .../neural_network/layer/neuron/Circular.hpp | 9 +++-- .../neural_network/layer/neuron/Circular.tpp | 17 +++++--- .../layer/neuron/InputCircular.hpp | 39 +++++++++++++++++++ .../neural_network/layer/neuron/Neuron.hpp | 3 +- .../layer/neuron/RecurrentNeuron.hpp | 2 - .../layer/neuron/RecurrentNeuron.cpp | 13 +++---- .../layer/neuron/SimpleNeuron.cpp | 26 +++++-------- .../optimizer/StochasticGradientDescent.cpp | 12 ++---- 8 files changed, 75 insertions(+), 46 deletions(-) create mode 100644 include/snn/neural_network/layer/neuron/InputCircular.hpp diff --git a/include/snn/neural_network/layer/neuron/Circular.hpp b/include/snn/neural_network/layer/neuron/Circular.hpp index 14dc897e..ebaff273 100644 --- a/include/snn/neural_network/layer/neuron/Circular.hpp +++ b/include/snn/neural_network/layer/neuron/Circular.hpp @@ -7,17 +7,17 @@ namespace snn::internal { template -class Circular final +class Circular { - 
private: + protected: friend class Circular>; friend class boost::serialization::access; template void serialize(Archive& archive, uint32_t version); std::vector queue; - size_t indexPush = 0; - size_t indexGet = 0; + size_t indexPush = -1; + size_t indexGet = -1; float divider = 1.0F; public: @@ -30,6 +30,7 @@ class Circular final void initialize(size_t queueSize, size_t dataSize = 1); // Should be call after the ctor. + [[nodiscard]] auto popFront() -> const T*; [[nodiscard]] auto getBack() -> const T*; [[nodiscard]] auto getSum() const -> T; [[nodiscard]] auto getAverage() const -> T; diff --git a/include/snn/neural_network/layer/neuron/Circular.tpp b/include/snn/neural_network/layer/neuron/Circular.tpp index 5ddea703..81601764 100644 --- a/include/snn/neural_network/layer/neuron/Circular.tpp +++ b/include/snn/neural_network/layer/neuron/Circular.tpp @@ -7,26 +7,33 @@ namespace snn::internal { + template auto Circular::getBack() -> const T* { - assert(this->indexGet <= this->queue.size()); + return &this->queue[this->indexPush]; +} + +template +auto Circular::popFront() -> const T* +{ + this->indexGet++; if (this->indexGet >= this->queue.size()) { this->indexGet = 0; } - return &this->queue[this->indexGet++]; + return &this->queue[this->indexGet]; } template void Circular::pushBack(const T& data) { - assert(this->indexPush <= this->queue.size()); + this->indexPush++; if (this->indexPush >= this->queue.size()) { this->indexPush = 0; } - this->queue[this->indexPush++] = data; + this->queue[this->indexPush] = data; } template @@ -34,4 +41,4 @@ auto Circular::MultiplyAndAccumulate([[maybe_unused]] const Circular& { throw NotImplementedException(); } -} // namespace snn::internal \ No newline at end of file +} // namespace snn::internal diff --git a/include/snn/neural_network/layer/neuron/InputCircular.hpp b/include/snn/neural_network/layer/neuron/InputCircular.hpp new file mode 100644 index 00000000..f93ed735 --- /dev/null +++ 
b/include/snn/neural_network/layer/neuron/InputCircular.hpp @@ -0,0 +1,39 @@ +#pragma once +#include +#include + +#include "Circular.hpp" + +namespace snn::internal +{ +class InputCircular final : public Circular> +{ + private: + friend class boost::serialization::access; + template + void serialize(Archive& archive, uint32_t version); + + public: + template ... Values> + void pushBack(const std::vector& data, [[maybe_unused]] Values... extraValues) + { + this->indexPush++; + if (this->indexPush >= this->queue.size()) + { + this->indexPush = 0; + } + auto size = data.size(); + auto& inputs = this->queue[this->indexPush]; + inputs.resize(size + sizeof...(extraValues)); + std::ranges::copy(data, inputs.begin()); + ((inputs[size++] = extraValues), ...); + } +}; + +template +void InputCircular::serialize(Archive& archive, [[maybe_unused]] const uint32_t version) +{ + boost::serialization::void_cast_register>>(); + archive& boost::serialization::base_object>>(*this); +} +} // namespace snn::internal diff --git a/include/snn/neural_network/layer/neuron/Neuron.hpp b/include/snn/neural_network/layer/neuron/Neuron.hpp index 1d054dea..f990ce61 100644 --- a/include/snn/neural_network/layer/neuron/Neuron.hpp +++ b/include/snn/neural_network/layer/neuron/Neuron.hpp @@ -4,6 +4,7 @@ #include "../../optimizer/StochasticGradientDescent.hpp" #include "Circular.hpp" +#include "InputCircular.hpp" #include "NeuronModel.hpp" #include "activation_function/ActivationFunction.hpp" @@ -23,7 +24,7 @@ class Neuron float bias{}; std::vector deltaWeights; - Circular> lastInputs; + InputCircular lastInputs; std::vector errors; Circular lastError; Circular lastSum; diff --git a/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp b/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp index 03d27f35..533bdc20 100644 --- a/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp +++ b/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp @@ -16,7 +16,6 @@ class 
RecurrentNeuron final : public Neuron void serialize(Archive& archive, uint32_t version); float lastOutput = 0; - float previousOutput = 0; float recurrentError = 0; float previousSum = 0; @@ -45,7 +44,6 @@ void RecurrentNeuron::serialize(Archive& archive, [[maybe_unused]] const uint32_ boost::serialization::void_cast_register(); archive& boost::serialization::base_object(*this); archive& this->lastOutput; - archive& this->previousOutput; archive& this->recurrentError; archive& this->previousSum; } diff --git a/src/neural_network/layer/neuron/RecurrentNeuron.cpp b/src/neural_network/layer/neuron/RecurrentNeuron.cpp index 037abf6d..7c308d61 100644 --- a/src/neural_network/layer/neuron/RecurrentNeuron.cpp +++ b/src/neural_network/layer/neuron/RecurrentNeuron.cpp @@ -1,7 +1,6 @@ #include "RecurrentNeuron.hpp" #include -#include namespace snn::internal { @@ -20,7 +19,6 @@ auto RecurrentNeuron::output(const std::vector& inputs, bool temporalRese this->reset(); } this->lastInputs.pushBack(inputs); - this->previousOutput = this->lastOutput; float sum = 0.0F; // to activate the SIMD optimization size_t w = 0; assert(this->weights.size() == inputs.size() + 2); @@ -29,7 +27,8 @@ auto RecurrentNeuron::output(const std::vector& inputs, bool temporalRese { sum += inputs[w] * this->weights[w]; } - sum += this->previousOutput * this->weights[w] + this->bias * this->weights[w + 1]; + sum += this->lastOutput * this->weights[w]; + sum += this->bias * this->weights[w + 1]; this->lastSum.pushBack(sum); const float output = outputFunction->function(sum); this->lastOutput = output; @@ -41,7 +40,7 @@ auto RecurrentNeuron::output(const std::vector& inputs, bool temporalRese auto RecurrentNeuron::backOutput(float error) -> std::vector& { - const auto& sum = *this->lastSum.getBack(); + const auto& sum = *this->lastSum.popFront(); const auto e = error * this->outputFunction->derivative(sum); this->lastError.pushBack(e); assert(this->weights.size() == this->errors.size() + 2); @@ -55,7 +54,7 
@@ auto RecurrentNeuron::backOutput(float error) -> std::vector& void RecurrentNeuron::back(float error) { - const auto& sum = *this->lastSum.getBack(); + const auto& sum = *this->lastSum.popFront(); const auto e = error * this->outputFunction->derivative(sum); this->lastError.pushBack(e); } @@ -64,7 +63,6 @@ void RecurrentNeuron::train() { this->optimizer->updateWeights(*this); } inline void RecurrentNeuron::reset() { - this->previousOutput = 0; this->recurrentError = 0; this->previousSum = 0; } @@ -81,8 +79,7 @@ auto RecurrentNeuron::isValid() const -> errorType auto RecurrentNeuron::operator==(const RecurrentNeuron& neuron) const -> bool { return this->Neuron::operator==(neuron) && this->lastOutput == neuron.lastOutput && - this->previousOutput == neuron.previousOutput && this->recurrentError == neuron.recurrentError && - this->previousSum == neuron.previousSum; + this->recurrentError == neuron.recurrentError && this->previousSum == neuron.previousSum; } auto RecurrentNeuron::operator!=(const RecurrentNeuron& neuron) const -> bool { return !(*this == neuron); } diff --git a/src/neural_network/layer/neuron/SimpleNeuron.cpp b/src/neural_network/layer/neuron/SimpleNeuron.cpp index 34aadff4..5016b63f 100644 --- a/src/neural_network/layer/neuron/SimpleNeuron.cpp +++ b/src/neural_network/layer/neuron/SimpleNeuron.cpp @@ -1,6 +1,8 @@ #include "SimpleNeuron.hpp" +#include #include +#include #include namespace snn::internal @@ -12,37 +14,27 @@ SimpleNeuron::SimpleNeuron(NeuronModel model, std::shared_ptr& inputs) -> float { - this->lastInputs.pushBack(inputs); - float sum = 0.0F; // to activate the SIMD optimization - assert(this->weights.size() == inputs.size() + 1); - size_t w = 0; -#pragma omp simd - for (w = 0; w < inputs.size(); ++w) - { - sum += inputs[w] * this->weights[w]; - } - sum += this->weights[w] * this->bias; + this->lastInputs.pushBack(inputs, this->bias); + const auto fullInput = *this->lastInputs.getBack(); + assert(this->weights.size() == 
fullInput.size()); + const auto sum = std::inner_product(weights.begin(), weights.end(), fullInput.begin(), 0.0F); this->lastSum.pushBack(sum); return this->outputFunction->function(sum); } auto SimpleNeuron::backOutput(float error) -> std::vector& { - const auto& sum = *this->lastSum.getBack(); + const auto& sum = *this->lastSum.popFront(); const auto e = error * this->outputFunction->derivative(sum); this->lastError.pushBack(e); assert(this->weights.size() == this->errors.size() + 1); -#pragma omp simd // seems to do nothing - for (size_t w = 0; w < this->errors.size(); ++w) - { - this->errors[w] = e * this->weights[w]; - } + std::ranges::transform(errors, weights, errors.begin(), [e](float, float w) -> float { return e * w; }); return this->errors; } void SimpleNeuron::back(float error) { - const auto& sum = *this->lastSum.getBack(); + const auto& sum = *this->lastSum.popFront(); const auto e = error * this->outputFunction->derivative(sum); this->lastError.pushBack(e); } diff --git a/src/neural_network/optimizer/StochasticGradientDescent.cpp b/src/neural_network/optimizer/StochasticGradientDescent.cpp index 7b73a220..d95e81e7 100644 --- a/src/neural_network/optimizer/StochasticGradientDescent.cpp +++ b/src/neural_network/optimizer/StochasticGradientDescent.cpp @@ -23,22 +23,16 @@ auto StochasticGradientDescent::clone() const -> std::shared_ptrmomentum; - const auto& numberOfInputs = neuron.numberOfInputs; - const auto error = neuron.lastError.getSum(); const auto input_error = neuron.lastInputs.MultiplyAndAccumulate(neuron.lastError); auto& deltaWeights = neuron.deltaWeights; auto& weights = neuron.weights; const auto lr = this->learningRate; - // #pragma omp simd - for (w = 0; w < numberOfInputs; ++w) + for (size_t w = 0; w < neuron.weights.size(); ++w) { - deltaWeights[w] = lr * input_error[w] + m * deltaWeights[w]; + deltaWeights[w] = (lr * input_error[w]) + (m * deltaWeights[w]); weights[w] += deltaWeights[w]; } - deltaWeights[w] = lr * error * neuron.bias + 
m * deltaWeights[w]; - weights[w] += deltaWeights[w]; } void StochasticGradientDescent::updateWeights(RecurrentNeuron& neuron) const @@ -59,7 +53,7 @@ void StochasticGradientDescent::updateWeights(RecurrentNeuron& neuron) const } // TODO(matth): previousOutput should be a Circular like lastInputs and do previousOutput.MultiplyAndAccumulate // (neuron.lastError). And also rename previousOutput as lastOutput. - deltaWeights[w] = this->learningRate * neuron.recurrentError * neuron.previousOutput + m * neuron.deltaWeights[w]; + deltaWeights[w] = this->learningRate * neuron.recurrentError * neuron.lastOutput + m * neuron.deltaWeights[w]; weights[w] += deltaWeights[w]; neuron.recurrentError = error; // + neuron.recurrentError * // neuron.outputFunction->derivative(neuron.previousSum) * weights[w]; From 252cbd953d63aaaa191c5fa001c1f4a7b439d33e Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Tue, 18 Nov 2025 23:51:26 +0100 Subject: [PATCH 05/10] Simplify Neuron a lot --- .../neural_network/layer/neuron/Neuron.hpp | 10 ++--- .../layer/neuron/RecurrentNeuron.hpp | 2 - .../layer/neuron/SimpleNeuron.hpp | 4 -- src/neural_network/layer/neuron/Neuron.cpp | 27 +++++++++++ .../layer/neuron/RecurrentNeuron.cpp | 45 ++----------------- .../layer/neuron/SimpleNeuron.cpp | 25 +---------- 6 files changed, 36 insertions(+), 77 deletions(-) diff --git a/include/snn/neural_network/layer/neuron/Neuron.hpp b/include/snn/neural_network/layer/neuron/Neuron.hpp index f990ce61..067ac682 100644 --- a/include/snn/neural_network/layer/neuron/Neuron.hpp +++ b/include/snn/neural_network/layer/neuron/Neuron.hpp @@ -36,15 +36,15 @@ class Neuron public: Neuron() = default; // use restricted to Boost library only - Neuron(Neuron&&) = delete; - auto operator=(const Neuron&) -> Neuron& = delete; - auto operator=(Neuron&&) -> Neuron& = delete; Neuron(NeuronModel model, std::shared_ptr optimizer); - Neuron(const Neuron& neuron) = default; - ~Neuron() = default; std::shared_ptr outputFunction; + 
[[nodiscard]] auto computeOutput() -> float; + [[nodiscard]] auto backOutput(float error) -> std::vector&; + void back(float error); + void train(); + [[nodiscard]] auto isValid() const -> errorType; [[nodiscard]] auto getWeights() const -> std::vector; diff --git a/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp b/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp index 533bdc20..e8a7db3d 100644 --- a/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp +++ b/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp @@ -28,8 +28,6 @@ class RecurrentNeuron final : public Neuron ~RecurrentNeuron() = default; [[nodiscard]] auto output(const std::vector& inputs, bool reset) -> float; - [[nodiscard]] auto backOutput(float error) -> std::vector&; - void back(float error); void train(); [[nodiscard]] auto isValid() const -> errorType; diff --git a/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp b/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp index 30310ebe..4d78f27d 100644 --- a/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp +++ b/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp @@ -17,12 +17,8 @@ class SimpleNeuron final : public Neuron public: SimpleNeuron() = default; // use restricted to Boost library only SimpleNeuron(NeuronModel model, std::shared_ptr optimizer); - SimpleNeuron(const SimpleNeuron& neuron) = default; - ~SimpleNeuron() = default; [[nodiscard]] auto output(const std::vector& inputs) -> float; - [[nodiscard]] auto backOutput(float error) -> std::vector&; - void back(float error); void train(); [[nodiscard]] auto isValid() const -> errorType; diff --git a/src/neural_network/layer/neuron/Neuron.cpp b/src/neural_network/layer/neuron/Neuron.cpp index 7250a9d4..256bdebd 100644 --- a/src/neural_network/layer/neuron/Neuron.cpp +++ b/src/neural_network/layer/neuron/Neuron.cpp @@ -1,5 +1,6 @@ #include "Neuron.hpp" +#include #include #include "../../../tools/Tools.hpp" @@ -34,6 +35,32 @@ auto 
Neuron::randomInitializeWeight(int numberOfWeights) -> float return tools::randomBetween(-valueMax, valueMax); } +auto Neuron::computeOutput() -> float +{ + const auto& fullInput = *this->lastInputs.getBack(); + assert(this->weights.size() == fullInput.size()); + const auto sum = std::inner_product(weights.begin(), weights.end(), fullInput.begin(), 0.0F); + this->lastSum.pushBack(sum); + return this->outputFunction->function(sum); +} + +auto Neuron::backOutput(float error) -> std::vector& +{ + const auto& sum = *this->lastSum.popFront(); + const auto e = error * this->outputFunction->derivative(sum); + this->lastError.pushBack(e); + assert(this->weights.size() >= this->errors.size() + 1); + std::ranges::transform(errors, weights, errors.begin(), [e](float, float w) -> float { return e * w; }); + return this->errors; +} + +void Neuron::back(float error) +{ + const auto& sum = *this->lastSum.popFront(); + const auto e = error * this->outputFunction->derivative(sum); + this->lastError.pushBack(e); +} + auto Neuron::isValid() const -> errorType { const auto outlier_float = 100000.0F; diff --git a/src/neural_network/layer/neuron/RecurrentNeuron.cpp b/src/neural_network/layer/neuron/RecurrentNeuron.cpp index 7c308d61..0b0bd330 100644 --- a/src/neural_network/layer/neuron/RecurrentNeuron.cpp +++ b/src/neural_network/layer/neuron/RecurrentNeuron.cpp @@ -9,54 +9,15 @@ RecurrentNeuron::RecurrentNeuron(NeuronModel model, std::shared_ptr& inputs, bool temporalReset) -> float { if (temporalReset) { this->reset(); } - this->lastInputs.pushBack(inputs); - float sum = 0.0F; // to activate the SIMD optimization - size_t w = 0; - assert(this->weights.size() == inputs.size() + 2); -#pragma omp simd - for (w = 0; w < inputs.size(); ++w) - { - sum += inputs[w] * this->weights[w]; - } - sum += this->lastOutput * this->weights[w]; - sum += this->bias * this->weights[w + 1]; - this->lastSum.pushBack(sum); - const float output = outputFunction->function(sum); - this->lastOutput = output; - 
return output; -#ifdef _MSC_VER -#pragma warning(default : 4701) -#endif -} - -auto RecurrentNeuron::backOutput(float error) -> std::vector& -{ - const auto& sum = *this->lastSum.popFront(); - const auto e = error * this->outputFunction->derivative(sum); - this->lastError.pushBack(e); - assert(this->weights.size() == this->errors.size() + 2); -#pragma omp simd // seems to do nothing - for (int w = 0; w < this->numberOfInputs; ++w) - { - this->errors[w] = e * this->weights[w]; - } - return this->errors; -} - -void RecurrentNeuron::back(float error) -{ - const auto& sum = *this->lastSum.popFront(); - const auto e = error * this->outputFunction->derivative(sum); - this->lastError.pushBack(e); + this->lastInputs.pushBack(inputs, this->lastOutput, this->bias); + this->lastOutput = Neuron::computeOutput(); + return this->lastOutput; } void RecurrentNeuron::train() { this->optimizer->updateWeights(*this); } diff --git a/src/neural_network/layer/neuron/SimpleNeuron.cpp b/src/neural_network/layer/neuron/SimpleNeuron.cpp index 5016b63f..d09eed38 100644 --- a/src/neural_network/layer/neuron/SimpleNeuron.cpp +++ b/src/neural_network/layer/neuron/SimpleNeuron.cpp @@ -1,8 +1,6 @@ #include "SimpleNeuron.hpp" -#include #include -#include #include namespace snn::internal @@ -15,28 +13,7 @@ SimpleNeuron::SimpleNeuron(NeuronModel model, std::shared_ptr& inputs) -> float { this->lastInputs.pushBack(inputs, this->bias); - const auto fullInput = *this->lastInputs.getBack(); - assert(this->weights.size() == fullInput.size()); - const auto sum = std::inner_product(weights.begin(), weights.end(), fullInput.begin(), 0.0F); - this->lastSum.pushBack(sum); - return this->outputFunction->function(sum); -} - -auto SimpleNeuron::backOutput(float error) -> std::vector& -{ - const auto& sum = *this->lastSum.popFront(); - const auto e = error * this->outputFunction->derivative(sum); - this->lastError.pushBack(e); - assert(this->weights.size() == this->errors.size() + 1); - 
std::ranges::transform(errors, weights, errors.begin(), [e](float, float w) -> float { return e * w; }); - return this->errors; -} - -void SimpleNeuron::back(float error) -{ - const auto& sum = *this->lastSum.popFront(); - const auto e = error * this->outputFunction->derivative(sum); - this->lastError.pushBack(e); + return Neuron::computeOutput(); } void SimpleNeuron::train() { this->optimizer->updateWeights(*this); } From 88aa865363760e939b828ca237edeb136e52805b Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Wed, 19 Nov 2025 00:42:58 +0100 Subject: [PATCH 06/10] Simplify StochasticGradientDescent a lot --- .../neural_network/layer/neuron/Neuron.hpp | 1 + .../layer/neuron/RecurrentNeuron.hpp | 3 -- .../layer/neuron/SimpleNeuron.hpp | 1 - .../optimizer/NeuralNetworkOptimizer.hpp | 10 +++-- .../optimizer/StochasticGradientDescent.hpp | 9 +++-- .../optimizer/StochasticGradientDescent.cpp | 40 ++----------------- 6 files changed, 16 insertions(+), 48 deletions(-) diff --git a/include/snn/neural_network/layer/neuron/Neuron.hpp b/include/snn/neural_network/layer/neuron/Neuron.hpp index 067ac682..a9783d97 100644 --- a/include/snn/neural_network/layer/neuron/Neuron.hpp +++ b/include/snn/neural_network/layer/neuron/Neuron.hpp @@ -13,6 +13,7 @@ namespace snn::internal class Neuron { private: + friend class StochasticGradientDescent; friend class boost::serialization::access; template void serialize(Archive& archive, uint32_t version); diff --git a/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp b/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp index e8a7db3d..bb9a8c42 100644 --- a/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp +++ b/include/snn/neural_network/layer/neuron/RecurrentNeuron.hpp @@ -10,7 +10,6 @@ class RecurrentNeuron final : public Neuron { private: friend class GatedRecurrentUnit; - friend class StochasticGradientDescent; friend class boost::serialization::access; template void serialize(Archive& archive, uint32_t 
version); @@ -24,8 +23,6 @@ class RecurrentNeuron final : public Neuron public: RecurrentNeuron() = default; // use restricted to Boost library only RecurrentNeuron(NeuronModel model, std::shared_ptr optimizer); - RecurrentNeuron(const RecurrentNeuron& recurrentNeuron) = default; - ~RecurrentNeuron() = default; [[nodiscard]] auto output(const std::vector& inputs, bool reset) -> float; void train(); diff --git a/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp b/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp index 4d78f27d..d40d57d4 100644 --- a/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp +++ b/include/snn/neural_network/layer/neuron/SimpleNeuron.hpp @@ -9,7 +9,6 @@ namespace snn::internal class SimpleNeuron final : public Neuron { private: - friend class StochasticGradientDescent; friend class boost::serialization::access; template void serialize(Archive& archive, uint32_t version); diff --git a/include/snn/neural_network/optimizer/NeuralNetworkOptimizer.hpp b/include/snn/neural_network/optimizer/NeuralNetworkOptimizer.hpp index 00089ece..2b58e378 100644 --- a/include/snn/neural_network/optimizer/NeuralNetworkOptimizer.hpp +++ b/include/snn/neural_network/optimizer/NeuralNetworkOptimizer.hpp @@ -7,8 +7,7 @@ namespace snn::internal { -class SimpleNeuron; -class RecurrentNeuron; +class Neuron; class NeuralNetworkOptimizer { @@ -21,11 +20,14 @@ class NeuralNetworkOptimizer public: NeuralNetworkOptimizer() = default; + NeuralNetworkOptimizer(const NeuralNetworkOptimizer&) = default; // Can cause slicing. 
+ NeuralNetworkOptimizer(NeuralNetworkOptimizer&&) = delete; + NeuralNetworkOptimizer& operator=(const NeuralNetworkOptimizer&) = delete; + NeuralNetworkOptimizer& operator=(NeuralNetworkOptimizer&&) = delete; virtual ~NeuralNetworkOptimizer() = default; [[nodiscard]] virtual auto clone() const -> std::shared_ptr = 0; - virtual void updateWeights(SimpleNeuron& neuron) const = 0; - virtual void updateWeights(RecurrentNeuron& neuron) const = 0; + virtual void updateWeights(Neuron& neuron) const = 0; [[nodiscard]] virtual auto isValid() const -> errorType = 0; diff --git a/include/snn/neural_network/optimizer/StochasticGradientDescent.hpp b/include/snn/neural_network/optimizer/StochasticGradientDescent.hpp index 4aa6a231..d1f15d16 100644 --- a/include/snn/neural_network/optimizer/StochasticGradientDescent.hpp +++ b/include/snn/neural_network/optimizer/StochasticGradientDescent.hpp @@ -19,13 +19,16 @@ class StochasticGradientDescent final : public NeuralNetworkOptimizer float momentum{}; StochasticGradientDescent() = default; + StochasticGradientDescent(const StochasticGradientDescent&) = default; + StochasticGradientDescent(StochasticGradientDescent&&) = delete; + StochasticGradientDescent& operator=(const StochasticGradientDescent&) = delete; + StochasticGradientDescent& operator=(StochasticGradientDescent&&) = delete; StochasticGradientDescent(float learningRate, float momentum); - StochasticGradientDescent(const StochasticGradientDescent& sgd) = default; ~StochasticGradientDescent() final = default; + [[nodiscard]] auto clone() const -> std::shared_ptr final; - void updateWeights(SimpleNeuron& neuron) const final; - void updateWeights(RecurrentNeuron& neuron) const final; + void updateWeights(Neuron& neuron) const final; [[nodiscard]] auto isValid() const -> errorType final; diff --git a/src/neural_network/optimizer/StochasticGradientDescent.cpp b/src/neural_network/optimizer/StochasticGradientDescent.cpp index d95e81e7..81e3acf2 100644 --- 
a/src/neural_network/optimizer/StochasticGradientDescent.cpp +++ b/src/neural_network/optimizer/StochasticGradientDescent.cpp @@ -3,8 +3,7 @@ #include #include -#include "RecurrentNeuron.hpp" -#include "SimpleNeuron.hpp" +#include "Neuron.hpp" namespace snn::internal { @@ -18,10 +17,8 @@ auto StochasticGradientDescent::clone() const -> std::shared_ptr(*this); } -// #ifdef _MSC_VER -// #pragma warning(disable : 4701) -// #endif -void StochasticGradientDescent::updateWeights(SimpleNeuron& neuron) const + +void StochasticGradientDescent::updateWeights(Neuron& neuron) const { const auto& m = this->momentum; const auto input_error = neuron.lastInputs.MultiplyAndAccumulate(neuron.lastError); @@ -35,37 +32,6 @@ void StochasticGradientDescent::updateWeights(SimpleNeuron& neuron) const } } -void StochasticGradientDescent::updateWeights(RecurrentNeuron& neuron) const -{ - auto w = 0; - const auto& m = this->momentum; - const auto& numberOfInputs = neuron.numberOfInputs; - const auto error = neuron.lastError.getSum(); - const auto input_error = neuron.lastInputs.MultiplyAndAccumulate(neuron.lastError); - auto& deltaWeights = neuron.deltaWeights; - auto& weights = neuron.weights; - const auto lr = this->learningRate; - // #pragma omp simd // info C5002: Omp simd loop not vectorized due to reason '1305' (Not enough type information.) - for (w = 0; w < numberOfInputs; ++w) - { - deltaWeights[w] = lr * input_error[w] + m * deltaWeights[w]; - weights[w] += deltaWeights[w]; - } - // TODO(matth): previousOutput should be a Circular like lastInputs and do previousOutput.MultiplyAndAccumulate - // (neuron.lastError). And also rename previousOutput as lastOutput. 
- deltaWeights[w] = this->learningRate * neuron.recurrentError * neuron.lastOutput + m * neuron.deltaWeights[w]; - weights[w] += deltaWeights[w]; - neuron.recurrentError = error; // + neuron.recurrentError * - // neuron.outputFunction->derivative(neuron.previousSum) * weights[w]; - - w++; - deltaWeights[w] = lr * error * neuron.bias + m * deltaWeights[w]; - weights[w] += deltaWeights[w]; -} -// #ifdef _MSC_VER -// #pragma warning(default : 4701) -// #endif - auto StochasticGradientDescent::isValid() const -> errorType { if (this->learningRate < 0.0F || this->learningRate >= 1.0F) From 4a874a828f83c8f308aad4b1e8e68da5e4c3af6d Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Wed, 19 Nov 2025 01:35:21 +0100 Subject: [PATCH 07/10] Fix MSVC compilation --- include/snn/neural_network/layer/neuron/Circular.hpp | 4 ++-- src/neural_network/layer/neuron/Circular.cpp | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/snn/neural_network/layer/neuron/Circular.hpp b/include/snn/neural_network/layer/neuron/Circular.hpp index ebaff273..24ab5522 100644 --- a/include/snn/neural_network/layer/neuron/Circular.hpp +++ b/include/snn/neural_network/layer/neuron/Circular.hpp @@ -16,8 +16,8 @@ class Circular void serialize(Archive& archive, uint32_t version); std::vector queue; - size_t indexPush = -1; - size_t indexGet = -1; + size_t indexPush{}; + size_t indexGet{}; float divider = 1.0F; public: diff --git a/src/neural_network/layer/neuron/Circular.cpp b/src/neural_network/layer/neuron/Circular.cpp index 4f60c485..59e82c09 100644 --- a/src/neural_network/layer/neuron/Circular.cpp +++ b/src/neural_network/layer/neuron/Circular.cpp @@ -16,7 +16,10 @@ void Circular::initialize(const size_t size, [[maybe_unused]] const size_ this->divider = static_cast(size); this->queue.clear(); this->queue.resize(size); + this->indexPush = size; + this->indexGet = size; } + template <> void Circular>::initialize(const size_t size, const size_t dataSize) { @@ -27,6 +30,8 
@@ void Circular>::initialize(const size_t size, const size_t da { d = std::vector(dataSize, 0.0F); } + this->indexPush = size; + this->indexGet = size; } template <> From 4e78e491f01d3d1fdcdb66c287544a02519ed22c Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Wed, 19 Nov 2025 22:39:18 +0100 Subject: [PATCH 08/10] Optimize InputCircular --- include/snn/neural_network/layer/neuron/InputCircular.hpp | 1 - src/neural_network/layer/neuron/Neuron.cpp | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/include/snn/neural_network/layer/neuron/InputCircular.hpp b/include/snn/neural_network/layer/neuron/InputCircular.hpp index f93ed735..47318f18 100644 --- a/include/snn/neural_network/layer/neuron/InputCircular.hpp +++ b/include/snn/neural_network/layer/neuron/InputCircular.hpp @@ -24,7 +24,6 @@ class InputCircular final : public Circular> } auto size = data.size(); auto& inputs = this->queue[this->indexPush]; - inputs.resize(size + sizeof...(extraValues)); std::ranges::copy(data, inputs.begin()); ((inputs[size++] = extraValues), ...); } diff --git a/src/neural_network/layer/neuron/Neuron.cpp b/src/neural_network/layer/neuron/Neuron.cpp index 256bdebd..db1a6401 100644 --- a/src/neural_network/layer/neuron/Neuron.cpp +++ b/src/neural_network/layer/neuron/Neuron.cpp @@ -23,7 +23,7 @@ Neuron::Neuron(NeuronModel model, std::shared_ptr optimi weight = randomInitializeWeight(model.numberOfWeights); } this->weights.back() = std::abs(this->weights.back()); - this->lastInputs.initialize(this->batchSize, model.numberOfInputs); + this->lastInputs.initialize(this->batchSize, model.numberOfWeights); this->lastError.initialize(this->batchSize); this->lastSum.initialize(this->batchSize); this->deltaWeights.resize(model.numberOfWeights, 0); From 7594d5be3b03af9e5fb461174def434d1fa7fbd2 Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Thu, 20 Nov 2025 00:19:45 +0100 Subject: [PATCH 09/10] Fix activation function --- 
.../neuron/activation_function/ActivationFunction.hpp | 3 +++ .../layer/neuron/activation_function/GELU.hpp | 5 +++-- .../layer/neuron/activation_function/Identity.hpp | 7 ++++--- .../layer/neuron/activation_function/ImprovedSigmoid.hpp | 9 +++++---- .../layer/neuron/activation_function/LeakyReLU.hpp | 6 +++--- .../layer/neuron/activation_function/ReLU.hpp | 4 ++-- tests/unit_tests/LocallyConnectedTests.cpp | 4 ++-- 7 files changed, 22 insertions(+), 16 deletions(-) diff --git a/include/snn/neural_network/layer/neuron/activation_function/ActivationFunction.hpp b/include/snn/neural_network/layer/neuron/activation_function/ActivationFunction.hpp index 3160fe02..e2ab2a9b 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/ActivationFunction.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/ActivationFunction.hpp @@ -29,6 +29,9 @@ class ActivationFunction static std::vector> activationFunctions; + protected: + static constexpr float largeFloat = 1e4; + public: const float min; const float max; diff --git a/include/snn/neural_network/layer/neuron/activation_function/GELU.hpp b/include/snn/neural_network/layer/neuron/activation_function/GELU.hpp index ebc031bb..4d720b57 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/GELU.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/GELU.hpp @@ -15,13 +15,14 @@ class GaussianErrorLinearUnit final : public ActivationFunction public: GaussianErrorLinearUnit() - : ActivationFunction(0, std::numeric_limits::infinity()) + : ActivationFunction(0, largeFloat) { } [[nodiscard]] auto function(const float x) const -> float final { - return x * (std::tanhf(1.702F * x / 2.0F) + 1.0F) / 2.0F; // NOLINT(*magic-numbers) + const float y = x * (std::tanhf(1.702F * x / 2.0F) + 1.0F) / 2.0F; // NOLINT(*magic-numbers) + return std::min(y, this->max); } [[nodiscard]] auto derivative(const float x) const -> float final diff --git 
a/include/snn/neural_network/layer/neuron/activation_function/Identity.hpp b/include/snn/neural_network/layer/neuron/activation_function/Identity.hpp index 9c882ae3..ddec8ed3 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/Identity.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/Identity.hpp @@ -1,4 +1,5 @@ #pragma once +#include #include #include "ActivationFunction.hpp" @@ -14,12 +15,12 @@ class Identity final : public ActivationFunction public: Identity() - : ActivationFunction(-std::numeric_limits::infinity(), +std::numeric_limits::infinity()) + : ActivationFunction(-largeFloat, largeFloat) { } - [[nodiscard]] auto function(const float x) const -> float final { return x; } + [[nodiscard]] auto function(const float x) const -> float final { return std::clamp(x, this->min, this->max); } [[nodiscard]] auto derivative([[maybe_unused]] const float x) const -> float final { return 1.0F; } }; -} // namespace snn::internal \ No newline at end of file +} // namespace snn::internal diff --git a/include/snn/neural_network/layer/neuron/activation_function/ImprovedSigmoid.hpp b/include/snn/neural_network/layer/neuron/activation_function/ImprovedSigmoid.hpp index fbfb211e..50da7590 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/ImprovedSigmoid.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/ImprovedSigmoid.hpp @@ -1,6 +1,6 @@ #pragma once +#include #include -#include #include "ActivationFunction.hpp" @@ -15,13 +15,14 @@ class ImprovedSigmoid final : public ActivationFunction public: ImprovedSigmoid() - : ActivationFunction(-std::numeric_limits::infinity(), +std::numeric_limits::infinity()) + : ActivationFunction(-largeFloat, largeFloat) { } [[nodiscard]] auto function(const float x) const -> float final { - return (1.0F / (1.0F + expf(-x))) + (x * 0.05F); // NOLINT(*magic-numbers) + const float y = (1.0F / (1.0F + expf(-x))) + (x * 0.05F); // NOLINT(*magic-numbers) + return 
std::clamp(y, this->min, this->max); } [[nodiscard]] auto derivative(const float x) const -> float final @@ -29,4 +30,4 @@ class ImprovedSigmoid final : public ActivationFunction return expf(x) / powf((expf(x) + 1.0F), 2); } }; -} // namespace snn::internal \ No newline at end of file +} // namespace snn::internal diff --git a/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp b/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp index 8a5b8801..4fdedebf 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/LeakyReLU.hpp @@ -15,7 +15,7 @@ class LeakyRectifiedLinearUnit final : public ActivationFunction public: LeakyRectifiedLinearUnit() - : ActivationFunction(0, std::numeric_limits::infinity()) + : ActivationFunction(0, largeFloat) { } @@ -23,8 +23,8 @@ class LeakyRectifiedLinearUnit final : public ActivationFunction [[nodiscard]] auto function(const float x) const -> float final { - return std::clamp(negativeSlopeAngle * x, 0.0F, 1.0e4F); - ; + const float y = (x > 0.0F) ? 
x : negativeSlopeAngle * x; + return std::min(y, this->max); } [[nodiscard]] auto derivative(const float x) const -> float final diff --git a/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp b/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp index 4657d377..f94b5c09 100644 --- a/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp +++ b/include/snn/neural_network/layer/neuron/activation_function/ReLU.hpp @@ -15,11 +15,11 @@ class RectifiedLinearUnit final : public ActivationFunction public: RectifiedLinearUnit() - : ActivationFunction(0, std::numeric_limits::infinity()) + : ActivationFunction(0, largeFloat) { } - [[nodiscard]] auto function(const float x) const -> float final { return std::clamp(x, 0.0F, 1.0e4F); } + [[nodiscard]] auto function(const float x) const -> float final { return std::clamp(x, 0.0F, this->max); } [[nodiscard]] auto derivative(const float x) const -> float final { return (x > 0.0F) ? 1.0F : 0.0F; } }; diff --git a/tests/unit_tests/LocallyConnectedTests.cpp b/tests/unit_tests/LocallyConnectedTests.cpp index ef6c8b06..899d552f 100644 --- a/tests/unit_tests/LocallyConnectedTests.cpp +++ b/tests/unit_tests/LocallyConnectedTests.cpp @@ -73,7 +73,7 @@ TEST(LocallyConnected, ComplexeLayerLocallyConnected2D) std::iota(std::begin(input), std::end(input), 1.0F); std::iota(std::begin(error), std::end(error), 1.0F); - const std::vector expectedOutput{5920, 6163, 12535, 12757, 39239, 39701, 41325, 41673}; + const std::vector expectedOutput{5920, 6163, 10000, 10000, 10000, 10000, 10000, 10000}; const std::vector expectedBackOutput{ 5, 11, 17, 23, 29, 35, 263, 277, 291, 305, 41, 47, 53, 59, 65, 71, 347, 361, 375, 389, 77, 83, 89, 95, 101, 107, 431, 445, 459, 473, 809, 831, 853, 875, @@ -127,4 +127,4 @@ auto createDataForLocallyConnectedTests() -> Dataset vector2D expectedOutputs = {{0, 1}, {0, 1}, {1, 0}}; return {problem::classification, inputData, expectedOutputs}; -} \ No newline at end of file +} 
From e40f817adc07dd7a9edc124d00a3a9b0c2091a35 Mon Sep 17 00:00:00 2001 From: Matthieu HERNANDEZ Date: Thu, 20 Nov 2025 00:57:26 +0100 Subject: [PATCH 10/10] Evaluate best model only on master --- .github/workflows/clang_linux.yml | 1 + .github/workflows/gcc_linux.yml | 14 +++++++++++--- .github/workflows/msvc_windows.yml | 1 + 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/.github/workflows/clang_linux.yml b/.github/workflows/clang_linux.yml index 6719e664..c8fc5f7e 100644 --- a/.github/workflows/clang_linux.yml +++ b/.github/workflows/clang_linux.yml @@ -7,6 +7,7 @@ on: push: branches: - master + - develop jobs: build: diff --git a/.github/workflows/gcc_linux.yml b/.github/workflows/gcc_linux.yml index 729f46de..59fb206d 100644 --- a/.github/workflows/gcc_linux.yml +++ b/.github/workflows/gcc_linux.yml @@ -7,6 +7,7 @@ on: push: branches: - master + - develop jobs: build: @@ -74,10 +75,17 @@ jobs: ./build/tests/dataset_tests/DatasetTests --gtest_filter=DailyMinTemperaturesTest* - name: MNIST run: | - ./build/tests/dataset_tests/DatasetTests --gtest_filter=MnistTest* + ./build/tests/dataset_tests/DatasetTests --gtest_filter=MnistTest*:-*evaluateBestNeuralNetwork - name: Fashion MNIST run: | - ./build/tests/dataset_tests/DatasetTests --gtest_filter=FashionMnistTest* + ./build/tests/dataset_tests/DatasetTests --gtest_filter=FashionMnistTest*:-*evaluateBestNeuralNetwork - name: CIFAR-10 run: | - ./build/tests/dataset_tests/DatasetTests --gtest_filter=Cifar10Test* \ No newline at end of file + ./build/tests/dataset_tests/DatasetTests --gtest_filter=Cifar10Test*:-*evaluateBestNeuralNetwork + - name: AudioCatsAndDogs + run: | + ./build/tests/dataset_tests/DatasetTests --gtest_filter=AudioCatsAndDogs*:-*evaluateBestNeuralNetwork + - name: Evaluate best models + if: github.ref_name == 'master' + run: | + ./build/tests/dataset_tests/DatasetTests --gtest_filter=*evaluateBestNeuralNetwork diff --git a/.github/workflows/msvc_windows.yml 
b/.github/workflows/msvc_windows.yml index 2a1f2127..c575fe5e 100644 --- a/.github/workflows/msvc_windows.yml +++ b/.github/workflows/msvc_windows.yml @@ -7,6 +7,7 @@ on: push: branches: - master + - develop jobs: build: