Mirror of https://github.com/Sosokker/CPP-Neural-Network.git, synced 2025-12-18 18:14:04 +01:00
Update Layers/ Use as parent class/ Add Dense Layers
parent 0fd852cab9
commit 09a17dfa24
src/Activation/Activation.hpp (new file, 16 lines)
@@ -0,0 +1,16 @@
#include <functional>
#include <Eigen/Dense>
#include <Layers/Layers.hpp>

class Activation : public Layer {
public:
    Activation(std::function<Eigen::VectorXd(const Eigen::VectorXd&)> activation,
               std::function<Eigen::VectorXd(const Eigen::VectorXd&)> activation_prime);

    void forward(const Eigen::VectorXd& input) override;
    void backward(const Eigen::VectorXd& output_gradient, double learning_rate) override;

private:
    std::function<Eigen::VectorXd(const Eigen::VectorXd&)> activation;
    std::function<Eigen::VectorXd(const Eigen::VectorXd&)> activation_prime;
};
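Activation.cpp is not part of this commit, so the definitions below are only a sketch of what the declarations above imply: forward caches the input and applies the functor, and backward applies the chain rule through activation_prime. The cwiseProduct-based backward is an assumption, not the author's confirmed implementation.

// Sketch only: assumed definitions for the declarations in Activation.hpp.
#include <utility>
#include "Activation.hpp"

Activation::Activation(std::function<Eigen::VectorXd(const Eigen::VectorXd&)> activation,
                       std::function<Eigen::VectorXd(const Eigen::VectorXd&)> activation_prime)
    : activation(std::move(activation)), activation_prime(std::move(activation_prime)) {}

void Activation::forward(const Eigen::VectorXd& input) {
    this->input = input;         // cache the input for the backward pass
    output = activation(input);  // apply the nonlinearity elementwise
}

void Activation::backward(const Eigen::VectorXd& output_gradient, double /*learning_rate*/) {
    // Chain rule for an elementwise function: dE/dx = dE/dy multiplied
    // elementwise by f'(x). An activation has no parameters to update.
    input_gradient = output_gradient.cwiseProduct(activation_prime(input));
}

Under that assumption, a tanh layer would be built by passing the function and its derivative to the constructor:

Activation tanh_layer(
    [](const Eigen::VectorXd& x) -> Eigen::VectorXd { return x.array().tanh().matrix(); },
    [](const Eigen::VectorXd& x) -> Eigen::VectorXd { return (1.0 - x.array().tanh().square()).matrix(); });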
src/Layers/DenseLayer.cpp (new file, 26 lines)
@@ -0,0 +1,26 @@
#include "DenseLayer.hpp"

DenseLayer::DenseLayer(int input_size, int output_size) {
    weights = Eigen::MatrixXd::Random(output_size, input_size);
    bias = Eigen::VectorXd::Random(output_size);
}

void DenseLayer::forward(const Eigen::VectorXd& input) {
    this->input = input;
    output = weights * input + bias;
}

void DenseLayer::backward(const Eigen::VectorXd& output_gradient, double learning_rate) {
    Eigen::MatrixXd weights_gradient = output_gradient * input.transpose();
    input_gradient = weights.transpose() * output_gradient;
    weights -= learning_rate * weights_gradient;
    bias -= learning_rate * output_gradient;
}

const Eigen::VectorXd& DenseLayer::getOutput() const {
    return output;
}

const Eigen::VectorXd& DenseLayer::getInputGradient() const {
    return input_gradient;
}
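backward implements the standard dense-layer gradients for y = Wx + b: dE/dW = dE/dy * x^T, dE/dx = W^T * dE/dy, and dE/db = dE/dy, with the SGD update fused into the same call. A quick way to sanity-check the input gradient is a finite-difference comparison; this harness is hypothetical (not part of the commit), and passing learning_rate = 0 keeps the parameters fixed so repeated forward calls stay consistent:

// Hypothetical gradient check for DenseLayer::backward, using E = 0.5*||y||^2
// so that dE/dy is simply y.
#include <iostream>
#include <Eigen/Dense>
#include "DenseLayer.hpp"

int main() {
    DenseLayer layer(3, 2);
    Eigen::VectorXd x = Eigen::VectorXd::Random(3);

    layer.forward(x);
    Eigen::VectorXd y = layer.getOutput();

    layer.backward(y, /*learning_rate=*/0.0);  // lr = 0: compute gradients only
    Eigen::VectorXd analytic = layer.getInputGradient();

    // Central finite differences on E(x) = 0.5*||Wx + b||^2.
    const double eps = 1e-6;
    Eigen::VectorXd numeric(3);
    for (int i = 0; i < 3; ++i) {
        Eigen::VectorXd xp = x, xm = x;
        xp(i) += eps; xm(i) -= eps;
        layer.forward(xp); double ep = 0.5 * layer.getOutput().squaredNorm();
        layer.forward(xm); double em = 0.5 * layer.getOutput().squaredNorm();
        numeric(i) = (ep - em) / (2.0 * eps);
    }
    std::cout << "max abs diff: " << (analytic - numeric).cwiseAbs().maxCoeff() << "\n";
}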
src/Layers/DenseLayer.hpp (new file, 17 lines)
@@ -0,0 +1,17 @@
#include <Eigen/Dense>
#include "Layers.hpp"

class DenseLayer : public Layer {
public:
    DenseLayer(int input_size, int output_size);

    void forward(const Eigen::VectorXd& input) override;
    void backward(const Eigen::VectorXd& output_gradient, double learning_rate) override;

    const Eigen::VectorXd& getOutput() const;
    const Eigen::VectorXd& getInputGradient() const;

private:
    Eigen::MatrixXd weights;
    Eigen::VectorXd bias;
};
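As declared, DenseLayer(input_size, output_size) stores an output_size x input_size weight matrix, so forward maps an input_size-vector to an output_size-vector. A minimal illustrative use (build setup assumed):

#include <cassert>
#include <Eigen/Dense>
#include "DenseLayer.hpp"

int main() {
    DenseLayer layer(4, 3);                   // weights: 3x4, bias: 3
    layer.forward(Eigen::VectorXd::Zero(4));  // zero input, so output == bias
    assert(layer.getOutput().size() == 3);
}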
src/Layers/Layers.hpp
@@ -1,51 +1,22 @@
-#pragma once
-#include <vector>
-#include <utility>
+#ifndef LAYERS_HPP
+#define LAYERS_HPP
+
+#include <Eigen/Dense>
 
 class Layer {
 public:
-    virtual void forward(const std::vector<double>& inputs, bool training) = 0;
-    virtual void backward(std::vector<double>& dvalues) = 0;
-    virtual std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> get_parameters() const = 0;
-    virtual void set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) = 0;
+    Layer();
+
+    virtual void forward(const Eigen::VectorXd& input_data);
+    virtual void backward(const Eigen::VectorXd& output_gradient, double learning_rate);
+
+    const Eigen::VectorXd& getOutput() const;
+    const Eigen::VectorXd& getInputGradient() const;
+
+protected:
+    Eigen::VectorXd input;
+    Eigen::VectorXd output;
+    Eigen::VectorXd input_gradient;
 };
-
-class Layer_Dense : public Layer {
-private:
-    double weight_regularizer_l1, weight_regularizer_l2,
-           bias_regularizer_l1, bias_regularizer_l2;
-    std::vector<std::vector<double>> weights, biases;
-    std::vector<double> inputs;
-
-public:
-    Layer_Dense(int n_inputs, int n_neurons,
-                double weight_regularizer_l1,
-                double weight_regularizer_l2,
-                double bias_regularizer_l1,
-                double bias_regularizer_l2);
-
-    void forward(const std::vector<double>& inputs, bool training) override;
-    void backward(std::vector<double>& dvalues) override;
-    std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> get_parameters() const override;
-    void set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) override;
-};
-
-class Layer_Dropout : public Layer {
-private:
-    double rate;
-
-public:
-    Layer_Dropout(double rate);
-
-    void forward(const std::vector<double>& inputs, bool training) override;
-    void backward(std::vector<double>& dvalues) override;
-};
-
-class Layer_Input {
-private:
-    std::vector<double> output;
-
-public:
-    void forward(const std::vector<double>& inputs, bool training);
-};
+
+#endif // LAYERS_HPP
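The commit message's "Use as parent class" is the point of this rewrite: every layer now shares the forward/backward/getOutput/getInputGradient interface, so a network can be a container of base-class pointers mixing DenseLayer and Activation nodes. A hypothetical training step under that assumption (train_step and the include paths are illustrative, not from the repo):

// Sketch: one SGD step through a chain of Layer-derived objects.
#include <memory>
#include <vector>
#include <Eigen/Dense>
#include "Layers/Layers.hpp"      // assumed include paths
#include "Layers/DenseLayer.hpp"

void train_step(std::vector<std::unique_ptr<Layer>>& network,
                const Eigen::VectorXd& x,
                const Eigen::VectorXd& target,
                double learning_rate) {
    // Forward: each layer consumes the previous layer's output.
    const Eigen::VectorXd* current = &x;
    for (auto& layer : network) {
        layer->forward(*current);
        current = &layer->getOutput();
    }

    // MSE-style gradient at the output, then backpropagate in reverse,
    // handing each layer's input gradient to the layer before it.
    Eigen::VectorXd grad = *current - target;
    for (auto it = network.rbegin(); it != network.rend(); ++it) {
        (*it)->backward(grad, learning_rate);
        grad = (*it)->getInputGradient();
    }
}

// Example: a 4 -> 8 -> 2 MLP, one step:
//   std::vector<std::unique_ptr<Layer>> net;
//   net.push_back(std::make_unique<DenseLayer>(4, 8));
//   net.push_back(std::make_unique<DenseLayer>(8, 2));
//   train_step(net, x, target, 0.01);

One caveat worth noting: deleting derived layers through a Layer* (as std::unique_ptr<Layer> does) is undefined behavior until the base class declares a virtual destructor, e.g. virtual ~Layer() = default;, which the new header does not yet do.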