Initial Commit

sosokker · 2023-08-22 01:07:50 +07:00 · commit fde93f30ac
15 changed files with 661 additions and 0 deletions

.gitignore (vendored, new file)
@@ -0,0 +1,56 @@
# Compiled Object files
*.o
# Compiled Dynamic libraries
*.so
# Compiled Static libraries
*.a
# Compiled executables
*.exe
# Compiled binaries
/bin/
/build/
/out/
# CMake build directory
CMakeFiles/
CMakeCache.txt
CMakeScripts/
CMakeVars.txt
Makefile
cmake_install.cmake
# Visual Studio Code settings
.vscode/
# Compiled binaries for Windows
*.dll
*.lib
# macOS-specific files
.DS_Store
# Compiled Python files
__pycache__/
# Ignore any logs or temporary files
*.log
*.tmp
# Ignore user-specific project files
.idea/
*.suo
*.user
*.sln.docstates
# Ignore user-specific files
*.swp
*~

src/Accuracy/Accuracy.cpp (new file)
@@ -0,0 +1,72 @@
#include "Accuracy.hpp"

Accuracy::Accuracy() : accumulated_sum(0.0), accumulated_count(0) {}

double Accuracy::calculate(const std::vector<int>& predictions, const std::vector<int>& y) {
    // Compare each prediction against its ground truth (1 = match, 0 = miss)
    std::vector<int> comparisons(predictions.size());
    for (size_t i = 0; i < predictions.size(); ++i) {
        comparisons[i] = compare(predictions[i], y[i]);
    }
    int correct = std::accumulate(comparisons.begin(), comparisons.end(), 0);
    double accuracy = static_cast<double>(correct) / predictions.size();
    // Keep running totals so accuracy can also be reported across batches
    accumulated_sum += correct;
    accumulated_count += static_cast<int>(predictions.size());
    return accuracy;
}

double Accuracy::calculate_accumulated() {
    return accumulated_sum / accumulated_count;
}

void Accuracy::new_pass() {
    accumulated_sum = 0.0;
    accumulated_count = 0;
}

int Accuracy::compare(int prediction, int ground_truth) {
    return prediction == ground_truth ? 1 : 0;
}

int Accuracy_Categorical::compare(int prediction, int ground_truth) {
    return prediction == ground_truth ? 1 : 0;
}

Accuracy_Regression::Accuracy_Regression() : precision(0.0) {}

void Accuracy_Regression::init(const std::vector<double>& y, bool reinit) {
    // Derive a comparison tolerance from the magnitude of the targets
    if (precision == 0.0 || reinit) {
        precision = std::sqrt(std::accumulate(y.begin(), y.end(), 0.0,
            [](double acc, double val) { return acc + val * val; })) / 250.0;
    }
}

int Accuracy_Regression::compare(int prediction, int ground_truth) {
    return std::abs(prediction - ground_truth) < precision ? 1 : 0;
}
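
A short usage sketch (not part of the commit) showing how Accuracy_Categorical behaves; the sample values below are illustrative only:

#include "Accuracy.hpp"
#include <iostream>

int main() {
    Accuracy_Categorical acc;
    std::vector<int> predictions = {0, 1, 2, 2};
    std::vector<int> labels      = {0, 1, 1, 2};
    std::cout << acc.calculate(predictions, labels) << "\n"; // 3 of 4 match: prints 0.75
    acc.new_pass(); // reset the running totals between epochs
    return 0;
}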

src/Accuracy/Accuracy.hpp (new file)
@@ -0,0 +1,36 @@
#ifndef ACCURACY_HPP
#define ACCURACY_HPP
#include <vector>
#include <numeric>
#include <cmath>
class Accuracy {
private:
    double accumulated_sum;
    int accumulated_count;
public:
    Accuracy();
    virtual ~Accuracy() = default; // the Model holds Accuracy through base pointers
    double calculate(const std::vector<int>& predictions, const std::vector<int>& y);
    double calculate_accumulated();
    void new_pass();
    virtual int compare(int prediction, int ground_truth);
};
class Accuracy_Categorical : public Accuracy {
public:
int compare(int prediction, int ground_truth) override;
};
class Accuracy_Regression : public Accuracy {
private:
double precision;
public:
Accuracy_Regression();
void init(const std::vector<double>& y, bool reinit = false);
int compare(int prediction, int ground_truth) override;
};
#endif // ACCURACY_HPP

src/ActivationFunction/ActivationFunction.cpp (new file)
@@ -0,0 +1,15 @@
#include "ActivationFunction.hpp"
void Activation_ReLU::forward(std::vector<double>& inputs, bool training) {
// Implementation
}
void Activation_ReLU::backward(std::vector<double>& dvalues) {
// Implementation
}
std::vector<double> Activation_ReLU::predictions(const std::vector<double>& outputs) {
// Implementation
}
// Implement similar member functions for other Activation classes
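
As a hedged sketch of what one of those remaining classes could look like, here is a possible Activation_Softmax::forward under the same single-sample vector layout (illustrative, not part of the commit):

#include <algorithm>
#include <cmath>

void Activation_Softmax::forward(std::vector<double>& inputs, bool training) {
    this->inputs = inputs;
    output.resize(inputs.size());
    double max_val = *std::max_element(inputs.begin(), inputs.end());
    double sum = 0.0;
    for (size_t i = 0; i < inputs.size(); ++i) {
        output[i] = std::exp(inputs[i] - max_val); // subtract the max for numerical stability
        sum += output[i];
    }
    for (double& v : output) {
        v /= sum; // normalize so the outputs form a probability distribution
    }
}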

src/ActivationFunction/ActivationFunction.hpp (new file)
@@ -0,0 +1,50 @@
#ifndef ACTIVATIONFUNCTION_HPP
#define ACTIVATIONFUNCTION_HPP
#include <vector>
class Activation_ReLU {
public:
void forward(std::vector<double>& inputs, bool training);
void backward(std::vector<double>& dvalues);
std::vector<double> predictions(const std::vector<double>& outputs);
private:
std::vector<double> inputs;
std::vector<double> output;
std::vector<double> dinputs;
};
class Activation_Softmax {
public:
void forward(std::vector<double>& inputs, bool training);
void backward(std::vector<double>& dvalues);
std::vector<double> predictions(const std::vector<double>& outputs);
private:
std::vector<double> inputs;
std::vector<double> output;
std::vector<double> dinputs;
};
class Activation_Sigmoid {
public:
void forward(std::vector<double>& inputs, bool training);
void backward(std::vector<double>& dvalues);
std::vector<double> predictions(const std::vector<double>& outputs);
private:
std::vector<double> inputs;
std::vector<double> output;
std::vector<double> dinputs;
};
class Activation_Linear {
public:
void forward(std::vector<double>& inputs, bool training);
void backward(std::vector<double>& dvalues);
std::vector<double> predictions(const std::vector<double>& outputs);
private:
std::vector<double> inputs;
std::vector<double> output;
std::vector<double> dinputs;
};
#endif // ACTIVATIONFUNCTION_HPP

src/CMakeLists.txt (new file)
@@ -0,0 +1,32 @@
cmake_minimum_required(VERSION 3.10)
project(cpp_neural_network)

# Set C++ standard
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Add the source files (paths are relative to this src/ directory,
# matching the files added in this commit)
set(SOURCES
    Model/Model.cpp
    Accuracy/Accuracy.cpp
    Optimizer/Optimizer.cpp
    Loss/Loss.cpp
    Layers/Layers.cpp
    ActivationFunction/ActivationFunction.cpp
    main.cpp
)

# Add executable
add_executable(${PROJECT_NAME} ${SOURCES})

# Include the src/ root so includes such as "Layers/Layers.hpp"
# and "Model/Model.hpp" resolve
target_include_directories(${PROJECT_NAME}
    PRIVATE
        ${CMAKE_CURRENT_SOURCE_DIR}
)

src/Layers/Layers.cpp (new file)
@@ -0,0 +1,56 @@
#include "Layers.hpp"
#include <random>
Layer_Dense::Layer_Dense(int n_inputs, int n_neurons,
double weight_regularizer_l1,
double weight_regularizer_l2,
double bias_regularizer_l1,
double bias_regularizer_l2)
: weight_regularizer_l1(weight_regularizer_l1),
weight_regularizer_l2(weight_regularizer_l2),
bias_regularizer_l1(bias_regularizer_l1),
bias_regularizer_l2(bias_regularizer_l2)
{
    // Initialize weights with small random values and biases with zeros,
    // using the <random> facilities included above
    std::mt19937 gen(std::random_device{}());
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    weights.resize(n_inputs, std::vector<double>(n_neurons));
    for (auto& row : weights) {
        for (double& weight : row) {
            weight = 0.01 * dist(gen);
        }
    }
    biases.resize(1, std::vector<double>(n_neurons, 0.0));
}
void Layer_Dense::forward(const std::vector<double>& inputs, bool training) {
this->inputs = inputs;
// Forward pass implementation
}
void Layer_Dense::backward(std::vector<double>& dvalues) {
// Backward pass implementation
}
std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> Layer_Dense::get_parameters() const {
return std::make_pair(weights, biases);
}
void Layer_Dense::set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) {
this->weights = weights;
this->biases = biases;
}
Layer_Dropout::Layer_Dropout(double rate) : rate(1 - rate) {} // store the keep probability, not the drop rate

void Layer_Dropout::forward(const std::vector<double>& inputs, bool training) {
    // Forward pass implementation
}
void Layer_Dropout::backward(std::vector<double>& dvalues) {
// Backward pass implementation
}
void Layer_Input::forward(const std::vector<double>& inputs, bool training) {
this->output = inputs;
}
// Implement similar member functions for other Layer classes
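
For reference, the stubbed dense forward pass would compute output[j] = sum_i inputs[i] * weights[i][j] + biases[0][j]. A minimal sketch, assuming Layer_Dense gains a std::vector<double> output member that the header does not declare yet:

void Layer_Dense::forward(const std::vector<double>& inputs, bool training) {
    this->inputs = inputs;
    size_t n_neurons = weights.empty() ? 0 : weights[0].size();
    output.assign(n_neurons, 0.0);
    for (size_t j = 0; j < n_neurons; ++j) {
        for (size_t i = 0; i < inputs.size(); ++i) {
            output[j] += inputs[i] * weights[i][j]; // single-sample dot product
        }
        output[j] += biases[0][j]; // biases are stored as a 1 x n_neurons row
    }
}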

src/Layers/Layers.hpp (new file)
@@ -0,0 +1,53 @@
#pragma once
#include <vector>
#include <utility>
class Layer {
public:
    virtual ~Layer() = default; // layers are held through base pointers
    virtual void forward(const std::vector<double>& inputs, bool training) = 0;
    virtual void backward(std::vector<double>& dvalues) = 0;
    // Parameter accessors default to no-ops so parameterless layers such as
    // Layer_Dropout do not become abstract by omission.
    virtual std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> get_parameters() const { return {}; }
    virtual void set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) {}
};
class Layer_Dense : public Layer {
private:
double weight_regularizer_l1, weight_regularizer_l2,
bias_regularizer_l1, bias_regularizer_l2;
std::vector<std::vector<double>> weights, biases;
std::vector<double> inputs;
public:
Layer_Dense(int n_inputs, int n_neurons,
double weight_regularizer_l1,
double weight_regularizer_l2,
double bias_regularizer_l1,
double bias_regularizer_l2);
void forward(const std::vector<double>& inputs, bool training) override;
void backward(std::vector<double>& dvalues) override;
std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> get_parameters() const override;
void set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) override;
};
class Layer_Dropout : public Layer {
private:
double rate;
public:
Layer_Dropout(double rate);
void forward(const std::vector<double>& inputs, bool training) override;
void backward(std::vector<double>& dvalues) override;
};
class Layer_Input {
private:
std::vector<double> output;
public:
void forward(const std::vector<double>& inputs, bool training);
};
// Add other Layer classes as needed
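
Since Layer_Dropout stores the keep probability (1 - rate), its training-time forward pass would apply an inverted-dropout mask. A standalone sketch under that assumption; the helper name is illustrative, not committed code:

#include <random>
#include <vector>

void dropout_forward_sketch(const std::vector<double>& inputs,
                            std::vector<double>& output,
                            double keep_rate, bool training) {
    if (!training) {
        output = inputs; // dropout is disabled at inference time
        return;
    }
    std::mt19937 gen(std::random_device{}());
    std::bernoulli_distribution keep(keep_rate);
    output.resize(inputs.size());
    for (size_t i = 0; i < inputs.size(); ++i) {
        // Scale kept units by 1/keep_rate so expected activations are unchanged
        output[i] = keep(gen) ? inputs[i] / keep_rate : 0.0;
    }
}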

src/Loss/Loss.cpp (new file)
@@ -0,0 +1,39 @@
#include "Loss.hpp"
double Loss::regularization_loss() {
// Implementation
}
// Implement other member functions of Loss class
double Loss_CategoricalCrossentropy::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
// Implementation
}
void Loss_CategoricalCrossentropy::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
// Implementation
}
double Loss_BinaryCrossentropy::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
// Implementation
}
void Loss_BinaryCrossentropy::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
// Implementation
}
double Loss_MeanSquaredError::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
// Implementation
}
void Loss_MeanSquaredError::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
// Implementation
}
double Loss_MeanAbsoluteError::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
// Implementation
}
void Loss_MeanAbsoluteError::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
// Implementation
}
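
As a hedged example of one forward/backward pair, single-sample mean squared error could look like the following. The in-place gradient convention (dvalues holds the predictions on entry and the gradient on exit) is an assumption, not something this commit specifies:

double Loss_MeanSquaredError::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
    double sum = 0.0;
    for (size_t i = 0; i < y_pred.size(); ++i) {
        double diff = y_true[i] - y_pred[i];
        sum += diff * diff;
    }
    return sum / y_pred.size(); // mean of the squared residuals
}

void Loss_MeanSquaredError::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
    size_t n = dvalues.size();
    for (size_t i = 0; i < n; ++i) {
        // d(MSE)/d(y_pred_i) = -2 * (y_true_i - y_pred_i) / n
        dvalues[i] = -2.0 * (y_true[i] - dvalues[i]) / n;
    }
}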

src/Loss/Loss.hpp (new file)
@@ -0,0 +1,49 @@
#ifndef LOSS_HPP
#define LOSS_HPP
#include <vector>
#include <cmath>
#include <iostream>
#include "Layers/Layers.hpp"
class Loss {
public:
    virtual ~Loss() = default; // losses are held through base pointers
    virtual double regularization_loss();
    // Pure virtual: every concrete loss must define forward and backward,
    // so the base class never needs (or provides) definitions of its own.
    virtual double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) = 0;
    virtual void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) = 0;
    virtual void remember_trainable_layers(const std::vector<Layer*>& trainable_layers);
    virtual double calculate(const std::vector<double>& output, const std::vector<double>& y, bool include_regularization = false);
    virtual double calculate_accumulated(bool include_regularization = false);
    virtual void new_pass();
private:
    std::vector<Layer*> trainable_layers;
    double accumulated_sum;
    int accumulated_count;
};
class Loss_CategoricalCrossentropy : public Loss {
public:
double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
};
class Loss_BinaryCrossentropy : public Loss {
public:
double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
};
class Loss_MeanSquaredError : public Loss {
public:
double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
};
class Loss_MeanAbsoluteError : public Loss {
public:
double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
};
#endif // LOSS_HPP

src/Model/Model.cpp (new file)
@@ -0,0 +1,73 @@
#include "Model/Model.hpp"
#include "ActivationFunction/ActivationFunction.hpp"
#include "Loss/Loss.hpp"
#include "Optimizer/Optimizer.hpp"
#include "Accuracy/Accuracy.hpp"
#include "Layers/Layers.hpp"
Model::Model() : loss(nullptr), optimizer(nullptr), accuracy(nullptr) {}
void Model::add(Layer& layer) {
layers.push_back(&layer);
}
void Model::set_loss(Loss& loss) {
this->loss = &loss;
}
void Model::set_optimizer(Optimizer& optimizer) {
this->optimizer = &optimizer;
}
void Model::set_accuracy(Accuracy& accuracy) {
this->accuracy = &accuracy;
}
void Model::finalize() {
// Implement finalize method
}
void Model::train(const std::vector<std::vector<double>>& X, const std::vector<std::vector<double>>& y,
int epochs, int batch_size, int print_every, const std::vector<std::vector<double>>& validation_data) {
// Implement train method
}
void Model::evaluate(const std::vector<std::vector<double>>& X_val, const std::vector<std::vector<double>>& y_val, int batch_size) {
// Implement evaluate method
}
std::vector<std::vector<double>> Model::predict(const std::vector<std::vector<double>>& X, int batch_size) {
    // Implement predict method
    return {}; // placeholder so the stub compiles
}
void Model::save_parameters(const std::string& path) {
// Implement save_parameters method
}
void Model::load_parameters(const std::string& path) {
// Implement load_parameters method
}
void Model::save(const std::string& path) {
// Implement save method
}
void Model::forward(const std::vector<std::vector<double>>& X, bool training) {
// Implement forward method
}
void Model::backward(const std::vector<std::vector<double>>& output, const std::vector<std::vector<double>>& y) {
// Implement backward method
}
std::vector<std::vector<std::vector<double>>> Model::get_parameters() {
    // Implement get_parameters method
    return {}; // placeholder so the stub compiles
}
void Model::set_parameters(const std::vector<std::vector<std::vector<double>>>& parameters) {
// Implement set_parameters method
}
Model Model::load(const std::string& path) {
    // Implement load method
    return Model(); // placeholder so the stub compiles
}
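
Putting the pieces together, the intended call pattern appears to be the one below. This wiring sketch uses only the declared API; the layer sizes and hyperparameters are illustrative, and because the bodies above are stubs it shows intended usage rather than a working training run:

#include "Model/Model.hpp"

int main() {
    Layer_Dense dense1(2, 64, 0.0, 5e-4, 0.0, 5e-4); // L2 regularization on layer 1
    Layer_Dense dense2(64, 3, 0.0, 0.0, 0.0, 0.0);
    Loss_CategoricalCrossentropy loss;
    Optimizer_SGD optimizer(1.0, 1e-3, 0.9);
    Accuracy_Categorical accuracy;

    Model model;
    model.add(dense1);
    model.add(dense2);
    model.set_loss(loss);
    model.set_optimizer(optimizer);
    model.set_accuracy(accuracy);
    model.finalize();

    std::vector<std::vector<double>> X, y; // dataset loading is not part of this commit
    model.train(X, y, 10, 32, 100, X); // epochs, batch_size, print_every, validation_data
    return 0;
}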

src/Model/Model.hpp (new file)
@@ -0,0 +1,42 @@
#pragma once
#include <string>
#include <vector>
#include "ActivationFunction/ActivationFunction.hpp"
#include "Loss/Loss.hpp"
#include "Optimizer/Optimizer.hpp"
#include "Accuracy/Accuracy.hpp"
#include "Layers/Layers.hpp"
class Model {
private:
Loss* loss;
Optimizer* optimizer;
Accuracy* accuracy;
std::vector<Layer*> layers;
public:
Model();
void add(Layer& layer);
void set_loss(Loss& loss);
void set_optimizer(Optimizer& optimizer);
void set_accuracy(Accuracy& accuracy);
void finalize();
void train(const std::vector<std::vector<double>>& X, const std::vector<std::vector<double>>& y,
int epochs, int batch_size, int print_every, const std::vector<std::vector<double>>& validation_data);
void evaluate(const std::vector<std::vector<double>>& X_val, const std::vector<std::vector<double>>& y_val, int batch_size);
std::vector<std::vector<double>> predict(const std::vector<std::vector<double>>& X, int batch_size);
void save_parameters(const std::string& path);
void load_parameters(const std::string& path);
void save(const std::string& path);
void forward(const std::vector<std::vector<double>>& X, bool training);
void backward(const std::vector<std::vector<double>>& output, const std::vector<std::vector<double>>& y);
std::vector<std::vector<std::vector<double>>> get_parameters();
void set_parameters(const std::vector<std::vector<std::vector<double>>>& parameters);
static Model load(const std::string& path);
};

src/Optimizer/Optimizer.cpp (new file)
@@ -0,0 +1,43 @@
#include "Optimizer/Optimizer.hpp"
#include "Layers/Layers.hpp"
Optimizer_SGD::Optimizer_SGD(double learning_rate, double decay, double momentum)
: learning_rate(learning_rate),
current_learning_rate(learning_rate),
decay(decay),
iterations(0),
momentum(momentum) {}
void Optimizer_SGD::pre_update_params() {
    // Standard inverse-time decay of the learning rate
    if (decay != 0.0) {
        current_learning_rate = learning_rate * (1.0 / (1.0 + decay * iterations));
    }
}
void Optimizer_SGD::update_params(Layer& layer) {
// update_params implementation for any Layer
}
void Optimizer_SGD::post_update_params() {
iterations++;
}
Optimizer_Adagrad::Optimizer_Adagrad(double learning_rate, double decay, double epsilon)
: learning_rate(learning_rate),
current_learning_rate(learning_rate),
decay(decay),
iterations(0),
epsilon(epsilon) {}
void Optimizer_Adagrad::pre_update_params() {
    // Standard inverse-time decay of the learning rate
    if (decay != 0.0) {
        current_learning_rate = learning_rate * (1.0 / (1.0 + decay * iterations));
    }
}
void Optimizer_Adagrad::update_params(Layer& layer) {
// update_params implementation for any Layer
}
void Optimizer_Adagrad::post_update_params() {
iterations++;
}
// Similar implementations for Optimizer_RMSprop and Optimizer_Adam
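
The update_params stubs need per-parameter gradients, which the current Layer interface does not yet expose. As a standalone sketch of the rule Optimizer_SGD would apply once gradients are available (the helper is illustrative, not committed code):

#include <vector>

// Vanilla SGD step: w -= learning_rate * dw for every parameter
static void sgd_step(std::vector<std::vector<double>>& params,
                     const std::vector<std::vector<double>>& grads,
                     double learning_rate) {
    for (size_t i = 0; i < params.size(); ++i) {
        for (size_t j = 0; j < params[i].size(); ++j) {
            params[i][j] -= learning_rate * grads[i][j];
        }
    }
}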

src/Optimizer/Optimizer.hpp (new file)
@@ -0,0 +1,45 @@
#pragma once
#include "Layers/Layers.hpp"
class Optimizer {
public:
    virtual ~Optimizer() = default; // optimizers are held through base pointers
    virtual void pre_update_params() = 0;
    virtual void update_params(Layer& layer) = 0;
    virtual void post_update_params() = 0;
    // Other common members and methods
};
class Optimizer_SGD : public Optimizer {
public:
Optimizer_SGD(double learning_rate, double decay, double momentum);
void pre_update_params() override;
void update_params(Layer& layer) override;
void post_update_params() override;
private:
double learning_rate;
double current_learning_rate;
double decay;
int iterations;
double momentum;
};
class Optimizer_Adagrad : public Optimizer {
public:
Optimizer_Adagrad(double learning_rate, double decay, double epsilon);
void pre_update_params() override;
void update_params(Layer& layer) override;
void post_update_params() override;
private:
double learning_rate;
double current_learning_rate;
double decay;
int iterations;
double epsilon;
};
// Similar declarations for Optimizer_RMSprop and Optimizer_Adam
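
One plausible shape for the promised Adam declaration, following the pattern above; the beta_1/beta_2 members come from the Adam algorithm itself and are an assumption rather than committed code:

class Optimizer_Adam : public Optimizer {
public:
    Optimizer_Adam(double learning_rate, double decay, double epsilon,
                   double beta_1, double beta_2);
    void pre_update_params() override;
    void update_params(Layer& layer) override;
    void post_update_params() override;
private:
    double learning_rate;
    double current_learning_rate;
    double decay;
    int iterations;
    double epsilon;
    double beta_1; // first-moment decay rate
    double beta_2; // second-moment decay rate
};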

src/main.cpp (new, empty file)