From fde93f30ac23b53603b2cc5841128937e2950ac9 Mon Sep 17 00:00:00 2001
From: sosokker
Date: Tue, 22 Aug 2023 01:07:50 +0700
Subject: [PATCH] Initial Commit

---
 .gitignore                                    | 51 ++++++++++
 src/Accuracy/Accuracy.cpp                     | 62 +++++++++++++
 src/Accuracy/Accuracy.hpp                     | 35 +++++++
 src/ActivationFunction/ActivationFunction.cpp | 23 +++++
 src/ActivationFunction/ActivationFunction.hpp | 50 ++++++++++
 src/CMakeLists.txt                            | 27 ++++++
 src/Layers/Layers.cpp                         | 68 ++++++++++++++
 src/Layers/Layers.hpp                         | 56 +++++++++++
 src/Loss/Loss.cpp                             | 67 ++++++++++++++
 src/Loss/Loss.hpp                             | 48 ++++++++++
 src/Model/Model.cpp                           | 71 +++++++++++++++
 src/Model/Model.hpp                           | 46 ++++++++++
 src/Optimizer/Optimizer.cpp                   | 46 ++++++++++
 src/Optimizer/Optimizer.hpp                   | 45 ++++++++++
 src/main.cpp                                  |  0
 15 files changed, 695 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 src/Accuracy/Accuracy.cpp
 create mode 100644 src/Accuracy/Accuracy.hpp
 create mode 100644 src/ActivationFunction/ActivationFunction.cpp
 create mode 100644 src/ActivationFunction/ActivationFunction.hpp
 create mode 100644 src/CMakeLists.txt
 create mode 100644 src/Layers/Layers.cpp
 create mode 100644 src/Layers/Layers.hpp
 create mode 100644 src/Loss/Loss.cpp
 create mode 100644 src/Loss/Loss.hpp
 create mode 100644 src/Model/Model.cpp
 create mode 100644 src/Model/Model.hpp
 create mode 100644 src/Optimizer/Optimizer.cpp
 create mode 100644 src/Optimizer/Optimizer.hpp
 create mode 100644 src/main.cpp

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..45e83e0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,51 @@
+# Compiled Object files
+*.o
+
+# Compiled Dynamic libraries
+*.so
+
+# Compiled Static libraries
+*.a
+
+# Compiled executables
+*.exe
+
+# Compiled binaries
+/bin/
+/build/
+/out/
+
+# CMake build directory
+CMakeFiles/
+CMakeCache.txt
+CMakeScripts/
+CMakeVars.txt
+Makefile
+cmake_install.cmake
+
+# Visual Studio Code settings
+.vscode/
+
+# Compiled binaries for Windows
+*.dll
+*.lib
+
+# MacOS specific files
+.DS_Store
+
+# Compiled Python files
+__pycache__/
+
+# Ignore any logs or temporary files
+*.log
+*.tmp
+
+# Ignore user-specific project files
+.idea/
+*.suo
+*.user
+*.sln.docstates
+
+# Ignore user-specific files
+*.swp
+*~
diff --git a/src/Accuracy/Accuracy.cpp b/src/Accuracy/Accuracy.cpp
new file mode 100644
index 0000000..3dfbf94
--- /dev/null
+++ b/src/Accuracy/Accuracy.cpp
@@ -0,0 +1,62 @@
+#include "Accuracy.hpp"
+
+#include <cmath>
+#include <cstdlib>
+#include <numeric>
+
+Accuracy::Accuracy() : accumulated_sum(0.0), accumulated_count(0) {}
+
+double Accuracy::calculate(const std::vector<int>& predictions, const std::vector<int>& y) {
+    std::vector<int> comparisons(predictions.size());
+    for (size_t i = 0; i < predictions.size(); ++i) {
+        comparisons[i] = compare(predictions[i], y[i]);
+    }
+
+    int correct = std::accumulate(comparisons.begin(), comparisons.end(), 0);
+    double accuracy = static_cast<double>(correct) / predictions.size();
+
+    accumulated_sum += correct;
+    accumulated_count += static_cast<int>(predictions.size());
+
+    return accuracy;
+}
+
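+// Usage sketch (illustrative only; `batches` is a hypothetical container of
+// (predictions, labels) pairs): calculate() returns per-batch accuracy, while
+// calculate_accumulated() averages everything seen since the last new_pass():
+//
+//     Accuracy_Categorical acc;
+//     acc.new_pass();
+//     for (const auto& batch : batches)
+//         acc.calculate(batch.first, batch.second);
+//     double epoch_accuracy = acc.calculate_accumulated();
+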
+double Accuracy::calculate_accumulated() {
+    return accumulated_sum / accumulated_count;
+}
+
+void Accuracy::new_pass() {
+    accumulated_sum = 0.0;
+    accumulated_count = 0;
+}
+
+int Accuracy::compare(int prediction, int ground_truth) {
+    return prediction == ground_truth ? 1 : 0;
+}
+
+int Accuracy_Categorical::compare(int prediction, int ground_truth) {
+    return prediction == ground_truth ? 1 : 0;
+}
+
+Accuracy_Regression::Accuracy_Regression() : precision(0.0) {}
+
+void Accuracy_Regression::init(const std::vector<double>& y, bool reinit) {
+    if (precision == 0.0 || reinit) {
+        precision = std::sqrt(std::accumulate(y.begin(), y.end(), 0.0,
+            [](double acc, double val) { return acc + val * val; })) / 250.0;
+    }
+}
+
+int Accuracy_Regression::compare(int prediction, int ground_truth) {
+    return std::abs(prediction - ground_truth) < precision ? 1 : 0;
+}
diff --git a/src/Accuracy/Accuracy.hpp b/src/Accuracy/Accuracy.hpp
new file mode 100644
index 0000000..58adfab
--- /dev/null
+++ b/src/Accuracy/Accuracy.hpp
@@ -0,0 +1,35 @@
+#ifndef ACCURACY_HPP
+#define ACCURACY_HPP
+
+#include <vector>
+
+class Accuracy {
+private:
+    double accumulated_sum;
+    int accumulated_count;
+
+public:
+    Accuracy();
+    virtual ~Accuracy() = default;
+    double calculate(const std::vector<int>& predictions, const std::vector<int>& y);
+    double calculate_accumulated();
+    void new_pass();
+    virtual int compare(int prediction, int ground_truth);
+};
+
+class Accuracy_Categorical : public Accuracy {
+public:
+    int compare(int prediction, int ground_truth) override;
+};
+
+class Accuracy_Regression : public Accuracy {
+private:
+    double precision;
+
+public:
+    Accuracy_Regression();
+    void init(const std::vector<double>& y, bool reinit = false);
+    int compare(int prediction, int ground_truth) override;
+};
+
+#endif // ACCURACY_HPP
diff --git a/src/ActivationFunction/ActivationFunction.cpp b/src/ActivationFunction/ActivationFunction.cpp
new file mode 100644
index 0000000..e405564
--- /dev/null
+++ b/src/ActivationFunction/ActivationFunction.cpp
@@ -0,0 +1,23 @@
+#include "ActivationFunction.hpp"
+
+#include <algorithm>
+
+void Activation_ReLU::forward(std::vector<double>& inputs, bool training) {
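+    // Sketch implementation (assumed semantics, the usual ReLU): remember the
+    // inputs for the backward pass, then clamp negative values to zero.
+    this->inputs = inputs;
+    output.resize(inputs.size());
+    std::transform(inputs.begin(), inputs.end(), output.begin(),
+                   [](double x) { return std::max(0.0, x); });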
+}
+
+void Activation_ReLU::backward(std::vector<double>& dvalues) {
+    // TODO: implementation
+}
+
+std::vector<int> Activation_ReLU::predictions(const std::vector<double>& outputs) {
+    // TODO: implementation
+    return {};
+}
+
+// Implement similar member functions for other Activation classes
diff --git a/src/ActivationFunction/ActivationFunction.hpp b/src/ActivationFunction/ActivationFunction.hpp
new file mode 100644
index 0000000..c943d66
--- /dev/null
+++ b/src/ActivationFunction/ActivationFunction.hpp
@@ -0,0 +1,50 @@
+#ifndef ACTIVATIONFUNCTION_HPP
+#define ACTIVATIONFUNCTION_HPP
+
+#include <vector>
+
+class Activation_ReLU {
+public:
+    void forward(std::vector<double>& inputs, bool training);
+    void backward(std::vector<double>& dvalues);
+    std::vector<int> predictions(const std::vector<double>& outputs);
+private:
+    std::vector<double> inputs;
+    std::vector<double> output;
+    std::vector<double> dinputs;
+};
+
+class Activation_Softmax {
+public:
+    void forward(std::vector<double>& inputs, bool training);
+    void backward(std::vector<double>& dvalues);
+    std::vector<int> predictions(const std::vector<double>& outputs);
+private:
+    std::vector<double> inputs;
+    std::vector<double> output;
+    std::vector<double> dinputs;
+};
+
+class Activation_Sigmoid {
+public:
+    void forward(std::vector<double>& inputs, bool training);
+    void backward(std::vector<double>& dvalues);
+    std::vector<int> predictions(const std::vector<double>& outputs);
+private:
+    std::vector<double> inputs;
+    std::vector<double> output;
+    std::vector<double> dinputs;
+};
+
+class Activation_Linear {
+public:
+    void forward(std::vector<double>& inputs, bool training);
+    void backward(std::vector<double>& dvalues);
+    std::vector<int> predictions(const std::vector<double>& outputs);
+private:
+    std::vector<double> inputs;
+    std::vector<double> output;
+    std::vector<double> dinputs;
+};
+
+#endif // ACTIVATIONFUNCTION_HPP
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100644
index 0000000..76f9f1f
--- /dev/null
+++ b/src/CMakeLists.txt
@@ -0,0 +1,27 @@
+cmake_minimum_required(VERSION 3.10)
+
+project(cpp_neural_network)
+
+# Set C++ standard
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Add the source files (paths are relative to this directory, src/)
+set(SOURCES
+    Model/Model.cpp
+    Accuracy/Accuracy.cpp
+    Optimizer/Optimizer.cpp
+    Loss/Loss.cpp
+    Layers/Layers.cpp
+    ActivationFunction/ActivationFunction.cpp
+    main.cpp
+)
+
+# Add executable
+add_executable(${PROJECT_NAME} ${SOURCES})
+
+# Include directories ("Layers/Layers.hpp"-style includes resolve against src/)
+target_include_directories(${PROJECT_NAME}
+    PRIVATE
+    ${CMAKE_CURRENT_SOURCE_DIR}
+)
diff --git a/src/Layers/Layers.cpp b/src/Layers/Layers.cpp
new file mode 100644
index 0000000..c4df860
--- /dev/null
+++ b/src/Layers/Layers.cpp
@@ -0,0 +1,68 @@
+#include "Layers.hpp"
+#include <cstdlib>
+
+Layer_Dense::Layer_Dense(int n_inputs, int n_neurons,
+                         double weight_regularizer_l1,
+                         double weight_regularizer_l2,
+                         double bias_regularizer_l1,
+                         double bias_regularizer_l2)
+    : weight_regularizer_l1(weight_regularizer_l1),
+      weight_regularizer_l2(weight_regularizer_l2),
+      bias_regularizer_l1(bias_regularizer_l1),
+      bias_regularizer_l2(bias_regularizer_l2)
+{
+    // Initialize weights with small random values and biases with zeros
+    weights.resize(n_inputs, std::vector<double>(n_neurons));
+    for (auto& row : weights) {
+        for (double& weight : row) {
+            weight = 0.01 * (std::rand() / double(RAND_MAX));
+        }
+    }
+    biases.resize(1, std::vector<double>(n_neurons, 0.0));
+}
+
+void Layer_Dense::forward(const std::vector<double>& inputs, bool training) {
+    this->inputs = inputs;
+
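+    // Sketch implementation (assumed semantics): a single-sample affine map,
+    // output[j] = sum_i inputs[i] * weights[i][j] + biases[0][j]. The `output`
+    // member it fills is an assumption, mirroring Layer_Input::output.
+    output.assign(weights[0].size(), 0.0);
+    for (size_t i = 0; i < weights.size(); ++i) {
+        for (size_t j = 0; j < weights[i].size(); ++j) {
+            output[j] += inputs[i] * weights[i][j];
+        }
+    }
+    for (size_t j = 0; j < output.size(); ++j) {
+        output[j] += biases[0][j];
+    }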
+}
+
+void Layer_Dense::backward(std::vector<double>& dvalues) {
+    // TODO: backward pass implementation
+}
+
+std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> Layer_Dense::get_parameters() const {
+    return std::make_pair(weights, biases);
+}
+
+void Layer_Dense::set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) {
+    this->weights = weights;
+    this->biases = biases;
+}
+
+Layer_Dropout::Layer_Dropout(double rate) : rate(1 - rate) {}
+
+void Layer_Dropout::forward(const std::vector<double>& inputs, bool training) {
+    // TODO: forward pass implementation
+}
+
+void Layer_Dropout::backward(std::vector<double>& dvalues) {
+    // TODO: backward pass implementation
+}
+
+void Layer_Input::forward(const std::vector<double>& inputs, bool training) {
+    this->output = inputs;
+}
+
+// Implement similar member functions for other Layer classes
diff --git a/src/Layers/Layers.hpp b/src/Layers/Layers.hpp
new file mode 100644
index 0000000..bace51f
--- /dev/null
+++ b/src/Layers/Layers.hpp
@@ -0,0 +1,56 @@
+#pragma once
+#include <vector>
+#include <utility>
+
+class Layer {
+public:
+    virtual ~Layer() = default;
+    virtual void forward(const std::vector<double>& inputs, bool training) = 0;
+    virtual void backward(std::vector<double>& dvalues) = 0;
+    // Default no-op parameter accessors, so parameter-free layers such as
+    // dropout are not forced to override them.
+    virtual std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> get_parameters() const { return {}; }
+    virtual void set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) {}
+};
+
+class Layer_Dense : public Layer {
+private:
+    double weight_regularizer_l1, weight_regularizer_l2,
+           bias_regularizer_l1, bias_regularizer_l2;
+    std::vector<std::vector<double>> weights, biases;
+    std::vector<double> inputs;
+    std::vector<double> output; // forward-pass result (assumed member, mirroring Layer_Input)
+
+public:
+    Layer_Dense(int n_inputs, int n_neurons,
+                double weight_regularizer_l1,
+                double weight_regularizer_l2,
+                double bias_regularizer_l1,
+                double bias_regularizer_l2);
+
+    void forward(const std::vector<double>& inputs, bool training) override;
+    void backward(std::vector<double>& dvalues) override;
+    std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> get_parameters() const override;
+    void set_parameters(const std::vector<std::vector<double>>& weights, const std::vector<std::vector<double>>& biases) override;
+};
+
+class Layer_Dropout : public Layer {
+private:
+    double rate;
+
+public:
+    Layer_Dropout(double rate);
+
+    void forward(const std::vector<double>& inputs, bool training) override;
+    void backward(std::vector<double>& dvalues) override;
+};
+
+class Layer_Input {
+private:
+    std::vector<double> output;
+
+public:
+    void forward(const std::vector<double>& inputs, bool training);
+};
+
+// Add other Layer classes as needed
diff --git a/src/Loss/Loss.cpp b/src/Loss/Loss.cpp
new file mode 100644
index 0000000..49ed8a1
--- /dev/null
+++ b/src/Loss/Loss.cpp
@@ -0,0 +1,67 @@
+#include "Loss.hpp"
+
+double Loss::regularization_loss() {
+    // TODO: implementation
+    return 0.0;
+}
+
+void Loss::remember_trainable_layers(const std::vector<Layer*>& trainable_layers) {
+    this->trainable_layers = trainable_layers;
+}
+
+double Loss::calculate(const std::vector<double>& output, const std::vector<double>& y, bool include_regularization) {
+    // TODO: implementation
+    return 0.0;
+}
+
+double Loss::calculate_accumulated(bool include_regularization) {
+    // TODO: implementation
+    return 0.0;
+}
+
+void Loss::new_pass() {
+    accumulated_sum = 0.0;
+    accumulated_count = 0;
+}
+
+double Loss_CategoricalCrossentropy::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
+    // TODO: implementation
+    return 0.0;
+}
+
+void Loss_CategoricalCrossentropy::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
+    // TODO: implementation
+}
+
+double Loss_BinaryCrossentropy::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
+    // TODO: implementation
+    return 0.0;
+}
+
+void Loss_BinaryCrossentropy::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
+    // TODO: implementation
+}
+
+double Loss_MeanSquaredError::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
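+    // Sketch implementation (assumed semantics): mean of squared residuals,
+    // L = (1/N) * sum_i (y_true_i - y_pred_i)^2 over a single sample/batch vector.
+    double sum = 0.0;
+    for (size_t i = 0; i < y_pred.size(); ++i) {
+        double diff = y_true[i] - y_pred[i];
+        sum += diff * diff;
+    }
+    return y_pred.empty() ? 0.0 : sum / y_pred.size();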
+}
+
+void Loss_MeanSquaredError::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
+    // TODO: implementation
+}
+
+double Loss_MeanAbsoluteError::forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) {
+    // TODO: implementation
+    return 0.0;
+}
+
+void Loss_MeanAbsoluteError::backward(std::vector<double>& dvalues, const std::vector<double>& y_true) {
+    // TODO: implementation
+}
diff --git a/src/Loss/Loss.hpp b/src/Loss/Loss.hpp
new file mode 100644
index 0000000..1b13507
--- /dev/null
+++ b/src/Loss/Loss.hpp
@@ -0,0 +1,48 @@
+#ifndef LOSS_HPP
+#define LOSS_HPP
+
+#include <vector>
+#include "Layers/Layers.hpp"
+
+class Loss {
+public:
+    virtual ~Loss() = default;
+    virtual double regularization_loss();
+    virtual double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) = 0;
+    virtual void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) = 0;
+    virtual void remember_trainable_layers(const std::vector<Layer*>& trainable_layers);
+    virtual double calculate(const std::vector<double>& output, const std::vector<double>& y, bool include_regularization = false);
+    virtual double calculate_accumulated(bool include_regularization = false);
+    virtual void new_pass();
+
+private:
+    std::vector<Layer*> trainable_layers;
+    double accumulated_sum = 0.0;
+    int accumulated_count = 0;
+};
+
+class Loss_CategoricalCrossentropy : public Loss {
+public:
+    double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
+    void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
+};
+
+class Loss_BinaryCrossentropy : public Loss {
+public:
+    double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
+    void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
+};
+
+class Loss_MeanSquaredError : public Loss {
+public:
+    double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
+    void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
+};
+
+class Loss_MeanAbsoluteError : public Loss {
+public:
+    double forward(const std::vector<double>& y_pred, const std::vector<double>& y_true) override;
+    void backward(std::vector<double>& dvalues, const std::vector<double>& y_true) override;
+};
+
+#endif // LOSS_HPP
diff --git a/src/Model/Model.cpp b/src/Model/Model.cpp
new file mode 100644
index 0000000..1cf4296
--- /dev/null
+++ b/src/Model/Model.cpp
@@ -0,0 +1,71 @@
+#include "Model/Model.hpp"
+
+Model::Model() {}
+
+void Model::add(Layer& layer) {
+    layers.push_back(&layer);
+}
+
+void Model::set_loss(Loss& loss) {
+    this->loss = &loss;
+}
+
+void Model::set_optimizer(Optimizer& optimizer) {
+    this->optimizer = &optimizer;
+}
+
+void Model::set_accuracy(Accuracy& accuracy) {
+    this->accuracy = &accuracy;
+}
+
+void Model::finalize() {
+    // TODO: implement finalize method
+}
+
+void Model::train(const std::vector<std::vector<double>>& X, const std::vector<std::vector<double>>& y,
+                  int epochs, int batch_size, int print_every, const std::vector<std::vector<double>>& validation_data) {
+    // TODO: implement train method
+}
+
+void Model::evaluate(const std::vector<std::vector<double>>& X_val, const std::vector<std::vector<double>>& y_val, int batch_size) {
+    // TODO: implement evaluate method
+}
+
+std::vector<std::vector<double>> Model::predict(const std::vector<std::vector<double>>& X, int batch_size) {
+    // TODO: implement predict method
+    return {};
+}
+
+void Model::save_parameters(const std::string& path) {
+    // TODO: implement save_parameters method
+}
+
+void Model::load_parameters(const std::string& path) {
+    // TODO: implement load_parameters method
+}
+
+void Model::save(const std::string& path) {
+    // TODO: implement save method
+}
+
+void Model::forward(const std::vector<std::vector<double>>& X, bool training) {
+    // TODO: implement forward method
+}
+
+void Model::backward(const std::vector<std::vector<double>>& output, const std::vector<std::vector<double>>& y) {
+    // TODO: implement backward method
+}
+
+std::vector<std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>>> Model::get_parameters() {
+    // TODO: implement get_parameters method
+    return {};
+}
+
+void Model::set_parameters(const std::vector<std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>>>& parameters) {
+    // TODO: implement set_parameters method
+}
+
+Model Model::load(const std::string& path) {
+    // TODO: implement load method
+    return Model();
+}
diff --git a/src/Model/Model.hpp b/src/Model/Model.hpp
new file mode 100644
index 0000000..f77fe2e
--- /dev/null
+++ b/src/Model/Model.hpp
@@ -0,0 +1,46 @@
+#pragma once
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "ActivationFunction/ActivationFunction.hpp"
+#include "Loss/Loss.hpp"
+#include "Optimizer/Optimizer.hpp"
+#include "Accuracy/Accuracy.hpp"
+#include "Layers/Layers.hpp"
+
+class Model {
+private:
+    Loss* loss = nullptr;
+    Optimizer* optimizer = nullptr;
+    Accuracy* accuracy = nullptr;
+    std::vector<Layer*> layers;
+
+public:
+    Model();
+
+    void add(Layer& layer);
+    void set_loss(Loss& loss);
+    void set_optimizer(Optimizer& optimizer);
+    void set_accuracy(Accuracy& accuracy);
+    void finalize();
+
+    void train(const std::vector<std::vector<double>>& X, const std::vector<std::vector<double>>& y,
+               int epochs, int batch_size, int print_every, const std::vector<std::vector<double>>& validation_data);
+
+    void evaluate(const std::vector<std::vector<double>>& X_val, const std::vector<std::vector<double>>& y_val, int batch_size);
+
+    std::vector<std::vector<double>> predict(const std::vector<std::vector<double>>& X, int batch_size);
+
+    void save_parameters(const std::string& path);
+    void load_parameters(const std::string& path);
+    void save(const std::string& path);
+
+    void forward(const std::vector<std::vector<double>>& X, bool training);
+    void backward(const std::vector<std::vector<double>>& output, const std::vector<std::vector<double>>& y);
+
+    std::vector<std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>>> get_parameters();
+    void set_parameters(const std::vector<std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>>>& parameters);
+
+    static Model load(const std::string& path);
+};
diff --git a/src/Optimizer/Optimizer.cpp b/src/Optimizer/Optimizer.cpp
new file mode 100644
index 0000000..5bb9584
--- /dev/null
+++ b/src/Optimizer/Optimizer.cpp
@@ -0,0 +1,46 @@
+#include "Optimizer/Optimizer.hpp"
+#include "Layers/Layers.hpp"
+
+Optimizer_SGD::Optimizer_SGD(double learning_rate, double decay, double momentum)
+    : learning_rate(learning_rate),
+      current_learning_rate(learning_rate),
+      decay(decay),
+      iterations(0),
+      momentum(momentum) {}
+
+void Optimizer_SGD::pre_update_params() {
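+    // Sketch implementation (assumed semantics): the common inverse time-decay
+    // schedule, lr_t = lr / (1 + decay * iterations).
+    if (decay != 0.0) {
+        current_learning_rate = learning_rate * (1.0 / (1.0 + decay * iterations));
+    }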
+}
+
+void Optimizer_SGD::update_params(Layer& layer) {
+    // TODO: update_params implementation for any Layer
+}
+
+void Optimizer_SGD::post_update_params() {
+    iterations++;
+}
+
+Optimizer_Adagrad::Optimizer_Adagrad(double learning_rate, double decay, double epsilon)
+    : learning_rate(learning_rate),
+      current_learning_rate(learning_rate),
+      decay(decay),
+      iterations(0),
+      epsilon(epsilon) {}
+
+void Optimizer_Adagrad::pre_update_params() {
+    // TODO: pre_update_params implementation
+}
+
+void Optimizer_Adagrad::update_params(Layer& layer) {
+    // TODO: update_params implementation for any Layer
+}
+
+void Optimizer_Adagrad::post_update_params() {
+    iterations++;
+}
+
+// Similar implementations for Optimizer_RMSprop and Optimizer_Adam
diff --git a/src/Optimizer/Optimizer.hpp b/src/Optimizer/Optimizer.hpp
new file mode 100644
index 0000000..2143edc
--- /dev/null
+++ b/src/Optimizer/Optimizer.hpp
@@ -0,0 +1,45 @@
+#pragma once
+#include "Layers/Layers.hpp"
+
+class Optimizer {
+public:
+    virtual ~Optimizer() = default;
+    virtual void pre_update_params() = 0;
+    virtual void update_params(Layer& layer) = 0;
+    virtual void post_update_params() = 0;
+    // Other common members and methods
+};
+
+class Optimizer_SGD : public Optimizer {
+public:
+    Optimizer_SGD(double learning_rate, double decay, double momentum);
+
+    void pre_update_params() override;
+    void update_params(Layer& layer) override;
+    void post_update_params() override;
+
+private:
+    double learning_rate;
+    double current_learning_rate;
+    double decay;
+    int iterations;
+    double momentum;
+};
+
+class Optimizer_Adagrad : public Optimizer {
+public:
+    Optimizer_Adagrad(double learning_rate, double decay, double epsilon);
+
+    void pre_update_params() override;
+    void update_params(Layer& layer) override;
+    void post_update_params() override;
+
+private:
+    double learning_rate;
+    double current_learning_rate;
+    double decay;
+    int iterations;
+    double epsilon;
+};
+
+// Similar declarations for Optimizer_RMSprop and Optimizer_Adam
diff --git a/src/main.cpp b/src/main.cpp
new file mode 100644
index 0000000..e69de29