From 7053fa7b49fff7a20c33ed51a2aeac8c0de1974b Mon Sep 17 00:00:00 2001
From: NandiniGera
Date: Sun, 12 Feb 2023 03:45:12 +0530
Subject: [PATCH 01/20] OHE doc, Activation functions+doc

---
 CMakeLists.txt                                |  4 +-
 docs/methods/activation_functions.md          | 76 +++++++++++++++++++
 docs/methods/preprocessing/one_hot_encoder.md | 36 +++++++++
 examples/activation_functions_eg.cpp          | 41 ++++++++++
 .../methods/activation_functions.cpp          | 62 +++++++++++++++
 .../methods/activation_functions.hpp          | 30 ++++++++
 6 files changed, 248 insertions(+), 1 deletion(-)
 create mode 100644 docs/methods/activation_functions.md
 create mode 100644 docs/methods/preprocessing/one_hot_encoder.md
 create mode 100644 examples/activation_functions_eg.cpp
 create mode 100644 src/slowmokit/methods/activation_functions.cpp
 create mode 100644 src/slowmokit/methods/activation_functions.hpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b423799..dd3982d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -60,4 +60,6 @@ add_library(slowmokit
         src/slowmokit/methods/metrics/recall.hpp
         src/slowmokit/methods/metrics/recall.cpp
         src/slowmokit/methods/metrics/mean_squared_error.hpp
-        src/slowmokit/methods/metrics/mean_squared_error.cpp)
+        src/slowmokit/methods/metrics/mean_squared_error.cpp
+        src/slowmokit/methods/activation_functions.cpp
+        src/slowmokit/methods/activation_functions.hpp)

diff --git a/docs/methods/activation_functions.md b/docs/methods/activation_functions.md
new file mode 100644
index 0000000..987b6d1
--- /dev/null
+++ b/docs/methods/activation_functions.md
@@ -0,0 +1,76 @@
# Activation Functions

Sigmoid- It is computationally expensive, causes the vanishing gradient problem, and is not zero-centred. This method is generally used for binary classification problems.

tanh- The tanh activation function is a hyperbolic tangent sigmoid function with a range of -1 to 1. It is often used in deep learning models for its ability to model nonlinear boundaries.

tan-1h- The inverse of tan. The ArcTan function is a sigmoid function to model accelerating and decelerating outputs but with useful output ranges.

ReLU- The ReLU activation function returns 0 if the input value is less than 0; for any positive input, the output is the same as the input. It is continuous everywhere but non-differentiable at 0, and its derivative is 0 for any negative input.

leakyReLU- With Leaky ReLU there is a small negative slope, so instead of the unit not firing at all for negative inputs, our neurons still output a small value, which also makes the layer easier to optimize.

softmax-The softmax is a more generalised form of the sigmoid. It is used in multi-class classification problems. Similar to sigmoid, it produces values in the range of 0–1, therefore it is used as the final layer in classification models.

binaryStep-The Step activation function is used in the perceptron network. This is usually used in single-layer networks to convert to an output that is binary (0 or 1).These are called Binary Step Function.
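Below is a quick hand-checked sketch of what these functions return for a few sample inputs (an illustrative aside using the conventional definitions, with alpha = 0.01 for leakyReLU; values rounded to four decimal places):

```
sigmoid(0.0)          = 0.5
tanh(1.0)             ≈ 0.7616
arctan(1.0)           ≈ 0.7854   (pi / 4)
ReLU(-2.0)            = 0,  ReLU(2.0) = 2
leakyReLU(-2.0, 0.01) = -0.02
binaryStep(-0.5)      = 0,  binaryStep(0.5) = 1
softmax({1, 2, 3})    ≈ {0.0900, 0.2447, 0.6652}
```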
## Parameters

| Name | Definition | Type |
|--------------|--------------------------------------------|--------------|
| x | double value on which the function is applied. | `double` |


## Methods

| Name | Definition | Return value |
|----------------------------------------|-----------------------------------------------|---------------|
| y | double value returned after applying the chosen function to x. | `double` |

## Example

```
int main(){
 //sigmoid example
double x = 1.0;
double y = sigmoid(x);
std::cout << "sigmoid(" << x << ") = " << y << std::endl;

 //tanh example
x = -1.0;
y = tanh(x);
std::cout << "tanh(" << x << ") = " << y << std::endl;

 //tan inverse example
x = 0.0;
y = arctan(x);
std::cout << "arctan(" << x << ") = " << y << std::endl;

 //ReLU example
x = 1.0;
y = ReLU(x);
std::cout << "ReLU(" << x << ") = " << y << std::endl;

 //leakyReLU example
x = -1.0;
double alpha = 0.01;
y = leakyReLU(x, alpha);
std::cout << "leakyReLU(" << x << ", " << alpha << ") = " << y << std::endl;

 //binaryStep example
x = 1.0;
y = binaryStep(x);
std::cout << "binaryStep(" << x << ") = " << y << std::endl;

 //softmax example
std::vector<double> v = {1, 2, 3};
std::vector<double> result = softmax(v);
 for (double value : result) {
   std::cout << value << " ";
 }
 return 0;

}
```
\ No newline at end of file

diff --git a/docs/methods/preprocessing/one_hot_encoder.md b/docs/methods/preprocessing/one_hot_encoder.md
new file mode 100644
index 0000000..67ddbeb
--- /dev/null
+++ b/docs/methods/preprocessing/one_hot_encoder.md
@@ -0,0 +1,36 @@
# One Hot Encoder

One hot encoding is a technique to represent categorical variables as numerical values. Each unique value of a categorical variable is assigned a binary code, where a "1" in the code represents the presence of that value and a "0" represents its absence.

One hot encoding makes our training data more useful and expressive, and it can be rescaled easily.
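For instance, with the four categories {apples, banana, mango, pear} and indices assigned in the order the values first appear (apples = 0, banana = 1, mango = 2, pear = 3 — the exact index assignment is an implementation detail), each value maps to a 4-bit code, as the Example section below illustrates:

```
apples -> 1 0 0 0
banana -> 0 1 0 0
mango  -> 0 0 1 0
pear   -> 0 0 0 1
```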
## Parameters

| Name | Definition | Type |
|--------------|--------------------------------------------|--------------|
| data | The data that has to be encoded is passed as the data parameter in the oneHotEncoder function. | `vector` |


## Methods

| Name | Definition | Return value |
|----------------------------------------|-----------------------------------------------|---------------|
| `oneHotEncoder(vector data, nClasses)` | To encode the data into numerical values. | `vector` |

## Example

```
int main() {
  std::vector<std::string> data = {"apples", "banana", "mango", "pear", "mango","apples","pear"};
  int nClasses = 4;
  std::vector<std::vector<int>> oneHotEncodedData = oneHotEncoder(data, nClasses);
  for (const auto &row : oneHotEncodedData) {
    for (const auto &column : row) {
      std::cout << column << " ";
    }
    std::cout << std::endl;
  }
  return 0;
}
```
\ No newline at end of file

diff --git a/examples/activation_functions_eg.cpp b/examples/activation_functions_eg.cpp
new file mode 100644
index 0000000..0d65a49
--- /dev/null
+++ b/examples/activation_functions_eg.cpp
@@ -0,0 +1,41 @@
//int main(){
  //sigmoid example
//double x = 1.0;
//double y = sigmoid(x);
//std::cout << "sigmoid(" << x << ") = " << y << std::endl;

  //tanh example
//double x = -1.0;
//double y = tanh(x);
//std::cout << "tanh(" << x << ") = " << y << std::endl;

  //tan inverse example
//double x = 0.0;
//double y = arctan(x);
//std::cout << "arctan(" << x << ") = " << y << std::endl;

  //ReLU example
//double x = 1.0;
//double y = ReLU(x);
//std::cout << "ReLU(" << x << ") = " << y << std::endl;

  //leakyReLU example
//double x = -1.0;
//double alpha = 0.01;
//y = leakyReLU(x, alpha);
//std::cout << "leakyReLU(" << x << ", " << alpha << ") = " << y << std::endl;

  //binaryStep example
//double x = 1.0;
//double y = binaryStep(x);
//std::cout << "binaryStep(" << x << ") = " << y << std::endl;

  //softmax example
//std::vector x = {1, 2, 3};
//std::vector result = softmax(x);
// for (double value : result) {
//   std::cout << value << " ";
// }
// return 0;

//}
\ No newline at end of file

diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp
new file mode 100644
index 0000000..a14a506
--- /dev/null
+++ b/src/slowmokit/methods/activation_functions.cpp
@@ -0,0 +1,62 @@
/**
 * @file methods/activation_functions.cpp
 *
 * Implementation of activation functions
 */
#include "activation_functions.hpp"
template
#include
//sigmoid
double sigmoid(double x) {
  return 1 / (1 + exp(-x));
}
//ReLU
double ReLU(double x) {
  if (x > 0) {
    return x;
  } else {
    return 0;
  }
}
//tanh
double tanh(double x) {
  double result = (exp(x) - exp(-x)) / (exp(x) + exp(-x));
  return result;
}
//tan inverse
double arctan(double x) {
  return atan(x);
}

//softmax
std::vector<double> softmax(const std::vector<double> &x) {
  std::vector<double> result(x.size());
  double sum = 0;
  for (double value : x) {
    sum += exp(value);
  }
  for (int i = 0; i < x.size(); i++) {
    result[i] = exp(x[i]) / sum;
  }
  return result;
}
//binary step
double binaryStep(double x) {
  if (x >= 0) {
    return 1; //assuming threshold value to be 0 here
  } else {
    return 0;
  }
}
//leaky ReLU
double leakyReLU(double x, double alpha) {
  if (x >= 0) {
    return x;
  } else {
    return alpha * x;
  }
}



diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
new file mode 100644
index 0000000..f174253
--- /dev/null
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -0,0 +1,30 @@
/**
 * @file methods/activation_functions.hpp
 *
 * Easy include to add non-linearity into a neural network.
 */

#ifndef ACTIVATION_FUNCTIONS_HPP
#define ACTIVATION_FUNCTIONS_HPP
#include "../../core.hpp"
#include

/**
 * @param x {double x} - double value on which the function is applied.
+ * + * @param x {vector} - vector containing 'double' values of x for softmax activation function implementation. + * + * @return {double value} - double value after putting x in the functions gets returned. + */ + +template + +double sigmoid(double x); +double tanh(double x); +double ReLU(double x); +double leakyReLU(double x, double alpha); +std::vector softmax(const std::vector &x); +double arctan(double x); +double binaryStep(double x); + +#endif // ACTIVATION_FUNCTIONS_HPP \ No newline at end of file From 56af84e4beef139333a4a2ad93ed236c98dd7831 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 12:20:37 +0530 Subject: [PATCH 02/20] updated --- src/slowmokit/methods/activation_functions.cpp | 11 +++++------ src/slowmokit/methods/activation_functions.hpp | 18 +++++++++--------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index a14a506..742dce6 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -5,10 +5,9 @@ */ #include "activation_functions.hpp" template -#include //sigmoid double sigmoid(double x) { - return 1 / (1 + exp(-x)); + return 1 / (1 + std::exp(-x)); } //ReLU double ReLU(double x) { @@ -20,12 +19,12 @@ double ReLU(double x) { } //tanh double tanh(double x) { - double result = (exp(x) - exp(-x)) / (exp(x) + exp(-x)); + double result = (std::exp(x) - std::exp(-x)) / (std::exp(x) + std::exp(-x)); return result; } //tan inverse double arctan(double x) { - return atan(x); + return std::atan(x); } //softmax @@ -33,10 +32,10 @@ std::vector softmax(const std::vector &x) { std::vector result(x.size()); double sum = 0; for (double value : x) { - sum += exp(value); + sum += std::exp(value); } for (int i = 0; i < x.size(); i++) { - result[i] = exp(x[i]) / sum; + result[i] = std::exp(x[i]) / sum; } return result; } diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index f174253..a63f9de 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -6,8 +6,8 @@ #ifndef ACTIVATION_FUNCTIONS_HPP #define ACTIVATION_FUNCTIONS_HPP -#include "../../core.hpp" -#include +#include "../core.hpp" + /** * @param x {double x} - double value on which the function is applied. 
@@ -19,12 +19,12 @@ template -double sigmoid(double x); -double tanh(double x); -double ReLU(double x); -double leakyReLU(double x, double alpha); -std::vector softmax(const std::vector &x); -double arctan(double x); -double binaryStep(double x); +double sigmoid(double); +double tanh(double); +double ReLU(double); +double leakyReLU(double, double); +std::vector softmax(const std::vector &); +double arctan(double); +double binaryStep(double); #endif // ACTIVATION_FUNCTIONS_HPP \ No newline at end of file From 5eebc29054fab6686e728aca29e64e1c53dd7a8c Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 13:12:39 +0530 Subject: [PATCH 03/20] updated --- docs/methods/activation_functions.md | 2 +- docs/methods/preprocessing/one_hot_encoder.md | 4 +- .../methods/activation_functions.cpp | 6 +- .../methods/activation_functions.hpp | 55 ++++++++++++++++--- 4 files changed, 50 insertions(+), 17 deletions(-) diff --git a/docs/methods/activation_functions.md b/docs/methods/activation_functions.md index 987b6d1..de2e2f3 100644 --- a/docs/methods/activation_functions.md +++ b/docs/methods/activation_functions.md @@ -31,7 +31,7 @@ binaryStep-The Step activation function is used in the perceptron network. This ## Example -``` +```cpp int main(){ //sigmoid example double x = 1.0; diff --git a/docs/methods/preprocessing/one_hot_encoder.md b/docs/methods/preprocessing/one_hot_encoder.md index 67ddbeb..dd20ce6 100644 --- a/docs/methods/preprocessing/one_hot_encoder.md +++ b/docs/methods/preprocessing/one_hot_encoder.md @@ -10,7 +10,7 @@ One hot encoding makes our training data more useful and expressive, and it can | Name | Definition | Type | |--------------|--------------------------------------------|--------------| | data | The data that has to be encoded is passed as the data parameter in the oneHotEncoder function. | `vector` | - +| nClasses | This parameter is an integer that specifies the number of classes or categories in the input data. | `int` | ## Methods @@ -20,7 +20,7 @@ One hot encoding makes our training data more useful and expressive, and it can ## Example -``` +```cpp int main() { std::vector data = {"apples", "banana", "mango", "pear", "mango","apples","pear"}; int nClasses = 4; diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index 742dce6..61738b9 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -54,8 +54,4 @@ double leakyReLU(double x, double alpha) { } else { return alpha * x; } -} - - - - +} \ No newline at end of file diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index a63f9de..3c72951 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -7,24 +7,61 @@ #ifndef ACTIVATION_FUNCTIONS_HPP #define ACTIVATION_FUNCTIONS_HPP #include "../core.hpp" - - +template /** - * @param x {double x} - double value on which the function is applied. - * - * @param x {vector} - vector containing 'double' values of x for softmax activation function implementation. - * - * @return {double value} - double value after putting x in the functions gets returned. 
+ * @brief To calculate sigmoid(x) + * @param x: Number whose sigmoid value is to be calculated + * @return a double value representing sigmoid(x) */ +double sigmoid(double); -template +/** + * @brief To calculate tan(x) + * @param x: Number whose tan value is to be calculated + * @return a double value representing tan(x) + */ -double sigmoid(double); double tanh(double); + +/** + * @brief To calculate ReLU(x) + * @param x: Number whose ReLU value is to be calculated + * @return a double value representing ReLU(x) + */ + double ReLU(double); + +/** + * @brief To calculate leakyReLU(x) + * @param x: Number whose leakyReLU value is to be calculated + * @return a double value representing leakyReLU(x) + */ + double leakyReLU(double, double); + +/** + * @brief To calculate softmax(x) + * @param x {vector} - vector containing 'double' values of x whose softmax values have to be calculated. + * + * @return vector containing 'double' values representing softmax(x) + */ + std::vector softmax(const std::vector &); + +/** + * @brief To calculate arctan(x) + * @param x: Number whose tan inverse value is to be calculated + * @return a double value representing arctan(x) + */ + double arctan(double); + +/** + * @brief To calculate binaryStep(x) + * @param x: Number whose binaryStep value is to be calculated + * @return a double value representing binaryStep(x) + */ + double binaryStep(double); #endif // ACTIVATION_FUNCTIONS_HPP \ No newline at end of file From 8184ff7918acb347d53cee31b442b6baf7ac1091 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 14:18:30 +0530 Subject: [PATCH 04/20] Formatted code using clang-format --- .../methods/activation_functions.cpp | 105 ++++++++++-------- .../methods/activation_functions.hpp | 10 +- 2 files changed, 64 insertions(+), 51 deletions(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index 742dce6..43a1ccb 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -5,57 +5,68 @@ */ #include "activation_functions.hpp" template -//sigmoid -double sigmoid(double x) { - return 1 / (1 + std::exp(-x)); -} -//ReLU -double ReLU(double x) { - if (x > 0) { - return x; - } else { - return 0; - } +// sigmoid +double sigmoid(double x) +{ + return 1 / (1 + std::exp(-x)); } -//tanh -double tanh(double x) { - double result = (std::exp(x) - std::exp(-x)) / (std::exp(x) + std::exp(-x)); - return result; +// ReLU +double ReLU(double x) +{ + if (x > 0) + { + return x; + } + else + { + return 0; + } } -//tan inverse -double arctan(double x) { - return std::atan(x); +// tanh +double tanh(double x) +{ + double result = (std::exp(x) - std::exp(-x)) / (std::exp(x) + std::exp(-x)); + return result; } +// tan inverse +double arctan(double x) { return std::atan(x); } -//softmax -std::vector softmax(const std::vector &x) { - std::vector result(x.size()); - double sum = 0; - for (double value : x) { - sum += std::exp(value); - } - for (int i = 0; i < x.size(); i++) { - result[i] = std::exp(x[i]) / sum; - } - return result; +// softmax +std::vector softmax(const std::vector &x) +{ + std::vector result(x.size()); + double sum = 0; + for (double value : x) + { + sum += std::exp(value); + } + for (int i = 0; i < x.size(); i++) + { + result[i] = std::exp(x[i]) / sum; + } + return result; } -//binary step -double binaryStep(double x) { - if (x >= 0) { - return 1; //assuming threshold value to be 0 here - } else { - return 0; - } +// binary step +double 
binaryStep(double x)
{
  if (x >= 0)
  {
    return 1; // assuming threshold value to be 0 here
  }
  else
  {
    return 0;
  }
}
// leaky ReLU
double leakyReLU(double x, double alpha)
{
  if (x >= 0)
  {
    return x;
  }
  else
  {
    return alpha * x;
  }
}

diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
index a63f9de..37931ee 100644
--- a/src/slowmokit/methods/activation_functions.hpp
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -11,10 +11,12 @@

 /**
  * @param x {double x} - double value on which the function is applied.
- *
- * @param x {vector} - vector containing 'double' values of x for softmax activation function implementation.
- *
- * @return {double value} - double value after putting x in the functions gets returned.
+ *
+ * @param x {vector} - vector containing 'double' values of x for
+ * softmax activation function implementation.
+ *
+ * @return {double value} - double value after putting x in the functions gets
+ * returned.
  */

 template

From 4111d842ee401966b4845d037f71e79ac2c66c9d Mon Sep 17 00:00:00 2001
From: NandiniGera
Date: Sun, 12 Feb 2023 17:08:27 +0530
Subject: [PATCH 05/20] updated

---
 docs/methods/activation_functions.md           | 13 ++++++++++---
 docs/methods/preprocessing/one_hot_encoder.md  |  4 ++--
 src/slowmokit/methods/activation_functions.cpp |  4 ++--
 3 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/docs/methods/activation_functions.md b/docs/methods/activation_functions.md
index de2e2f3..654b7db 100644
--- a/docs/methods/activation_functions.md
+++ b/docs/methods/activation_functions.md
@@ -4,7 +4,7 @@

 tanh- The tanh activation function is a hyperbolic tangent sigmoid function with a range of -1 to 1. It is often used in deep learning models for its ability to model nonlinear boundaries.

-tan-1h- The inverse of tan. The ArcTan function is a sigmoid function to model accelerating and decelerating outputs but with useful output ranges.
+tan-1h- The ArcTan function is a sigmoid function to model accelerating and decelerating outputs but with useful output ranges. This activation function maps the input values in the range (−π/2, π/2). Its derivative converges quadratically against 0 for large input values.

 ReLU- The ReLU activation function returns 0 if the input value is less than 0; for any positive input, the output is the same as the input. It is continuous everywhere but non-differentiable at 0, and its derivative is 0 for any negative input.
@@ -23,11 +23,18 @@
 | x | double value on which the function is applied. | `double` |


-## Methods
+## Functions

 | Name | Definition | Return value |
 |----------------------------------------|-----------------------------------------------|---------------|
-| y | double value returned after applying the chosen function to x. | `double` |
+|sigmoid(x)| 1 / (1 + e^(-x)) | `double` |
+|tanh(x)| (e^x - e^(-x)) / (e^x + e^(-x)) | `double` |
+|arctan(x)| the inverse of tan(x) | `double` |
+|ReLU(x)| max(0, x) | `double` |
+|leakyReLU(x)| max(αx, x), α=0.1 | `double` |
+|binaryStep(x)| 0, if x < 0
+   1, if x ≥ 0 | `double` |
+|softmax(x)| e^(x_i) / Σ_j e^(x_j) | vector |
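+
+As a quick worked check of the softmax row (an editorial illustration, rounded to three decimal places): for x = [1, 2, 3] the denominator is e^1 + e^2 + e^3 ≈ 2.718 + 7.389 + 20.086 ≈ 30.193, so
+
+```
+softmax([1, 2, 3]) ≈ [0.090, 0.245, 0.665]
+```
+
+and the entries always sum to 1.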

 ## Example

diff --git a/docs/methods/preprocessing/one_hot_encoder.md b/docs/methods/preprocessing/one_hot_encoder.md
index dd20ce6..92bcb1c 100644
--- a/docs/methods/preprocessing/one_hot_encoder.md
+++ b/docs/methods/preprocessing/one_hot_encoder.md
@@ -9,14 +9,14 @@

 | Name | Definition | Type |
 |--------------|--------------------------------------------|--------------|
-| data | The data that has to be encoded is passed as the data parameter in the oneHotEncoder function. | `vector` |
+| data | The data that has to be encoded is passed as the data parameter in the oneHotEncoder function. | `vector<string>` |
 | nClasses | This parameter is an integer that specifies the number of classes or categories in the input data. | `int` |

 ## Methods

 | Name | Definition | Return value |
 |----------------------------------------|-----------------------------------------------|---------------|
-| `oneHotEncoder(vector data, nClasses)` | To encode the data into numerical values. | `vector` |
+| `oneHotEncoder(vector<string> data, nClasses)` | To encode the data into numerical values. | `vector<vector<int>>` |

diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp
index 61738b9..3b1235c 100644
--- a/src/slowmokit/methods/activation_functions.cpp
+++ b/src/slowmokit/methods/activation_functions.cpp
@@ -48,10 +48,10 @@ double binaryStep(double x) {
   }
 }
 //leaky ReLU
-double leakyReLU(double x, double alpha) {
+double leakyReLU(double x) {
   if (x >= 0) {
     return x;
   } else {
-    return alpha * x;
+    return 0.1 * x; //alpha=0.1
   }
 }
\ No newline at end of file

From 5015e462dd9619a8ca6b08b9684980829c0a1bb8 Mon Sep 17 00:00:00 2001
From: NandiniGera <97438986+NandiniGera@users.noreply.github.com>
Date: Sun, 12 Feb 2023 17:26:02 +0530
Subject: [PATCH 06/20] Update src/slowmokit/methods/activation_functions.hpp

Co-authored-by: Ishwarendra Jha <75680424+Ishwarendra@users.noreply.github.com>
---
 src/slowmokit/methods/activation_functions.hpp | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
index 8408817..671ea39 100644
--- a/src/slowmokit/methods/activation_functions.hpp
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -16,15 +16,6 @@ template
 double sigmoid(double);

 /**
-<<<<<<< HEAD
- * @param x {double x} - double value on which the function is applied.
- *
- * @param x {vector} - vector containing 'double' values of x for
- * softmax activation function implementation.
- *
- * @return {double value} - double value after putting x in the functions gets
- * returned.
-======= * @brief To calculate tan(x) * @param x: Number whose tan value is to be calculated * @return a double value representing tan(x) From 357ee7ba8cd4f03a304c62b3157608e6230dba12 Mon Sep 17 00:00:00 2001 From: NandiniGera <97438986+NandiniGera@users.noreply.github.com> Date: Sun, 12 Feb 2023 17:26:32 +0530 Subject: [PATCH 07/20] Update src/slowmokit/methods/activation_functions.hpp Co-authored-by: Ishwarendra Jha <75680424+Ishwarendra@users.noreply.github.com> --- src/slowmokit/methods/activation_functions.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index 671ea39..3c72951 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -19,7 +19,6 @@ double sigmoid(double); * @brief To calculate tan(x) * @param x: Number whose tan value is to be calculated * @return a double value representing tan(x) ->>>>>>> 5eebc29054fab6686e728aca29e64e1c53dd7a8c */ double tanh(double); From c26aba77f03d2ad60dd06661e7f9314fb4e60edc Mon Sep 17 00:00:00 2001 From: NandiniGera <97438986+NandiniGera@users.noreply.github.com> Date: Sun, 12 Feb 2023 17:26:46 +0530 Subject: [PATCH 08/20] Update src/slowmokit/methods/activation_functions.cpp Co-authored-by: Ishwarendra Jha <75680424+Ishwarendra@users.noreply.github.com> --- src/slowmokit/methods/activation_functions.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index be707f1..5e6fd34 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -58,7 +58,6 @@ double binaryStep(double x) return 0; } } -<<<<<<< HEAD // leaky ReLU double leakyReLU(double x, double alpha) { From c7438e600416e2351eb11f37e2e3a21e341547b3 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 17:32:33 +0530 Subject: [PATCH 09/20] updated --- src/slowmokit/methods/activation_functions.cpp | 16 +--------------- src/slowmokit/methods/activation_functions.hpp | 9 --------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index be707f1..f4be7ef 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -58,20 +58,6 @@ double binaryStep(double x) return 0; } } -<<<<<<< HEAD -// leaky ReLU -double leakyReLU(double x, double alpha) -{ - if (x >= 0) - { - return x; - } - else - { - return alpha * x; - } -} -======= //leaky ReLU double leakyReLU(double x) { if (x >= 0) { @@ -80,4 +66,4 @@ double leakyReLU(double x) { return 0.1 * x; //alpha=0.1 } } ->>>>>>> 5eebc29054fab6686e728aca29e64e1c53dd7a8c + diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index 8408817..671ea39 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -16,15 +16,6 @@ template double sigmoid(double); /** -<<<<<<< HEAD - * @param x {double x} - double value on which the function is applied. - * - * @param x {vector} - vector containing 'double' values of x for - * softmax activation function implementation. - * - * @return {double value} - double value after putting x in the functions gets - * returned. 
-======= * @brief To calculate tan(x) * @param x: Number whose tan value is to be calculated * @return a double value representing tan(x) From 41a1f6eb88d96915ae3881543101f89479ff68f3 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 18:09:43 +0530 Subject: [PATCH 10/20] Formatted code using clang-format --- .../methods/activation_functions.cpp | 20 +++++++++++-------- .../methods/activation_functions.hpp | 5 +++-- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index e3cb1ec..ce75930 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -71,13 +71,17 @@ double leakyReLU(double x, double alpha) return alpha * x; } } -======= -//leaky ReLU -double leakyReLU(double x, double alpha) { - if (x >= 0) { - return x; - } else { - return alpha * x; - } +== == == = + // leaky ReLU + double leakyReLU(double x, double alpha) +{ + if (x >= 0) + { + return x; + } + else + { + return alpha * x; + } } >>>>>>> 5eebc29054fab6686e728aca29e64e1c53dd7a8c diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index 8408817..d1e63cf 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -51,8 +51,9 @@ double leakyReLU(double, double); /** * @brief To calculate softmax(x) - * @param x {vector} - vector containing 'double' values of x whose softmax values have to be calculated. - * + * @param x {vector} - vector containing 'double' values of x whose + * softmax values have to be calculated. + * * @return vector containing 'double' values representing softmax(x) */ From 110d2f634c3bc09974a6e2a331cf63d197941095 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 20:16:03 +0530 Subject: [PATCH 11/20] updated all changes --- docs/methods/activation_functions.md | 77 ++++++++++------ examples/activation_functions_eg.cpp | 72 ++++++++++----- .../methods/activation_functions.cpp | 87 +++++++++++-------- .../methods/activation_functions.hpp | 54 +++++++----- 4 files changed, 183 insertions(+), 107 deletions(-) diff --git a/docs/methods/activation_functions.md b/docs/methods/activation_functions.md index 654b7db..99b3478 100644 --- a/docs/methods/activation_functions.md +++ b/docs/methods/activation_functions.md @@ -15,7 +15,7 @@ softmax-The softmax is a more generalised form of the sigmoid. It is used in mul binaryStep-The Step activation function is used in the perceptron network. This is usually used in single-layer networks to convert to an output that is binary (0 or 1).These are called Binary Step Function. - +The following function definitions are defined for a single variable for making it comprehendible and are implemented for a vector of 'double' values in the original code. ## Parameters | Name | Definition | Type | @@ -34,48 +34,75 @@ binaryStep-The Step activation function is used in the perceptron network. 
This |leakyReLU(x)| max(αx, x),α=0.1 | `double` | |binaryStep(x)| 0, if x < 0 1, if x ≥ 0 | `double` | -|softmax(x)| e^(x_i) / Σ_j e^(x_j) | vector | +|softmax(x)| e^(x_i) / Σ_j e^(x_j) | `double` | + ## Example ```cpp int main(){ //sigmoid example -double x = 1.0; -double y = sigmoid(x); -std::cout << "sigmoid(" << x << ") = " << y << std::endl; +std::vector x = {-1,0,1}; +std::vector y = sigmoid(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; + } + return 0; + +} //tanh example -double x = -1.0; -double y = tanh(x); -std::cout << "tanh(" << x << ") = " << y << std::endl; +std::vector x = {-1,0,1}; +std::vector y = tanh(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; + } + return 0; + +} - //tan inverse example -double x = 0.0; -double y = arctan(x); -std::cout << "arctan(" << x << ") = " << y << std::endl; + //arctan example +std::vector x = {-1,0,1}; +std::vector y = arctan(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; + } + return 0; +} //ReLU example -double x = 1.0; -double y = ReLU(x); -std::cout << "ReLU(" << x << ") = " << y << std::endl; +std::vector x = {1, 2, 3}; +std::vector y = ReLU(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; + } + return 0; + +} //leakyReLU example -double x = -1.0; -double alpha = 0.01; -y = leakyReLU(x, alpha); -std::cout << "leakyReLU(" << x << ", " << alpha << ") = " << y << std::endl; +std::vector x = {1, 2, 3}; +std::vector y = leakyReLU(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; + } + return 0; +} //binaryStep example -double x = 1.0; -double y = binaryStep(x); -std::cout << "binaryStep(" << x << ") = " << y << std::endl; +std::vector x = {1, 2, 3}; +std::vector y = binaryStep(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; + } + return 0; +} //softmax example std::vector x = {1, 2, 3}; -std::vector result = softmax(x); - for (double value : result) { - std::cout << value << " "; +std::vector y= softmax(x); + for (int i = 0; i < y.size(); i++) { + std::cout << y[i] << " "; } return 0; diff --git a/examples/activation_functions_eg.cpp b/examples/activation_functions_eg.cpp index 0d65a49..10b87c2 100644 --- a/examples/activation_functions_eg.cpp +++ b/examples/activation_functions_eg.cpp @@ -1,40 +1,66 @@ //int main(){ //sigmoid example -//double x = 1.0; -//double y = sigmoid(x); -//std::cout << "sigmoid(" << x << ") = " << y << std::endl; +//std::vector x = {-1,0,1}; +//std::vector y = sigmoid(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; +// } +// return 0; + +//} //tanh example -//double x = -1.0; -//double y = tanh(x); -//std::cout << "tanh(" << x << ") = " << y << std::endl; +//std::vector x = {-1,0,1}; +//std::vector y = tanh(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; +// } +// return 0; - //tan inverse example -//double x = 0.0; -//double y = arctan(x); -//std::cout << "arctan(" << x << ") = " << y << std::endl; +//} + //arctan example +//std::vector x = {-1,0,1}; +//std::vector y = arctan(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; +// } +// return 0; + +//} //ReLU example -//double x = 1.0; -//double y = ReLU(x); -//std::cout << "ReLU(" << x << ") = " << y << std::endl; +//std::vector x = {1, 2, 3}; +//std::vector y = ReLU(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; +// } +// return 0; + +//} //leakyReLU example -//double x = -1.0; -//double alpha = 0.01; -//y = leakyReLU(x, 
alpha); -//std::cout << "leakyReLU(" << x << ", " << alpha << ") = " << y << std::endl; +//std::vector x = {1, 2, 3}; +//std::vector y = leakyReLU(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; +// } +// return 0; +//} //binaryStep example -//double x = 1.0; -//double y = binaryStep(x); -//std::cout << "binaryStep(" << x << ") = " << y << std::endl; +//std::vector x = {1, 2, 3}; +//std::vector y = binaryStep(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; +// } +// return 0; +//} //softmax example //std::vector x = {1, 2, 3}; -//std::vector result = softmax(x); -// for (double value : result) { -// std::cout << value << " "; +//std::vector y = softmax(x); +// for (int i = 0; i < y.size(); i++) { +// std::cout << y[i] << " "; // } // return 0; diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index f4be7ef..f142af0 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -6,35 +6,44 @@ #include "activation_functions.hpp" template // sigmoid -double sigmoid(double x) -{ - return 1 / (1 + std::exp(-x)); +std::vector sigmoid(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + y[i] = 1 / (1 + exp(-x[i])); + } + return y; } // ReLU -double ReLU(double x) -{ - if (x > 0) - { - return x; - } - else - { - return 0; - } +std::vector ReLU(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + if(x[i]>0.0) + { y[i]=x[i]; } + else + { y[i]=0.0; } + } + return y; } // tanh -double tanh(double x) -{ - double result = (std::exp(x) - std::exp(-x)) / (std::exp(x) + std::exp(-x)); - return result; +std::vector tanh(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + y[i] = (std::exp(x[i]) - std::exp(-x[i])) / (std::exp(x[i]) + std::exp(-x[i])); + } + return y; +} +// arctan +std::vector arctan(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + y[i] = atan(x[i]); + } + return y; } -// tan inverse -double arctan(double x) { return std::atan(x); } // softmax std::vector softmax(const std::vector &x) -{ - std::vector result(x.size()); +{ std::vector y(x.size()); double sum = 0; for (double value : x) { @@ -42,28 +51,34 @@ std::vector softmax(const std::vector &x) } for (int i = 0; i < x.size(); i++) { - result[i] = std::exp(x[i]) / sum; + y[i] = std::exp(x[i]) / sum; } - return result; + return y; } -// binary step -double binaryStep(double x) -{ - if (x >= 0) +// binarystep +std::vector binaryStep(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + if (x[i] >= 0) { - return 1; // assuming threshold value to be 0 here - } + y[i]=1; }// assuming threshold value to be 0 here else { - return 0; - } + y[i]=0; } + } + return y; } -//leaky ReLU -double leakyReLU(double x) { - if (x >= 0) { - return x; +//leakyReLU +std::vector leakyReLU(const std::vector &x) { + std::vector y(x.size()); + double alpha=0.1; + for (int i = 0; i < x.size(); i++) { + if (x[i] >= 0) { + y[i]=x[i]; } else { - return 0.1 * x; //alpha=0.1 + y[i]= alpha * x[i]; //alpha=0.1 + }; } + return y; } diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index 3c72951..99e62d4 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -8,36 +8,42 @@ #define 
ACTIVATION_FUNCTIONS_HPP #include "../core.hpp" template + /** * @brief To calculate sigmoid(x) - * @param x: Number whose sigmoid value is to be calculated - * @return a double value representing sigmoid(x) + * @param x {vector} - vector containing 'double' values of x whose sigmoid values have to be calculated. + * + * @return vector containing 'double' values representing sigmoid(x) */ -double sigmoid(double); + +std::vector sigmoid(const std::vector &); /** - * @brief To calculate tan(x) - * @param x: Number whose tan value is to be calculated - * @return a double value representing tan(x) + * @brief To calculate ReLU(x) + * @param x {vector} - vector containing 'double' values of x whose ReLU values have to be calculated. + * + * @return vector containing 'double' values representing ReLU(x) */ -double tanh(double); +std::vector ReLU(const std::vector &); /** - * @brief To calculate ReLU(x) - * @param x: Number whose ReLU value is to be calculated - * @return a double value representing ReLU(x) + * @brief To calculate tanh(x) + * @param x {vector} - vector containing 'double' values of x whose tanh values have to be calculated. + * + * @return vector containing 'double' values representing tanh(x) */ -double ReLU(double); +std::vector tanh(const std::vector &); /** - * @brief To calculate leakyReLU(x) - * @param x: Number whose leakyReLU value is to be calculated - * @return a double value representing leakyReLU(x) + * @brief To calculate arctan(x) + * @param x {vector} - vector containing 'double' values of x whose arctan values have to be calculated. + * + * @return vector containing 'double' values representing arctan(x) */ -double leakyReLU(double, double); +std::vector arctan(const std::vector &); /** * @brief To calculate softmax(x) @@ -49,19 +55,21 @@ double leakyReLU(double, double); std::vector softmax(const std::vector &); /** - * @brief To calculate arctan(x) - * @param x: Number whose tan inverse value is to be calculated - * @return a double value representing arctan(x) + * @brief To calculate binaryStep(x) + * @param x {vector} - vector containing 'double' values of x whose binaryStep values have to be calculated. + * + * @return vector containing 'double' values representing binaryStep(x) */ -double arctan(double); +std::vector binaryStep(const std::vector &); /** - * @brief To calculate binaryStep(x) - * @param x: Number whose binaryStep value is to be calculated - * @return a double value representing binaryStep(x) + * @brief To calculate leakyReLU(x) + * @param x {vector} - vector containing 'double' values of x whose leakyReLU values have to be calculated. 
+ * + * @return vector containing 'double' values representing leakyReLU(x) */ -double binaryStep(double); +std::vector leakyReLU(const std::vector &); #endif // ACTIVATION_FUNCTIONS_HPP \ No newline at end of file From b1a9bbaa218d5df30140aa27807b5820e1f1bd85 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 20:43:43 +0530 Subject: [PATCH 12/20] updated all changes --- .../methods/activation_functions.cpp | 26 ++++++++++++++++--- .../methods/activation_functions.hpp | 18 +++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index f142af0..d8ab6c4 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -63,9 +63,8 @@ std::vector binaryStep(const std::vector &x) { { y[i]=1; }// assuming threshold value to be 0 here else - { - y[i]=0; } - } + { y[i]=0; } + } return y; } //leakyReLU @@ -81,4 +80,23 @@ std::vector leakyReLU(const std::vector &x) { } return y; } - +//binaryToBipolar Conversion +//x= binary numbers entered by the user +//y= bipolar numbers to be produced as output +std::vector binaryToBipolar(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + y[i] = 2 * x[i] - 1; + } + return y; +} +//bipolarToBinary Conversion +//x= bipolar numbers entered by the user +//y= binary numbers to be produced as output +std::vector bipolarToBinary(const std::vector &x) { + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { + y[i] = (x[i] + 1) / 2; + } + return y; +} \ No newline at end of file diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index 99e62d4..27c5199 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -72,4 +72,22 @@ std::vector binaryStep(const std::vector &); std::vector leakyReLU(const std::vector &); +/** + * @brief To convert binaryToBipolar(x) + * @param x {vector} - vector containing 'double' values of x who have to be converted to bipolar numbers. + * + * @return vector containing 'double' values representing binaryToBipolar(x) +*/ + +std::vector binaryToBipolar(const std::vector &); + +/** + * @brief To convert bipolarToBinary(x) + * @param x {vector} - vector containing 'double' values of x who have to be converted to binary numbers. 
+ * + * @return vector containing 'double' values representing bipolarToBinary(x) + */ + +std::vector bipolarToBinary(const std::vector &); + #endif // ACTIVATION_FUNCTIONS_HPP \ No newline at end of file From 7c81c582dbc76428a2be3e1af8a81912e17f4863 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Sun, 12 Feb 2023 20:54:26 +0530 Subject: [PATCH 13/20] Formatted code using clang-format --- .../methods/activation_functions.cpp | 160 ++++++++++-------- .../methods/activation_functions.hpp | 42 +++-- 2 files changed, 119 insertions(+), 83 deletions(-) diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index d8ab6c4..c5989cc 100644 --- a/src/slowmokit/methods/activation_functions.cpp +++ b/src/slowmokit/methods/activation_functions.cpp @@ -6,44 +6,58 @@ #include "activation_functions.hpp" template // sigmoid -std::vector sigmoid(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - y[i] = 1 / (1 + exp(-x[i])); - } - return y; +std::vector sigmoid(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) + { + y[i] = 1 / (1 + exp(-x[i])); + } + return y; } // ReLU -std::vector ReLU(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - if(x[i]>0.0) - { y[i]=x[i]; } - else - { y[i]=0.0; } +std::vector ReLU(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) + { + if (x[i] > 0.0) + { + y[i] = x[i]; + } + else + { + y[i] = 0.0; } - return y; + } + return y; } // tanh -std::vector tanh(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - y[i] = (std::exp(x[i]) - std::exp(-x[i])) / (std::exp(x[i]) + std::exp(-x[i])); - } - return y; -} +std::vector tanh(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) + { + y[i] = + (std::exp(x[i]) - std::exp(-x[i])) / (std::exp(x[i]) + std::exp(-x[i])); + } + return y; +} // arctan -std::vector arctan(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - y[i] = atan(x[i]); - } - return y; +std::vector arctan(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) + { + y[i] = atan(x[i]); + } + return y; } // softmax std::vector softmax(const std::vector &x) -{ std::vector y(x.size()); +{ + std::vector y(x.size()); double sum = 0; for (double value : x) { @@ -56,47 +70,61 @@ std::vector softmax(const std::vector &x) return y; } // binarystep -std::vector binaryStep(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - if (x[i] >= 0) +std::vector binaryStep(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) { - y[i]=1; }// assuming threshold value to be 0 here - else - { y[i]=0; } - } - return y; -} -//leakyReLU -std::vector leakyReLU(const std::vector &x) { - std::vector y(x.size()); - double alpha=0.1; - for (int i = 0; i < x.size(); i++) { - if (x[i] >= 0) { - y[i]=x[i]; - } else { - y[i]= alpha * x[i]; //alpha=0.1 - }; + if (x[i] >= 0) + { + y[i] = 1; + } // assuming threshold value to be 0 here + else + { + y[i] = 0; } - return y; + } + return y; } -//binaryToBipolar Conversion -//x= binary numbers entered by the user -//y= bipolar numbers to be produced as output -std::vector binaryToBipolar(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - y[i] = 2 * x[i] - 1; +// leakyReLU +std::vector 
leakyReLU(const std::vector &x) +{ + std::vector y(x.size()); + double alpha = 0.1; + for (int i = 0; i < x.size(); i++) + { + if (x[i] >= 0) + { + y[i] = x[i]; } - return y; + else + { + y[i] = alpha * x[i]; // alpha=0.1 + }; + } + return y; } -//bipolarToBinary Conversion -//x= bipolar numbers entered by the user -//y= binary numbers to be produced as output -std::vector bipolarToBinary(const std::vector &x) { - std::vector y(x.size()); - for (int i = 0; i < x.size(); i++) { - y[i] = (x[i] + 1) / 2; - } - return y; +// binaryToBipolar Conversion +// x= binary numbers entered by the user +// y= bipolar numbers to be produced as output +std::vector binaryToBipolar(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) + { + y[i] = 2 * x[i] - 1; + } + return y; +} +// bipolarToBinary Conversion +// x= bipolar numbers entered by the user +// y= binary numbers to be produced as output +std::vector bipolarToBinary(const std::vector &x) +{ + std::vector y(x.size()); + for (int i = 0; i < x.size(); i++) + { + y[i] = (x[i] + 1) / 2; + } + return y; } \ No newline at end of file diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp index 6b7e36c..0ae5628 100644 --- a/src/slowmokit/methods/activation_functions.hpp +++ b/src/slowmokit/methods/activation_functions.hpp @@ -11,8 +11,9 @@ template /** * @brief To calculate sigmoid(x) - * @param x {vector} - vector containing 'double' values of x whose sigmoid values have to be calculated. - * + * @param x {vector} - vector containing 'double' values of x whose + * sigmoid values have to be calculated. + * * @return vector containing 'double' values representing sigmoid(x) */ @@ -20,8 +21,9 @@ std::vector sigmoid(const std::vector &); /** * @brief To calculate ReLU(x) - * @param x {vector} - vector containing 'double' values of x whose ReLU values have to be calculated. - * + * @param x {vector} - vector containing 'double' values of x whose ReLU + * values have to be calculated. + * * @return vector containing 'double' values representing ReLU(x) */ @@ -29,8 +31,9 @@ std::vector ReLU(const std::vector &); /** * @brief To calculate tanh(x) - * @param x {vector} - vector containing 'double' values of x whose tanh values have to be calculated. - * + * @param x {vector} - vector containing 'double' values of x whose tanh + * values have to be calculated. + * * @return vector containing 'double' values representing tanh(x) */ @@ -38,8 +41,9 @@ std::vector tanh(const std::vector &); /** * @brief To calculate arctan(x) - * @param x {vector} - vector containing 'double' values of x whose arctan values have to be calculated. - * + * @param x {vector} - vector containing 'double' values of x whose + * arctan values have to be calculated. + * * @return vector containing 'double' values representing arctan(x) */ @@ -57,8 +61,9 @@ std::vector softmax(const std::vector &); /** * @brief To calculate binaryStep(x) - * @param x {vector} - vector containing 'double' values of x whose binaryStep values have to be calculated. - * + * @param x {vector} - vector containing 'double' values of x whose + * binaryStep values have to be calculated. + * * @return vector containing 'double' values representing binaryStep(x) */ @@ -66,8 +71,9 @@ std::vector binaryStep(const std::vector &); /** * @brief To calculate leakyReLU(x) - * @param x {vector} - vector containing 'double' values of x whose leakyReLU values have to be calculated. 
- * + * @param x {vector} - vector containing 'double' values of x whose + * leakyReLU values have to be calculated. + * * @return vector containing 'double' values representing leakyReLU(x) */ @@ -75,17 +81,19 @@ std::vector leakyReLU(const std::vector &); /** * @brief To convert binaryToBipolar(x) - * @param x {vector} - vector containing 'double' values of x who have to be converted to bipolar numbers. - * + * @param x {vector} - vector containing 'double' values of x who have + * to be converted to bipolar numbers. + * * @return vector containing 'double' values representing binaryToBipolar(x) -*/ + */ std::vector binaryToBipolar(const std::vector &); /** * @brief To convert bipolarToBinary(x) - * @param x {vector} - vector containing 'double' values of x who have to be converted to binary numbers. - * + * @param x {vector} - vector containing 'double' values of x who have + * to be converted to binary numbers. + * * @return vector containing 'double' values representing bipolarToBinary(x) */ From 3e8af8ef743534911ad61e95a2d2a1f123021b93 Mon Sep 17 00:00:00 2001 From: Ishwarendra Jha <75680424+Ishwarendra@users.noreply.github.com> Date: Tue, 14 Feb 2023 21:28:11 +0530 Subject: [PATCH 14/20] fixed bracket issue --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index afbf43f..9889f7f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,6 +65,6 @@ add_library(slowmokit src/slowmokit/methods/metrics/mean_squared_error.hpp src/slowmokit/methods/metrics/mean_squared_error.cpp src/slowmokit/methods/activation_functions.cpp - src/slowmokit/methods/activation_functions.hpp) + src/slowmokit/methods/activation_functions.hpp src/slowmokit/methods/metrics/silhouette_score.cpp src/slowmokit/methods/metrics/silhouette_score.hpp) From eaa4e3aa9617c92c57fd587458703cf74a4ecbf4 Mon Sep 17 00:00:00 2001 From: NandiniGera Date: Wed, 15 Feb 2023 14:52:40 +0530 Subject: [PATCH 15/20] updated --- docs/methods/activation_functions.md | 81 ++++++++++--------- examples/activation_functions_eg.cpp | 67 ++++++++------- .../methods/activation_functions.cpp | 66 +++++---------- .../methods/activation_functions.hpp | 41 +++++----- 4 files changed, 123 insertions(+), 132 deletions(-) diff --git a/docs/methods/activation_functions.md b/docs/methods/activation_functions.md index 99b3478..8dd301f 100644 --- a/docs/methods/activation_functions.md +++ b/docs/methods/activation_functions.md @@ -43,66 +43,73 @@ The following function definitions are defined for a single variable for making int main(){ //sigmoid example std::vector x = {-1,0,1}; -std::vector y = sigmoid(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; - } + sigmoid(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; + } return 0; - -} - + //tanh example std::vector x = {-1,0,1}; -std::vector y = tanh(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; + tanh(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; } return 0; -} - //arctan example std::vector x = {-1,0,1}; -std::vector y = arctan(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; - } - return 0; + arctan(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; + } + return 0; -} //ReLU example std::vector x = {1, 2, 3}; -std::vector y = ReLU(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; + ReLU(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; } return 0; -} - //leakyReLU 
example std::vector x = {1, 2, 3}; -std::vector y = leakyReLU(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; + leakyReLU(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; } return 0; -} - + //binaryStep example std::vector x = {1, 2, 3}; -std::vector y = binaryStep(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; - } - return 0; -} + binaryStep(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; + } + return 0; //softmax example std::vector x = {1, 2, 3}; -std::vector y= softmax(x); - for (int i = 0; i < y.size(); i++) { - std::cout << y[i] << " "; + softmax(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; + } + return 0; + + //binaryToBipolar example +std::vector x = {-1,1}; + binaryToBipolar(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; + } + return 0; + + //bipolarToBinary example +std::vector x = {-1,1}; + bipolarToBinary(x); + for (int i = 0; i < x.size(); i++) { + std::cout << x[i] << " "; } return 0; diff --git a/examples/activation_functions_eg.cpp b/examples/activation_functions_eg.cpp index 10b87c2..33d9173 100644 --- a/examples/activation_functions_eg.cpp +++ b/examples/activation_functions_eg.cpp @@ -1,66 +1,73 @@ //int main(){ //sigmoid example //std::vector x = {-1,0,1}; -//std::vector y = sigmoid(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// sigmoid(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; -//} - //tanh example //std::vector x = {-1,0,1}; -//std::vector y = tanh(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// tanh(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; -//} - //arctan example //std::vector x = {-1,0,1}; -//std::vector y = arctan(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// arctan(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; -//} //ReLU example //std::vector x = {1, 2, 3}; -//std::vector y = ReLU(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// ReLU(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; -//} - //leakyReLU example //std::vector x = {1, 2, 3}; -//std::vector y = leakyReLU(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// leakyReLU(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; -//} //binaryStep example //std::vector x = {1, 2, 3}; -//std::vector y = binaryStep(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// binaryStep(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; -//} //softmax example //std::vector x = {1, 2, 3}; -//std::vector y = softmax(x); -// for (int i = 0; i < y.size(); i++) { -// std::cout << y[i] << " "; +// softmax(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; +// } +// return 0; + + //binaryToBipolar example +//std::vector x = {-1,1}; +// binaryToBipolar(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; +// } +// return 0; + + //bipolarToBinary example +//std::vector x = {-1,1}; +// bipolarToBinary(x); +// for (int i = 0; i < x.size(); i++) { +// std::cout << x[i] << " "; // } // return 0; diff --git a/src/slowmokit/methods/activation_functions.cpp b/src/slowmokit/methods/activation_functions.cpp index 
c5989cc..ef597fd 100644
--- a/src/slowmokit/methods/activation_functions.cpp
+++ b/src/slowmokit/methods/activation_functions.cpp
@@ -4,60 +4,46 @@
  * Implementation of activation functions
  */
 #include "activation_functions.hpp"
-template
 // sigmoid
-std::vector<double> sigmoid(const std::vector<double> &x)
+void sigmoid(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
-    y[i] = 1 / (1 + exp(-x[i]));
+    x[i] = 1 / (1 + std::exp(-x[i]));
   }
-  return y;
 }
 
 // ReLU
-std::vector<double> ReLU(const std::vector<double> &x)
+void ReLU(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
-    if (x[i] > 0.0)
+    if (x[i] <= 0.0)
     {
-      y[i] = x[i];
-    }
-    else
-    {
-      y[i] = 0.0;
+      x[i] = 0.0;
     }
   }
-  return y;
 }
 
 // tanh
-std::vector<double> tanh(const std::vector<double> &x)
+void tanh(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
-    y[i] =
+    x[i] =
         (std::exp(x[i]) - std::exp(-x[i])) / (std::exp(x[i]) + std::exp(-x[i]));
   }
-  return y;
 }
 
 // arctan
-std::vector<double> arctan(const std::vector<double> &x)
+void arctan(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
-    y[i] = atan(x[i]);
+    x[i] = std::atan(x[i]);
   }
-  return y;
 }
 
 // softmax
-std::vector<double> softmax(const std::vector<double> &x)
+void softmax(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   double sum = 0;
   for (double value : x)
   {
@@ -65,66 +51,56 @@ std::vector<double> softmax(const std::vector<double> &x)
   }
   for (int i = 0; i < x.size(); i++)
   {
-    y[i] = std::exp(x[i]) / sum;
+    x[i] = std::exp(x[i]) / sum;
   }
-  return y;
 }
 
 // binarystep
-std::vector<double> binaryStep(const std::vector<double> &x)
+void binaryStep(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
     if (x[i] >= 0)
     {
-      y[i] = 1;
+      x[i] = 1;
     } // assuming threshold value to be 0 here
     else
     {
-      y[i] = 0;
+      x[i] = 0;
     }
   }
-  return y;
 }
 
 // leakyReLU
-std::vector<double> leakyReLU(const std::vector<double> &x)
+void leakyReLU(std::vector<double> &x, double alpha)
 {
-  std::vector<double> y(x.size());
-  double alpha = 0.1;
   for (int i = 0; i < x.size(); i++)
   {
     if (x[i] >= 0)
     {
-      y[i] = x[i];
+      x[i] = x[i];
     }
     else
     {
-      y[i] = alpha * x[i]; // alpha=0.1
+      x[i] = alpha * x[i]; // default alpha = 0.1, supplied by the header
     };
   }
-  return y;
 }
 
 // binaryToBipolar Conversion
 // x= binary numbers entered by the user
 // x gets overwritten with the bipolar numbers produced as output
-std::vector<double> binaryToBipolar(const std::vector<double> &x)
+void binaryToBipolar(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
-    y[i] = 2 * x[i] - 1;
+    x[i] = 2 * x[i] - 1;
   }
-  return y;
 }
 
 // bipolarToBinary Conversion
 // x= bipolar numbers entered by the user
 // x gets overwritten with the binary numbers produced as output
-std::vector<double> bipolarToBinary(const std::vector<double> &x)
+void bipolarToBinary(std::vector<double> &x)
 {
-  std::vector<double> y(x.size());
   for (int i = 0; i < x.size(); i++)
   {
-    y[i] = (x[i] + 1) / 2;
+    x[i] = (x[i] + 1) / 2;
   }
-  return y;
 }
\ No newline at end of file

diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
index 0ae5628..04b32d4 100644
--- a/src/slowmokit/methods/activation_functions.hpp
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -14,89 +14,90 @@ template
  * @param x {vector} - vector containing 'double' values of x whose
  * sigmoid values have to be calculated.
  *
- * @return vector containing 'double' values representing sigmoid(x)
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
 */
-std::vector<double> sigmoid(const std::vector<double> &);
+void sigmoid(std::vector<double> &);
 
 /**
  * @brief To calculate ReLU(x)
  * @param x {vector} - vector containing 'double' values of x whose ReLU
  * values have to be calculated.
  *
- * @return vector containing 'double' values representing ReLU(x)
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
-std::vector<double> ReLU(const std::vector<double> &);
+void ReLU(std::vector<double> &);
 
 /**
  * @brief To calculate tanh(x)
  * @param x {vector} - vector containing 'double' values of x whose tanh
  * values have to be calculated.
  *
- * @return vector containing 'double' values representing tanh(x)
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
-std::vector<double> tanh(const std::vector<double> &);
+void tanh(std::vector<double> &);
 
 /**
  * @brief To calculate arctan(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * arctan values have to be calculated.
  *
- * @return vector containing 'double' values representing arctan(x)
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
-std::vector<double> arctan(const std::vector<double> &);
+void arctan(std::vector<double> &);
 
 /**
  * @brief To calculate softmax(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * softmax values have to be calculated.
  *
- * @return vector containing 'double' values representing softmax(x)
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
-std::vector<double> softmax(const std::vector<double> &);
+void softmax(std::vector<double> &);
 
 /**
  * @brief To calculate binaryStep(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * binaryStep values have to be calculated.
  *
- * @return vector containing 'double' values representing binaryStep(x)
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
-std::vector<double> binaryStep(const std::vector<double> &);
+void binaryStep(std::vector<double> &);
 
 /**
  * @brief To calculate leakyReLU(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * leakyReLU values have to be calculated.
- *
- * @return vector containing 'double' values representing leakyReLU(x)
+ * @param alpha {double} - alpha's default value = 0.1 declared as parameter.
+ *
+ * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
-std::vector<double> leakyReLU(const std::vector<double> &);
+void leakyReLU(std::vector<double> &, double = 0.1);
 
 /**
  * @brief To convert binaryToBipolar(x)
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to bipolar numbers.
- *
- * @return vector containing 'double' values representing binaryToBipolar(x)
+ *
+ * @return void, the initial vector x itself gets changed with the new bipolar values.
  */
-std::vector<double> binaryToBipolar(const std::vector<double> &);
+void binaryToBipolar(std::vector<double> &);
 
 /**
  * @brief To convert bipolarToBinary(x)
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to binary numbers.
  *
- * @return vector containing 'double' values representing bipolarToBinary(x)
+ * @return void, the initial vector x itself gets changed with the new binary values.
 */
-std::vector<double> bipolarToBinary(const std::vector<double> &);
+void bipolarToBinary(std::vector<double> &);
 
 #endif // ACTIVATION_FUNCTIONS_HPP
\ No newline at end of file

From 2b09f570371a7741f7c168e3869d6589068dd700 Mon Sep 17 00:00:00 2001
From: NandiniGera
Date: Wed, 15 Feb 2023 15:00:53 +0530
Subject: [PATCH 16/20] Formatted code using clang-format

---
 .../methods/activation_functions.hpp          | 33 ++++++++++++-------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
index 04b32d4..3914f67 100644
--- a/src/slowmokit/methods/activation_functions.hpp
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -14,7 +14,8 @@ template
  * @param x {vector} - vector containing 'double' values of x whose
  * sigmoid values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void sigmoid(std::vector<double> &);
 
@@ -24,7 +25,8 @@ void sigmoid(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x whose ReLU
  * values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void ReLU(std::vector<double> &);
 
@@ -34,7 +36,8 @@ void ReLU(std::vector<double> &);
  * @brief To calculate tanh(x)
  * @param x {vector} - vector containing 'double' values of x whose tanh
  * values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void tanh(std::vector<double> &);
 
@@ -44,7 +47,8 @@ void tanh(std::vector<double> &);
  * @brief To calculate arctan(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * arctan values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void arctan(std::vector<double> &);
 
@@ -54,7 +58,8 @@ void arctan(std::vector<double> &);
  * @brief To calculate softmax(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * softmax values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void softmax(std::vector<double> &);
 
@@ -64,7 +69,8 @@ void softmax(std::vector<double> &);
  * @brief To calculate binaryStep(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * binaryStep values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void binaryStep(std::vector<double> &);
 
@@ -73,9 +79,10 @@ void binaryStep(std::vector<double> &);
  * @brief To calculate leakyReLU(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * leakyReLU values have to be calculated.
- * @param alpha {double} - alpha's default value = 0.1 declared as parameter.
- *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ * @param alpha {double} - alpha's default value = 0.1 declared as parameter.
+ *
+ * @return void, the initial vector x itself gets changed with the new values of
+ * the activation function.
  */
 void leakyReLU(std::vector<double> &, double = 0.1);
 
@@ -84,8 +91,9 @@ void leakyReLU(std::vector<double> &, double = 0.1);
  * @brief To convert binaryToBipolar(x)
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to bipolar numbers.
- *
- * @return void, the initial vector x itself gets changed with the new bipolar values.
+ *
+ * @return void, the initial vector x itself gets changed with the new bipolar
+ * values.
  */
 void binaryToBipolar(std::vector<double> &);
 
@@ -95,7 +103,8 @@ void binaryToBipolar(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to binary numbers.
  *
- * @return void, the initial vector x itself gets changed with the new binary values.
+ * @return void, the initial vector x itself gets changed with the new binary
+ * values.
  */
 void bipolarToBinary(std::vector<double> &);

From ef42bac7ae0fcf04311adfb76c6c49dadbd0723b Mon Sep 17 00:00:00 2001
From: NandiniGera
Date: Wed, 15 Feb 2023 16:28:39 +0530
Subject: [PATCH 17/20] updated

---
 src/slowmokit/methods/activation_functions.hpp | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
index 04b32d4..2f832ec 100644
--- a/src/slowmokit/methods/activation_functions.hpp
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -14,7 +14,6 @@ template
  * @param x {vector} - vector containing 'double' values of x whose
  * sigmoid values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
 void sigmoid(std::vector<double> &);
 
@@ -24,7 +23,6 @@ void sigmoid(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x whose ReLU
  * values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
 void ReLU(std::vector<double> &);
 
@@ -34,7 +32,6 @@ void ReLU(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x whose tanh
  * values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
 void tanh(std::vector<double> &);
 
@@ -44,7 +41,6 @@ void tanh(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x whose
  * arctan values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
 void arctan(std::vector<double> &);
 
@@ -54,7 +50,6 @@ void arctan(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x whose
  * softmax values have to be calculated.
  *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
 void softmax(std::vector<double> &);
 
@@ -63,8 +58,7 @@ void softmax(std::vector<double> &);
  * @brief To calculate binaryStep(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * binaryStep values have to be calculated.
- *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
+ *
  */
 void binaryStep(std::vector<double> &);
 
@@ -75,7 +69,6 @@
  * @brief To calculate leakyReLU(x)
  * @param x {vector} - vector containing 'double' values of x whose
  * leakyReLU values have to be calculated.
  * @param alpha {double} - alpha's default value = 0.1 declared as parameter.
 *
- * @return void, the initial vector x itself gets changed with the new values of the activation function.
  */
 void leakyReLU(std::vector<double> &, double = 0.1);
 
@@ -85,7 +78,6 @@ void leakyReLU(std::vector<double> &, double = 0.1);
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to bipolar numbers.
  *
- * @return void, the initial vector x itself gets changed with the new bipolar values.
  */
 void binaryToBipolar(std::vector<double> &);
 
@@ -95,7 +87,6 @@ void binaryToBipolar(std::vector<double> &);
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to binary numbers.
  *
- * @return void, the initial vector x itself gets changed with the new binary values.
  */
 void bipolarToBinary(std::vector<double> &);

From 3f00e98a05bf2e625144e0550b9102e7d1313fe0 Mon Sep 17 00:00:00 2001
From: NandiniGera
Date: Wed, 15 Feb 2023 16:35:03 +0530
Subject: [PATCH 18/20] Formatted code using clang-format

---
 src/slowmokit/methods/activation_functions.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/slowmokit/methods/activation_functions.hpp b/src/slowmokit/methods/activation_functions.hpp
index 36feea8..7d21281 100644
--- a/src/slowmokit/methods/activation_functions.hpp
+++ b/src/slowmokit/methods/activation_functions.hpp
@@ -76,7 +76,7 @@ void leakyReLU(std::vector<double> &, double = 0.1);
  * @brief To convert binaryToBipolar(x)
  * @param x {vector} - vector containing 'double' values of x who have
  * to be converted to bipolar numbers.
- * 
+ *
  */
 void binaryToBipolar(std::vector<double> &);

From ee101ce5fe0746ba22949cf5b82b0c8636a5ec9c Mon Sep 17 00:00:00 2001
From: Uttam Mittal <78130443+uttammittal02@users.noreply.github.com>
Date: Wed, 15 Feb 2023 18:46:59 +0530
Subject: [PATCH 19/20] minor changes

---
 docs/methods/activation_functions.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/methods/activation_functions.md b/docs/methods/activation_functions.md
index 8dd301f..8fe1f7e 100644
--- a/docs/methods/activation_functions.md
+++ b/docs/methods/activation_functions.md
@@ -4,7 +4,7 @@ Sigmoid-It is computationally expensive, causes vanishing gradient problem and n
 
 tanh- The Tanh activation function is a hyperbolic tangent sigmoid function that has a range of -1 to 1. It is often used in deep learning models for its ability to model nonlinear boundaries
 
-tan-1h-The ArcTan function is a sigmoid function to model accelerating and decelerating outputs but with useful output ranges.This activation function maps the input values in the range (−π/2,π/2). Its derivative converges quadratically against 0 for large input values.
+arctan-The ArcTan function is a sigmoid function to model accelerating and decelerating outputs but with useful output ranges. This activation function maps the input values in the range (−π/2,π/2). Its derivative converges quadratically against 0 for large input values.
 
 ReLU-The ReLU activation function returns 0 if the input value to the function is less than 0 but for any positive input, the output is the same as the input. It is also continuous but non-differentiable at 0 and at values less than 0 because its derivative is 0 for any negative input.
@@ -33,7 +33,7 @@ The following function definitions are defined for a single variable for making
 |ReLU(x)| max(0, x) | `double` |
 |leakyReLU(x)| max(αx, x), α=0.1 | `double` |
 |binaryStep(x)| 0, if x < 0
-  1, if x ≥ 0 | `double` |
+| | 1, if x ≥ 0 | `double` |
 |softmax(x)| e^(x_i) / Σ_j e^(x_j) | `double` |
 

From 4618b534596adefdf613fc201601820a03edecf8 Mon Sep 17 00:00:00 2001
From: NandiniGera
Date: Thu, 16 Feb 2023 12:17:18 +0530
Subject: [PATCH 20/20] updated

---
 CMakeLists.txt | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f32da54..dc6fbb7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -85,7 +85,5 @@ add_library(slowmokit
     src/slowmokit/methods/metrics/mean_squared_error.hpp
     src/slowmokit/methods/metrics/mean_squared_error.cpp
     src/slowmokit/methods/metrics/silhouette_score.cpp
-    src/slowmokit/methods/metrics/silhouette_score.hpp
-    src/slowmokit/methods/activation_functions.cpp
-    src/slowmokit/methods/activation_functions.hpp)
+    src/slowmokit/methods/metrics/silhouette_score.hpp)
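To see the final in-place API end to end, here is a minimal usage sketch against the header as it stands after PATCH 17. It is illustrative rather than part of the patch series: the include path, the `std::vector<double>` element type, and the `printVec` helper are assumptions based on the declarations in `activation_functions.hpp`.

```
// usage_sketch.cpp -- hypothetical driver for the in-place activation API.
// Assumes the void-returning signatures declared in activation_functions.hpp
// and std::vector<double> as the container type; not part of the patch series.
#include <iostream>
#include <string>
#include <vector>

#include "slowmokit/methods/activation_functions.hpp"

// Prints a labelled vector after an in-place transform (sketch-only helper).
void printVec(const std::string &label, const std::vector<double> &v)
{
  std::cout << label << ": ";
  for (double value : v)
    std::cout << value << " ";
  std::cout << "\n";
}

int main()
{
  std::vector<double> a = {-1.0, 0.0, 1.0};
  sigmoid(a); // each element becomes 1 / (1 + e^(-x)), written back into a
  printVec("sigmoid", a);

  std::vector<double> b = {-2.0, 0.5, 3.0};
  leakyReLU(b, 0.01); // explicit alpha overrides the 0.1 default in the header
  printVec("leakyReLU", b);

  std::vector<double> c = {1.0, 2.0, 3.0};
  softmax(c); // caution: exp can overflow for large inputs; a numerically
              // safer variant would subtract max(c) from every element first
  printVec("softmax", c);

  std::vector<double> d = {0.0, 1.0, 1.0};
  binaryToBipolar(d); // maps {0, 1} to {-1, 1} via 2x - 1
  printVec("binaryToBipolar", d);

  return 0;
}
```

Because every function mutates its argument instead of returning a new vector, callers that still need the raw inputs must copy the vector before applying an activation; that copy is the trade-off of the allocation-free void API these patches converge on.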