-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutilities.h
More file actions
134 lines (95 loc) · 5.45 KB
/
utilities.h
File metadata and controls
134 lines (95 loc) · 5.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
#ifndef NEURALNETWORK_UTILITIES_H
#define NEURALNETWORK_UTILITIES_H
#include <functional>
#include <iostream>
#include <numeric>
#include <string>
#include <type_traits>
#include <vector>

#include <xtensor/containers/xarray.hpp>
#include <xtensor/containers/xtensor.hpp>
#include <xtensor/misc/xsort.hpp>

#include "types.h"
// --- Activation functions and their derivatives (definitions in utilities.cpp) ---
xt::xarray<float> no_activation(const xt::xarray<float>& x);
xt::xarray<float> no_activation_derivative(const xt::xarray<float>& x);
xt::xarray<float> ReLU(const xt::xarray<float>& x);
xt::xarray<float> ReLU_derivative(const xt::xarray<float>& x);
xt::xarray<float> GELU(const xt::xarray<float>& x);
xt::xarray<float> GELU_derivative(const xt::xarray<float>& x);
// NOTE(review): names suggest the usual tanh/sigmoid GELU approximations —
// confirm the exact formulas against the .cpp implementations.
xt::xarray<float> GELU_approx(const xt::xarray<float>& x);
xt::xarray<float> GELU_approx_derivative(const xt::xarray<float>& x);
xt::xarray<float> GELU_fast(const xt::xarray<float>& x);
xt::xarray<float> GELU_fast_derivative(const xt::xarray<float>& x);
xt::xarray<float> sigmoid(const xt::xarray<float>& x);
xt::xarray<float> sigmoid_derivative(const xt::xarray<float>& x);
xt::xarray<float> softmax(const xt::xarray<float>& x);

// --- Loss functions ---
xt::xarray<float> cross_entropy_loss(const xt::xarray<float>& probs, const xt::xarray<float>& label);
xt::xarray<float> MSE(const xt::xarray<float>& activation, const xt::xarray<float>& labels);
xt::xarray<float> MSE_derivative(const xt::xarray<float>& activation, const xt::xarray<float>& labels);

// --- Lookups mapping enum IDs (see types.h) to function pointers/objects ---
ActivationFunction get_activation_function(ActivationID activation_id);
ActivationDerivative get_activation_derivative(ActivationID activation_id);
CostFunction get_cost_function(CostID cost_id);
CostDerivative get_cost_derivative(CostID cost_id);
// Error term for the output layer, combining the chosen activation and cost.
xt::xarray<float> get_output_error(const xt::xarray<float>& output, const xt::xarray<float>& activations,
                                   const xt::xarray<float>& labels, ActivationID activation_id, CostID cost_id);

// --- Tensor shape/packing helpers ---
xt::xarray<float> convert_vec_inputs(const std::vector<xt::xarray<float>>& inputs);
// Slice out / write back the 2-D plane at `index` along the first axis of a 3-D tensor.
xt::xtensor<float, 2> index_3d(xt::xtensor<float, 3>& inputs, size_t index);
void set_3d(xt::xtensor<float, 3>& inputs, xt::xtensor<float, 2>& value, size_t index);

// --- Optimizer update steps (Adam / AdamW) for 1-D, 2-D and 4-D parameter tensors.
// m_/v_ are the first- and second-moment accumulators; `timestep` drives bias correction.
void update_adam_1d(xt::xtensor<float, 1>& weights, xt::xtensor<float, 1>& grad_weights,
                    xt::xtensor<float, 1>& m_weights, xt::xtensor<float, 1>& v_weights,
                    float lr, float beta1, float beta2, size_t timestep);
void update_adam_2d(xt::xtensor<float, 2>& weights, xt::xtensor<float, 2>& grad_weights,
                    xt::xtensor<float, 2>& m_weights, xt::xtensor<float, 2>& v_weights,
                    float lr, float beta1, float beta2, size_t timestep);
void update_adam_4d(xt::xtensor<float, 4>& weights, xt::xtensor<float, 4>& grad_weights,
                    xt::xtensor<float, 4>& m_weights, xt::xtensor<float, 4>& v_weights,
                    float lr, float beta1, float beta2, size_t timestep);
// AdamW variants additionally apply decoupled weight decay.
void update_adamw_1d(xt::xtensor<float, 1>& weights, xt::xtensor<float, 1>& grad_weights,
                     xt::xtensor<float, 1>& m_weights, xt::xtensor<float, 1>& v_weights,
                     float lr, float beta1, float beta2, float weight_decay, size_t timestep);
void update_adamw_2d(xt::xtensor<float, 2>& weights, xt::xtensor<float, 2>& grad_weights,
                     xt::xtensor<float, 2>& m_weights, xt::xtensor<float, 2>& v_weights,
                     float lr, float beta1, float beta2, float weight_decay, size_t timestep);
void update_adamw_4d(xt::xtensor<float, 4>& weights, xt::xtensor<float, 4>& grad_weights,
                     xt::xtensor<float, 4>& m_weights, xt::xtensor<float, 4>& v_weights,
                     float lr, float beta1, float beta2, float weight_decay, size_t timestep);

// --- Misc utilities ---
std::string format_time(size_t secs);
// Flatten a tensor's elements onto the end of `all` (serialization)...
void save_1d(std::vector<float>& all, xt::xtensor<float, 1>& x);
void save_2d(std::vector<float>& all, xt::xtensor<float, 2>& x);
void save_4d(std::vector<float>& all, xt::xtensor<float, 4>& x);
// ...and read them back out starting at `index`, which is advanced in place.
void load_1d(xt::xtensor<float, 1>& all, xt::xtensor<float, 1>& x, size_t& index);
void load_2d(xt::xtensor<float, 1>& all, xt::xtensor<float, 2>& x, size_t& index);
void load_4d(xt::xtensor<float, 1>& all, xt::xtensor<float, 4>& x, size_t& index);
// Split `s` on `delim`; the templated overload writes parts to an output iterator.
template <typename Out>
void split(const std::string &s, char delim, Out result);
std::vector<std::string> split(const std::string &s, char delim);
// Prints a shape container to stdout as "(d0, d1, ...)"; an empty shape
// prints "()". Fixes the original's unconditional shape[0] access (undefined
// behavior for an empty shape) and its signed/unsigned loop comparison.
template <typename Shape>
void print_shape(const Shape& shape) {
    std::string s = "(";
    for (size_t i = 0; i < shape.size(); i++) {
        if (i > 0) s += ", ";
        s += std::to_string(shape[i]);
    }
    s += ")";
    std::cout << s << std::endl;
}
// Prints a 2-D tensor to stdout (definition in utilities.cpp).
void print_2d(xt::xtensor<float, 2>& inp);
// Converts a flat (linear, row-major) index into one index per dimension of
// `shape` — the inverse of ravelling. Returns an empty vector for an empty
// shape. The leading index absorbs any overflow when flat_index exceeds the
// total element count, matching the suffix-product formulation exactly.
template <typename Shape>
std::vector<size_t> unravel_index(size_t flat_index, const Shape& shape) {
    const size_t ndim = shape.size();
    std::vector<size_t> coords(ndim);
    // Peel off the fastest-varying dimensions first with repeated div/mod.
    for (size_t d = ndim; d-- > 1;) {
        coords[d] = flat_index % shape[d];
        flat_index /= shape[d];
    }
    if (ndim != 0) coords[0] = flat_index;
    return coords;
}
// Product of the container's elements over the inclusive index range
// [start, end], in the container's element type. Callers must ensure the
// range is valid for the container; an inverted range (start == end + 1)
// yields the multiplicative identity, 1.
template <typename Iterable>
auto prod(const Iterable& container, size_t start, size_t end) {
    using ValueType = std::decay_t<decltype(*std::begin(container))>;
    auto cursor = std::begin(container);
    std::advance(cursor, start);
    ValueType result = static_cast<ValueType>(1);
    for (size_t remaining = end - start + 1; remaining > 0; --remaining, ++cursor) {
        result = result * (*cursor);
    }
    return result;
}
#endif //NEURALNETWORK_UTILITIES_H