/*
 * EMBEDDEDML v1.3b
 */

/*
  embeddedML.c - Embedded Machine Learning Library
  Copyright (C) 2018 Charles Zaloom

  EmbeddedML is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  EmbeddedML is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with EmbeddedML. If not, see <https://www.gnu.org/licenses/>.
*/

#include "embeddedML.h"

//-----ANN-----

/*
   Recursive backpropagation. Each call runs the forward pass for one
   layer, recurses through the remaining layers, and applies the bias,
   weight, and momentum updates on the way back out. 'depth' counts the
   layers left to process; depth == 1 is the output layer.
*/
void BP_ANN(ANN *net, float *input, float *output, float *weights, float *velocity, float *bias, float *delta, int depth){
    unsigned int i,j;
    unsigned int DIM[2] = {net->topology[net->n_layers - depth], net->topology[net->n_layers - depth - 1]};

    if(depth == 1){
        // Output layer: accumulate the weighted sum, compute the error
        // delta against the target 'output', and update the biases.
        for(i = 0; i < DIM[0]; i++){
            net->output[i] = 0.0;
            for(j = 0; j < DIM[1]; j++){
                net->output[i] += weights[(DIM[1]*i)+j]*input[j];
            }
            net->output[i] = net->output[i] + bias[i];
            delta[i] = (output[i]-net->output_activation_function(net->output[i])) * net->output_activation_derivative(net->output[i]);
            net->output[i] = net->output_activation_function(net->output[i]);
            bias[i] = bias[i] + delta[i]*net->beta;
        }

        // Gradient dE/dW = delta * input, applied with momentum.
        float dEdW[DIM[0]*DIM[1]];
        for(i = 0; i < DIM[0]; i++){
            for(j = 0; j < DIM[1]; j++){
                dEdW[(DIM[1]*i)+j] = delta[i]*input[j];
            }
        }
        for(i = 0; i < DIM[0]*DIM[1]; i++){
            velocity[i] = dEdW[i]*net->eta - velocity[i]*net->alpha;
            weights[i] = weights[i] + velocity[i];
        }
        return;
    }
    else{
        // Hidden layer: forward pass, caching the activation derivative
        // d[] for the backward step.
        float a[DIM[0]];
        float d[DIM[0]];

        for(i = 0; i < DIM[0]; i++){
            a[i] = 0.0;
            for(j = 0; j < DIM[1]; j++){
                a[i] += weights[(DIM[1]*i)+j]*input[j];
            }
            a[i] += bias[i];
            d[i] = net->hidden_activation_derivative(a[i]);
            a[i] = net->hidden_activation_function(a[i]);
        }

        unsigned int DIM1 = net->topology[net->n_layers - depth + 1];

        float prev_delta[DIM1];
        unsigned int weight_iter = DIM[0] * DIM[1];

        // Snapshot the transpose of the next layer's weights before the
        // recursive call updates them in place.
        float next_weights_T[DIM[0]*DIM1];
        unsigned int iter = 0;
        for(i = 0; i < DIM[0]; i++){
            for(j = 0; j < DIM1; j++){
                next_weights_T[iter] = weights[(DIM[0]*j)+i+weight_iter];
                iter++;
            }
        }

        BP_ANN(net, a, output, &weights[weight_iter], &velocity[weight_iter], &bias[DIM[0]], prev_delta, depth-1);

        // Backpropagate: delta = (W_next^T * prev_delta) .* f'(a).
        for(i = 0; i < DIM[0]; i++){
            delta[i] = 0;
            for(j = 0; j < DIM1; j++){
                delta[i] += next_weights_T[(DIM1*i)+j]*prev_delta[j];
            }
            delta[i] = delta[i]*d[i];
            bias[i] = bias[i] + delta[i]*net->beta;
        }
        float dEdW[DIM[0]*DIM[1]];
        for(i = 0; i < DIM[0]; i++){
            for(j = 0; j < DIM[1]; j++){
                dEdW[(DIM[1]*i)+j] = delta[i]*input[j];
            }
        }
        for(i = 0; i < DIM[0]*DIM[1]; i++){
            velocity[i] = dEdW[i]*net->eta - velocity[i]*net->alpha;
            weights[i] = weights[i] + velocity[i];
        }
        return;
    }
}
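
/*
   Memory layout (inferred from the indexing above; the topology here is
   only an example): weights and biases for all layers live in single
   flat arrays, concatenated layer by layer in row-major order. For a
   hypothetical topology {2, 9, 6, 2}:

     layer 1: 9x2 = 18 weights at weights[0..17],  biases at bias[0..8]
     layer 2: 6x9 = 54 weights at weights[18..71], biases at bias[9..14]
     layer 3: 2x6 = 12 weights at weights[72..83], biases at bias[15..16]

   giving n_weights = 84 and n_bias = 17.
*/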

/*
   One backpropagation step over a single (input, target) pair. The
   scratch delta buffer holds the first hidden layer's deltas; deeper
   layers allocate their own on the stack inside BP_ANN.
*/
void train_ann(ANN *net, float *input, float *output){
    float delta[net->topology[1]];
    BP_ANN(net, input, output, net->weights, net->dedw, net->bias, delta, net->n_layers-1);
}
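
/*
   Usage sketch (illustrative only; assumes a network 'net' configured as
   in the setup sketch further below). Each call is one gradient step:

       float x[2] = {0.0f, 1.0f};    // one input sample
       float y[2] = {1.0f, 0.0f};    // its target output
       for(int epoch = 0; epoch < 1000; epoch++){
           train_ann(&net, x, y);
       }

   Per their use in BP_ANN: eta is the learning rate, beta the bias
   learning rate, and alpha the momentum coefficient.
*/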

/*
   Recursive forward pass. NOTE: a per-layer bias offset is computed so
   that inference reads the same per-layer bias slices that BP_ANN
   trains (BP_ANN advances its bias pointer by DIM[0] each recursion);
   indexing net->bias[i] directly would reuse the first layer's biases
   at every depth.
*/
void FP_ANN(ANN *net, float *input, unsigned int depth, float *weights){
    unsigned int DIM[2] = {net->topology[net->n_layers - depth], net->topology[net->n_layers - depth - 1]};
    unsigned int i,k;

    // Locate this layer's biases within the flat bias array.
    unsigned int bias_offset = 0;
    for(i = 1; i < net->n_layers - depth; i++){ bias_offset += net->topology[i]; }

    if(depth == 1){
        for(i = 0; i < DIM[0]; i++){
            net->output[i] = 0.0;
            for(k = 0; k < DIM[1]; k++){
                net->output[i] += weights[(DIM[1]*i)+k]*input[k];
            }
            net->output[i] = net->output_activation_function(net->output[i] + net->bias[bias_offset+i]);
        }
        return;
    }
    else{
        float a[DIM[0]];
        for(i = 0; i < DIM[0]; i++){
            a[i] = 0.0;
            for(k = 0; k < DIM[1]; k++){
                a[i] += weights[(DIM[1]*i)+k]*input[k];
            }
            a[i] = net->hidden_activation_function(a[i] + net->bias[bias_offset+i]);
        }
        FP_ANN(net, a, depth-1, &weights[DIM[0]*DIM[1]]);
    }
    return;
}

void run_ann(ANN *net, float *input){
    FP_ANN(net, input, net->n_layers-1, net->weights);
}
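
/*
   Inference sketch (illustrative only): after training, run_ann writes
   the network's prediction into net->output:

       float x[2] = {0.0f, 1.0f};
       run_ann(&net, x);
       // net->output[0 .. topology[n_layers-1]-1] now holds the result
*/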

/*
   init_ann: seed biases with a small constant and clear the momentum
   buffer. The function-pointer dispatch below only knows the two
   activations defined in this file; any other activation requires the
   caller to set the derivative pointers manually.
*/
void init_ann(ANN *net){
    fill_number(net->bias, net->n_bias, 0.1);
    fill_zeros(net->dedw, net->n_weights);

    if(net->output_activation_function == &relu) net->output_activation_derivative = &relu_derivative;
    else if(net->output_activation_function == &relu2) net->output_activation_derivative = &relu2_derivative;

    if(net->hidden_activation_function == &relu) net->hidden_activation_derivative = &relu_derivative;
    else if(net->hidden_activation_function == &relu2) net->hidden_activation_derivative = &relu2_derivative;
}

// Same as init_ann, but leaves pretrained biases untouched.
void init_pretrained_ann(ANN *net){
    fill_zeros(net->dedw, net->n_weights);

    if(net->output_activation_function == &relu) net->output_activation_derivative = &relu_derivative;
    else if(net->output_activation_function == &relu2) net->output_activation_derivative = &relu2_derivative;

    if(net->hidden_activation_function == &relu) net->hidden_activation_derivative = &relu_derivative;
    else if(net->hidden_activation_function == &relu2) net->hidden_activation_derivative = &relu2_derivative;
}
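
/*
   Setup sketch (field names inferred from their use in this file; the
   ANN struct itself is declared in embeddedML.h, so the exact types and
   the topology, buffer sizes, and hyperparameter values below are all
   hypothetical placeholders):

       unsigned int topology[4] = {2, 9, 6, 2};
       static float weights[84], dedw[84], bias[17], output[2];

       ANN net;
       net.topology = topology;   net.n_layers = 4;
       net.weights = weights;     net.n_weights = 84;
       net.bias = bias;           net.n_bias = 17;
       net.output = output;
       net.eta = 0.13f;  net.beta = 0.01f;  net.alpha = 0.25f;
       net.output_activation_function = &relu2;
       net.hidden_activation_function = &relu2;
       init_ann(&net);
*/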

//-----Utility-----
void fill_zeros(float *v, unsigned int size){
    unsigned int i;
    for(i = 0; i < size; i++){ v[i] = 0.0; }
}
void fill_number(float *v, unsigned int size, float number){
    unsigned int i;
    for(i = 0; i < size; i++){ v[i] = number; }
}

//-----Activation Functions-----
// ReLU variant: zero below 0, linear on [0, 1], then a soft 0.1-slope
// clamp above 1 so large activations still pass a small gradient.
float relu(float x){
    if(x < 0.0) return 0.0;
    else if(x > 1.0) return 0.1*x+0.93;
    return x;
}

// Similar to tanh: linear on [-1, 1] with soft 0.1-slope clamps beyond.
float relu2(float x){
    if(x < -1.0) return 0.1*x-0.93;
    else if(x > 1.0) return 0.1*x+0.93;
    return x;
}

//-----Derivative Functions-----
float relu_derivative(float x){
    if(x < 0.0) return 0.0;
    else if(x > 1.0) return 0.1;
    return 1.0;
}
float relu2_derivative(float x){
    if(x < -1.0) return 0.1;
    else if(x > 1.0) return 0.1;
    return 1.0;
}
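
/*
   Consistency check: each derivative is the piecewise slope of its
   activation. For relu2, d/dx(0.1*x + 0.93) = 0.1 beyond |x| > 1 and
   d/dx(x) = 1.0 on [-1, 1]; the pieces nearly meet at the knees, e.g.
   0.1*1 + 0.93 = 1.03 versus 1.0 at x = 1.
*/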