
Commit 63404e3

v1.3b
Forking library into lightweight and full versions.
1 parent 91dcdee commit 63404e3

13 files changed: +210 −476 lines changed

EmbeddedML_Source/embeddedML.c (-435)

This file was deleted.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.

Source/embeddedML.c (+200)
@@ -0,0 +1,200 @@
/*
 * EMBEDDEDML v1.3b
 */

/*
    embeddedML.c - Embedded Machine Learning Library
    Copyright (C) 2018 Charles Zaloom

    EmbeddedML is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    EmbeddedML is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with EmbeddedML. If not, see <https://www.gnu.org/licenses/>
*/

#include "embeddedML.h"

//-----ANN-----

// Recursive backpropagation: computes activations on the way down the
// recursion, then propagates deltas back up, updating biases and applying
// a velocity-based weight update at every level.
void BP_ANN(ANN *net, float *input, float *output, float *weights, float *velocity, float *bias, float *delta, int depth){
    unsigned int i,j;
    unsigned int DIM[2] = {net->topology[net->n_layers - depth], net->topology[net->n_layers - depth - 1]};

    if(depth == 1){
        // Output layer: activations, output-error delta, and bias update.
        for(i = 0; i < DIM[0]; i++){
            net->output[i] = 0.0;
            for(j = 0; j < DIM[1]; j++){
                net->output[i] += weights[(DIM[1]*i)+j]*input[j];
            }
            net->output[i] = net->output[i] + bias[i];
            delta[i] = (output[i]-net->output_activation_function(net->output[i])) * net->output_activation_derivative(net->output[i]);
            net->output[i] = net->output_activation_function(net->output[i]);
            bias[i] = bias[i] + delta[i]*net->beta;
        }

        float dEdW[DIM[0]*DIM[1]];
        for(i = 0; i < DIM[0]; i++){
            for(j = 0; j < DIM[1]; j++){
                dEdW[(DIM[1]*i)+j] = delta[i]*input[j];
            }
        }
        for(i = 0; i < DIM[0]*DIM[1]; i++){
            velocity[i] = dEdW[i]*net->eta - velocity[i]*net->alpha;
            weights[i] = weights[i] + velocity[i];
        }
        return;
    }
    else{
        // Hidden layer: forward activations a[] and their derivatives d[].
        float a[DIM[0]];
        float d[DIM[0]];

        for(i = 0; i < DIM[0]; i++){
            a[i] = 0.0;
            for(j = 0; j < DIM[1]; j++){
                a[i] += weights[(DIM[1]*i)+j]*input[j];
            }
            a[i] += bias[i];
            d[i] = net->hidden_activation_derivative(a[i]);
            a[i] = net->hidden_activation_function(a[i]);
        }

        unsigned int DIM1 = net->topology[net->n_layers - depth + 1];

        float prev_delta[DIM1];
        unsigned int weight_iter = DIM[0] * DIM[1];

        // Transpose of the next layer's weight block, used to carry the
        // next layer's deltas back to this layer.
        float next_weights_T[DIM[0]*DIM1];
        unsigned int iter = 0;
        for(i = 0; i < DIM[0]; i++){
            for(j = 0; j < DIM1; j++){
                next_weights_T[iter] = weights[(DIM[0]*j)+i+weight_iter];
                iter++;
            }
        }

        BP_ANN(net, a, output, &weights[weight_iter], &velocity[weight_iter], &bias[DIM[0]], prev_delta, depth-1);

        for(i = 0; i < DIM[0]; i++){
            delta[i] = 0;
            for(j = 0; j < DIM1; j++){
                delta[i] += next_weights_T[(DIM1*i)+j]*prev_delta[j];
            }
            delta[i] = delta[i]*d[i];
            bias[i] = bias[i] + delta[i]*net->beta;
        }
        float dEdW[DIM[0]*DIM[1]];
        for(i = 0; i < DIM[0]; i++){
            for(j = 0; j < DIM[1]; j++){
                dEdW[(DIM[1]*i)+j] = delta[i]*input[j];
            }
        }
        for(i = 0; i < DIM[0]*DIM[1]; i++){
            velocity[i] = dEdW[i]*net->eta - velocity[i]*net->alpha;
            weights[i] = weights[i] + velocity[i];
        }
        return;
    }
}

// One training step on a single (input, target output) pair.
void train_ann(ANN *net, float *input, float *output){
    float delta[net->topology[1]];
    BP_ANN(net, input, output, net->weights, net->dedw, net->bias, delta, net->n_layers-1);
}

// Recursive forward pass; the final activations land in net->output.
// NOTE: unlike BP_ANN, which advances the bias pointer per layer
// (&bias[DIM[0]]), this indexes net->bias from 0 at every depth.
void FP_ANN(ANN *net, float *input, unsigned int depth, float *weights){
    unsigned int DIM[2] = {net->topology[net->n_layers - depth], net->topology[net->n_layers - depth - 1]};
    unsigned int i,j,k;

    if(depth == 1){
        for(i = 0; i < DIM[0]; i++){
            net->output[i] = 0.0;
            for(k = 0; k < DIM[1]; k++){
                net->output[i] += weights[(DIM[1]*i)+k]*input[k];
            }
            net->output[i] = net->output_activation_function(net->output[i] + net->bias[i]);
        }
        return;
    }
    else{
        float a[DIM[0]];
        for(i = 0; i < DIM[0]; i++){
            a[i] = 0.0;
            for(k = 0; k < DIM[1]; k++){
                a[i] += weights[(DIM[1]*i)+k]*input[k];
            }
            a[i] = net->hidden_activation_function(a[i] + net->bias[i]);
            //if(depth == 2) printf("%f,", a[i]);
        }
        //if(depth == 2) printf("0\n");
        FP_ANN(net, a, depth-1, &weights[DIM[0]*DIM[1]]);
    }
    return;
}

void run_ann(ANN *net, float *input){
    FP_ANN(net, input, net->n_layers-1, net->weights);
}

// Initialize a network that will be trained on-device.
void init_ann(ANN *net){
    fill_number(net->bias, net->n_bias, 0.1);
    fill_zeros(net->dedw, net->n_weights);

    if(net->output_activation_function == &relu) net->output_activation_derivative = &relu_derivative;
    else if(net->output_activation_function == &relu2) net->output_activation_derivative = &relu2_derivative;

    if(net->hidden_activation_function == &relu) net->hidden_activation_derivative = &relu_derivative;
    else if(net->hidden_activation_function == &relu2) net->hidden_activation_derivative = &relu2_derivative;
}

// Initialize a network whose weights and biases are already trained:
// only the velocity buffer and derivative pointers are set up.
void init_pretrained_ann(ANN *net){
    fill_zeros(net->dedw, net->n_weights);

    if(net->output_activation_function == &relu) net->output_activation_derivative = &relu_derivative;
    else if(net->output_activation_function == &relu2) net->output_activation_derivative = &relu2_derivative;

    if(net->hidden_activation_function == &relu) net->hidden_activation_derivative = &relu_derivative;
    else if(net->hidden_activation_function == &relu2) net->hidden_activation_derivative = &relu2_derivative;
}

//-----Utility-----
void fill_zeros(float *v, unsigned int size){
    unsigned int i;
    for(i = 0; i < size; i++){ v[i] = 0.0; }
}
void fill_number(float *v, unsigned int size, float number){
    unsigned int i;
    for(i = 0; i < size; i++){ v[i] = number; }
}

//-----Activation Functions-----
//Rectifier that goes linear with slope 0.1 above 1.0 instead of growing unbounded.
float relu(float x){
    if(x < 0.0) return 0.0;
    else if(x > 1.0) return 0.1*x+0.93;
    return x;
}

//Similar to Tanh
float relu2(float x){
    if(x < -1.0) return 0.1*x-0.93;
    else if(x > 1.0) return 0.1*x+0.93;
    return x;
}

//-----Derivative Functions-----
float relu_derivative(float x){
    if(x < 0.0) return 0.0;
    else if(x > 1.0) return 0.1;
    return 1.0;
}
float relu2_derivative(float x){
    if(x < -1.0) return 0.1;
    else if(x > 1.0) return 0.1;
    return 1.0;
}
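As a reading aid (this commentary is not part of the commit), the update BP_ANN applies at every level of the recursion can be written out directly from the code. With target $t$, activation $f$, pre-activation $z$, and layer input $x$:

$$\delta_i = (t_i - y_i)\,f'(z_i), \qquad v_{ij} \leftarrow \eta\,\delta_i\,x_j \;-\; \alpha\,v_{ij}, \qquad w_{ij} \leftarrow w_{ij} + v_{ij}$$

For hidden layers the same form holds, with $\delta$ backpropagated through the transposed next-layer weights. Because $\delta$ carries the $(t - y)$ sign, $\eta\,\delta_i x_j$ is the negative of the squared-error gradient, which is why the code adds the step to the weights. Note also that alpha, which the header labels the momentum coefficient, enters with a minus sign on the previous velocity, so it damps successive steps rather than accumulating them the way classical momentum would.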
Source/embeddedML.h (+10 −41)
@@ -1,4 +1,7 @@
-/* EMBEDDEDML V1.2.1 */
+/*
+ * EMBEDDEDML v1.3b
+ */
+
 /*
   embeddedML.h - Embedded Machine Learning Library
   Copyright (C) 2018 Charles Zaloom
@@ -14,28 +17,11 @@
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
-  along with EmbeddedML. If not, see <https://www.gnu.org/licenses/>
+  along with EmbeddedML. If not, see <https://www.gnu.org/licenses/>
 */

-#ifndef EMBEDDED_ML
-#define EMBEDDED_ML
-
-//#ifndef _EMBEDDED_
-// #define _EMBEDDED_
-//#endif
-
-/*
- * NOTE: Defining _EMBEDDED_ removes all dependencies on standard libraries.
- * This means that some functions may not be available, but it will allow
- * usage on any system that runs C/C++.
- */
-
-
-#ifndef _EMBEDDED_
-#include <stdlib.h>
-#include <math.h>
-#include <stdio.h>
-#endif
+#ifndef EMBEDDED_ML_METAL
+#define EMBEDDED_ML_METAL

 //-----ANN Structure-----
 typedef struct {
@@ -54,42 +40,25 @@ typedef struct {
 float (*hidden_activation_derivative)(float);

 float eta; //Learning Rate
+float beta; //Bias Learning Rate
 float alpha; //Momentum Coefficient
 } ANN;

 void train_ann(ANN *net, float *input, float *output);
 void run_ann(ANN *net, float *input);
+
 void init_ann(ANN *net);
-void init_embedded_ann(ANN *net);
-#ifndef _EMBEDDED_
-void load_ann(ANN *net, const char *filename);
-void save_ann(ANN *net, const char *filename);
-#endif
+void init_pretrained_ann(ANN *net);

 //-----Utility-----
 void fill_zeros(float *v, unsigned int size);
 void fill_number(float *v, unsigned int size, float number);
-void strong_softmax(float *x, float *y);
-void weak_softmax(float *x, float *y);
-#ifndef _EMBEDDED_
-void fill_rand(float *v, unsigned int size, float lower, float upper);
-void softmax(unsigned int size, float multiplier, float *x, float *y);
-void softmax2(unsigned int size, float multiplier, float *x, float *y);
-#endif

 //------Activation Functions-----
 float relu(float x);
 float relu_derivative(float x);

 float relu2(float x);
 float relu2_derivative(float x);
-
-#ifndef _EMBEDDED_
-//float tanhf(float x); //USE 'math.h' tanhf(x) function
-float tanhf_derivative(float x);
-
-float sigmoid(float x);
-float sigmoid_derivative(float x);
-#endif

 #endif
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
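Since the lightweight fork drops load_ann/save_ann, fill_rand, and the softmax helpers, a caller now supplies every buffer by hand. Below is a minimal usage sketch, not code from this commit: it assumes the ANN members exercised by embeddedML.c (topology, n_layers, weights/n_weights, bias/n_bias, dedw, output, eta, beta, alpha, and the activation-function pointers) are caller-assignable pointers and scalars, and the 2-3-1 XOR setup, seed weights, and hyperparameter values are illustrative only.

#include "embeddedML.h"

/* Assumed sizes for topology {2,3,1}: weights 2*3 + 3*1 = 9, biases 3 + 1 = 4.
   Layout: weights[0..5] = hidden layer, 3x2 row-major; weights[6..8] = output layer, 1x3. */
static unsigned int topology[3] = {2, 3, 1};
static float weights[9] = {0.3f, -0.2f, 0.1f, -0.4f, 0.25f, 0.15f, 0.2f, -0.3f, 0.35f};
static float dedw[9];    /* velocity scratch, zeroed by init_ann          */
static float bias[4];    /* filled with 0.1 by init_ann                   */
static float output[1];  /* final activations land here after run_ann     */

int main(void){
    ANN net;
    net.topology = topology;
    net.n_layers = 3;
    net.weights = weights;  net.n_weights = 9;
    net.bias = bias;        net.n_bias = 4;
    net.dedw = dedw;
    net.output = output;
    net.eta = 0.1f;    /* learning rate        */
    net.beta = 0.01f;  /* bias learning rate   */
    net.alpha = 0.25f; /* momentum coefficient */
    net.hidden_activation_function = &relu2;
    net.output_activation_function = &relu2;

    init_ann(&net); /* also wires the matching *_derivative pointers */

    float x[4][2] = {{0,0},{0,1},{1,0},{1,1}};
    float t[4][1] = {{0},{1},{1},{0}};
    for(int epoch = 0; epoch < 2000; epoch++){
        for(int s = 0; s < 4; s++){ train_ann(&net, x[s], t[s]); }
    }

    run_ann(&net, x[1]); /* forward pass only; read net.output[0] */
    return 0;
}

If the struct instead stores fixed-size arrays rather than pointers, the member assignments above would become copy-style initialization; the middle of the header is collapsed in this diff, so the exact member types are not visible here.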
