Commit d90a59d

Enforced consistent naming and formatting style

1 parent: 4387bba

15 files changed: +263 −247 lines
src/autodiff.cc

Lines changed: 72 additions & 71 deletions
@@ -17,50 +17,50 @@ using std::vector;
 
 enum OpType{scalar, matmul, reduct};
 
-struct Node{
+struct WegnerntNode{
     array<size_t, 2> parents;
     array<nn::Tensor, 2> weights;
     OpType type;
 
-    Node(size_t, size_t, const nn::Tensor&, const nn::Tensor&, OpType);
-    Node(size_t, const nn::Tensor&, OpType=scalar);
-    Node();
+    WegnerntNode(size_t, size_t, const nn::Tensor&, const nn::Tensor&, OpType);
+    WegnerntNode(size_t, const nn::Tensor&, OpType=scalar);
+    WegnerntNode();
 };
 
-Node::Node(size_t x_index, size_t y_index, const nn::Tensor &x_val, const nn::Tensor &y_val, OpType type_)
+WegnerntNode::WegnerntNode(size_t x_index, size_t y_index, const nn::Tensor &x_val, const nn::Tensor &y_val, OpType type_)
     : parents{{x_index, y_index}}, weights{{x_val, y_val}}, type(type_){}
 
-Node::Node(size_t index , const nn::Tensor &val, OpType type_)
+WegnerntNode::WegnerntNode(size_t index, const nn::Tensor &val, OpType type_)
     : parents{{index, 0}}, weights{{val, nn::Tensor()}}, type(type_){}
 
-Node::Node()
+WegnerntNode::WegnerntNode()
     : parents{{0,0}}, weights{{nn::Tensor(), nn::Tensor()}}, type(scalar){}
 
 /**************************************************
-Tape
+Wengernt List
 A ticker tape which holds the nodes for each operation on an autodiff::var.
 Currently contains the vector for the gradient wrt the last node
 on each tree.
 Takes binary, unary and nullary operations.
 ***************************************************/
 
-struct Tape{
-    vector<Node> nodes;
+struct WengerntList{
+    vector<WegnerntNode> nodes;
     vector<nn::Tensor> grads;
 
     size_t push_0(){
         auto size = nodes.size();
-        nodes.emplace_back(Node(size, size, nn::Tensor(), nn::Tensor(), scalar));
+        nodes.emplace_back(WegnerntNode(size, size, nn::Tensor(), nn::Tensor(), scalar));
         return size;
     }
     size_t push_1(size_t x_index, const nn::Tensor &data, OpType type=scalar){
         auto size = nodes.size();
-        nodes.emplace_back(Node(x_index, data, type));
+        nodes.emplace_back(WegnerntNode(x_index, data, type));
         return size;
     }
     size_t push_2(size_t x_index, size_t y_index, const nn::Tensor &x_val, const nn::Tensor &y_val, OpType type){
         auto size = nodes.size();
-        nodes.emplace_back(Node(x_index, y_index, x_val, y_val, type));
+        nodes.emplace_back(WegnerntNode(x_index, y_index, x_val, y_val, type));
         return size;
     }
     void push_grad(const nn::Shape& shape){
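Note: the Wengert-list pattern the new names refer to is easiest to see stripped of tensors. A minimal scalar sketch of the same idea (ScalarNode and ScalarTape are illustrative names, not part of this repository):

    #include <cstddef>
    #include <vector>

    // A scalar Wengert list: each node records its parents' indices and the
    // local partial derivatives ("weights") captured when the op executed.
    struct ScalarNode{
        std::size_t parents[2];
        double weights[2];
    };

    struct ScalarTape{
        std::vector<ScalarNode> nodes;

        // Leaf: points at itself with zero weights, mirroring push_0 above.
        std::size_t push_leaf(){
            std::size_t i = nodes.size();
            nodes.push_back({{i, i}, {0.0, 0.0}});
            return i;
        }
        // Binary op: e.g. z = x * y records weights dz/dx = y, dz/dy = x.
        std::size_t push_binary(std::size_t x, std::size_t y, double dx, double dy){
            nodes.push_back({{x, y}, {dx, dy}});
            return nodes.size() - 1;
        }
    };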
@@ -73,7 +73,7 @@ struct Tape{
     auto end(){ return nodes.end(); }
 };
 
-static Tape tape;
+static WengerntList tape;
 
 /***************************************************
 var
@@ -83,21 +83,21 @@ static Tape tape;
 namespace autodiff{
 
 // Constructor for initializing a variable with a tape entry
-var::var(const nn::Tensor& data_, size_t index_) :index(index_), data(data_){}
+Var::Var(const nn::Tensor& data_, size_t index_) : index(index_), data(data_){}
 
 // Constructor for initializing a variable without a tape entry
-var::var(const nn::Tensor& data_) : data(data_){
+Var::Var(const nn::Tensor& data_) : data(data_){
     index = tape.push_0();
     tape.push_grad(data.shape);
 }
 
-var::var(size_t x, size_t y, size_t z, size_t t)
+Var::Var(size_t x, size_t y, size_t z, size_t t)
     : data(nn::Tensor(x,y,z,t)){
     index = tape.push_0();
     tape.push_grad(data.shape);
 }
 
-void var::evaluateLeaves()const{
+void Var::evaluate_leaves()const{
     tape.grads[index].ones();
     for(size_t i=index+1; i-- >0;){
         auto &gradient = tape.grads[i];
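Note: the renamed evaluate_leaves is the reverse sweep over that list: seed the output node's adjoint with ones, then walk the tape backwards, pushing each adjoint to its parents through the recorded weights. In the scalar sketch above, the same sweep would look roughly like this (again illustrative, not this repository's API):

    // Reverse sweep: grads[i] accumulates d(output)/d(node i).
    // Assumes the ScalarTape/ScalarNode sketch above; leaves point at
    // themselves with zero weights, so they contribute nothing.
    std::vector<double> backward(const ScalarTape& tape, std::size_t output){
        std::vector<double> grads(tape.nodes.size(), 0.0);
        grads[output] = 1.0;                        // d(output)/d(output) = 1
        for(std::size_t i = output + 1; i-- > 0;){  // same loop shape as evaluate_leaves
            const ScalarNode& n = tape.nodes[i];
            grads[n.parents[0]] += n.weights[0] * grads[i];  // chain rule
            grads[n.parents[1]] += n.weights[1] * grads[i];
        }
        return grads;
    }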
@@ -124,71 +124,71 @@ void var::evaluateLeaves()const{
     }
 }
 
-nn::Tensor var::grad()const{ return tape.grads[index]; }
+nn::Tensor Var::grad()const{ return tape.grads[index]; }
 
-double& var::operator()(size_t x, size_t y, size_t z, size_t t){
+double& Var::operator()(size_t x, size_t y, size_t z, size_t t){
     return data(x,y,z,t);
 }
 
-var var::operator+(const var& y)const{
+Var Var::operator+(const Var& y)const{
     auto new_data = data + y.data;
     auto x_weight = nn::Tensor(data.shape, 1);
     auto y_weight = nn::Tensor(data.shape, 1);
     auto new_index = tape.push_2(index, y.index, x_weight, y_weight, scalar);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
-var var::operator-(const var& y)const{
+Var Var::operator-(const Var& y)const{
     auto new_data = data - y.data;
     auto x_weight = nn::Tensor(data.shape, 1);
     auto y_weight = nn::Tensor(data.shape, -1);
     auto new_index = tape.push_2(index, y.index, x_weight, y_weight, scalar);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
-var var::operator%(const var& y)const{
+Var Var::operator%(const Var& y)const{
     auto new_data = data % y.data;
     auto x_weight = y.data;
     auto y_weight = data;
     auto new_index = tape.push_2(index, y.index, x_weight, y_weight, scalar);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
-var var::operator/(const var& y)const{
+Var Var::operator/(const Var& y)const{
     auto x_weight = 1.0 / y.data;
     auto y_weight = 2.0 * data / (y.data % y.data);
     auto new_data = data / y.data;
     auto new_index = tape.push_2(index, y.index, x_weight, y_weight, scalar);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
 
-var var::operator*(const var& y)const{
+Var Var::operator*(const Var& y)const{
     auto x_weight = y.data;
     auto y_weight = data;
     auto new_data = data * y.data;
     auto new_index = tape.push_2(index, y.index, x_weight, y_weight, matmul);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
 
-var operator*(double x, const var& y){
+Var operator*(double x, const Var& y){
     auto weight = nn::Tensor(y.data.shape, x);
     auto new_data = x * y.data;
     auto new_index = tape.push_1(y.index, weight);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
 
-void var::operator=(const nn::Tensor& y) { data = y;}
+void Var::operator=(const nn::Tensor& y) { data = y; }
 
-void var::operator+=(const var& y){ *this = *this + y;}
-void var::operator-=(const var& y){ *this = *this - y;}
-void var::operator/=(const var& y){ *this = *this/y; }
-void var::operator*=(const var& y){ *this = *this * y; }
-void var::operator%=(const var& y){ *this = *this % y; }
+void Var::operator+=(const Var& y){ *this = *this + y; }
+void Var::operator-=(const Var& y){ *this = *this - y; }
+void Var::operator/=(const Var& y){ *this = *this / y; }
+void Var::operator*=(const Var& y){ *this = *this * y; }
+void Var::operator%=(const Var& y){ *this = *this % y; }
 
-std::ostream& operator<<(std::ostream& os, const var& rhs){
+std::ostream& operator<<(std::ostream& os, const Var& rhs){
     os << "Tensor:" << std::endl;
     os << rhs.data;
     os << "Gradient:" << std::endl;
@@ -202,17 +202,18 @@ Reimplimentation of primitive cmath operations to include
 differentiation process.
 ***************************************************/
 
-var pow(const var &x, double y){
+Var pow(const Var &x, double y){
     auto x_weight = y * nn::pow(x.data, y - 1);
     auto new_index = tape.push_1(x.index, x_weight);
-    return var(pow(x.data, y), new_index);
+    return Var(pow(x.data, y), new_index);
 }
-var pow(const var &x, const var &y){
+
+Var pow(const Var &x, const Var &y){
     auto x_weight = y.data % nn::pow(x.data, y.data-1);
     auto pow_x_y = nn::pow(x.data,y.data);
     auto y_weight = pow_x_y * nn::log(x.data);
     auto new_index = tape.push_2(x.index, y.index, x_weight, y_weight, scalar);
-    return var(pow_x_y, new_index);
+    return Var(pow_x_y, new_index);
 }
 
 //var sqrt(const var &x) {
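Note: for reference, the two weights pushed in pow(x, y) are exactly the partial derivatives the chain rule needs:

    d(x^y)/dx = y * x^(y-1)        (x_weight)
    d(x^y)/dy = x^y * log(x)       (y_weight)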
@@ -226,67 +227,67 @@ var pow(const var &x, const var &y){
 //    return var(exp_x, new_index);
 //}
 //
-var log(const var &x){
-    auto new_index = tape.push_1(x.index, 1/x.data);
-    return var(nn::log(x.data), new_index);
+Var log(const Var &x){
+    auto new_index = tape.push_1(x.index, 1 / x.data);
+    return Var(nn::log(x.data), new_index);
 }
-//
-var sin(const var &x){
+
+Var sin(const Var &x){
     auto new_index = tape.push_1(x.index, nn::cos(x.data));
-    return var(nn::sin(x.data), new_index);
+    return Var(nn::sin(x.data), new_index);
 }
 
-var cos(const var &x){
+Var cos(const Var &x){
     auto new_index = tape.push_1(x.index, nn::sin(x.data) * -1.0);
-    return var(nn::cos(x.data), new_index);
+    return Var(nn::cos(x.data), new_index);
 }
 
-var tan(const var &x){
+Var tan(const Var &x){
     auto cos_x = nn::cos(x.data);
     auto new_index = tape.push_1(x.index, 1.0 / (cos_x * cos_x));
-    return var(nn::tan(x.data), new_index);
+    return Var(nn::tan(x.data), new_index);
 }
 
-var asin(const var &x){
+Var asin(const Var &x){
     auto weight = 1.0 / nn::sqrt(1.0 - x.data * x.data);
     auto new_index = tape.push_1(x.index, weight);
-    return var(nn::asin(x.data), new_index);
+    return Var(nn::asin(x.data), new_index);
 }
 
-var acos(const var &x){
+Var acos(const Var &x){
     auto new_index = tape.push_1(x.index, -1.0 / nn::sqrt(1.0 - x.data * x.data));
-    return var(nn::acos(x.data), new_index);
+    return Var(nn::acos(x.data), new_index);
 }
 
-var atan(const var &x){
+Var atan(const Var &x){
     auto new_index = tape.push_1(x.index, 1.0 / (1.0 + x.data * x.data));
-    return var(atan(x.data), new_index);
+    return Var(atan(x.data), new_index);
 }
 
-var var::asum(){
-    auto new_index = tape.push_1(index, nn::Tensor(data.shape,1), reduct);
-    auto double_new_data = data.asum();
+Var Var::abs_sum(){
+    auto new_index = tape.push_1(index, nn::Tensor(data.shape, 1), reduct);
+    auto double_new_data = data.abs_sum();
     nn::Tensor new_data(1);
     new_data(0) = double_new_data;
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
-var var::sum(){
-    auto new_index = tape.push_1(index, nn::Tensor(data.shape,1), reduct);
+Var Var::sum(){
+    auto new_index = tape.push_1(index, nn::Tensor(data.shape, 1), reduct);
     auto double_new_data = data.sum();
     nn::Tensor new_data(1);
     new_data(0) = double_new_data;
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
 
-var conv1d(const var& x, const var& y){
-    auto new_data= nn::conv1d(x.data, y.data);
-    auto x_weight = nn::conv1d(nn::Tensor(x.data.shape,1), y.data);
-    auto y_weight = nn::conv1d(x.data, nn::Tensor(y.data.shape,1));
+Var conv_1d(const Var& x, const Var& y){
+    auto new_data = nn::conv_1d(x.data, y.data);
+    auto x_weight = nn::conv_1d(nn::Tensor(x.data.shape, 1), y.data);
+    auto y_weight = nn::conv_1d(x.data, nn::Tensor(y.data.shape, 1));
    auto new_index = tape.push_2(x.index, y.index, x_weight, y_weight, scalar);
     tape.push_grad(new_data.shape);
-    return var(new_data, new_index);
+    return Var(new_data, new_index);
 }
 
 //var conv2d(const var& x, const var& weight, size_t stride, size_t kernel){
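Note: a quick way to sanity-check local weights like these (cos for sin, 1/cos^2 for tan, and so on) is a central finite difference. A self-contained scalar check using only the C++ standard library:

    #include <cmath>
    #include <cstdio>

    // Central difference: (f(x+h) - f(x-h)) / (2h) should match the
    // analytic weight to roughly 1e-9 for well-scaled inputs.
    int main(){
        double x = 0.7, h = 1e-6;
        double analytic = std::cos(x);  // the weight sin() records above
        double numeric  = (std::sin(x + h) - std::sin(x - h)) / (2.0 * h);
        std::printf("analytic %.9f  numeric %.9f\n", analytic, numeric);
        return 0;
    }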
