
Commit 0c13a1e

Add files via upload
1 parent daf32de commit 0c13a1e


42 files changed: +1175 -0 lines changed

ec/ec.m

+93
@@ -0,0 +1,93 @@
clc

%% Network definition
layers = get_lenet();
layers{1}.batch_size = 1;

% Load the trained weights
load lenet.mat

path = '../images/';
% Read images from the images folder
all_img = dir(path);
for j = 1:length(all_img)
    img_name = all_img(j).name;
    img_file = fullfile(path, img_name);
    try
        % Read image and convert to grayscale if needed
        img = imread(img_file);
        if size(img,3) == 3   % RGB image
            im = rgb2gray(img);
        else
            im = img(:,:,1);
        end
        im = imresize(im, 3);

        % Show image
        figure(1)
        imshow(im);
        title('Input image')

        % Binarize image
        threshold = graythresh(im);
        im = ~imbinarize(im, threshold);

        % Remove all objects containing fewer than 30 pixels
        im = bwareaopen(im, 30);
        pause(3)

        % Display binary image
        figure(2)
        imshow(~im);
        title('Binarized input with bounding boxes')

        % Label connected components
        [L, Ne] = bwlabel(im);

        % Measure properties of image regions
        propied = regionprops(L, 'BoundingBox');
        hold off

        % Plot bounding boxes
        for n = 1:size(propied,1)
            rectangle('Position', propied(n).BoundingBox, 'EdgeColor', 'r', 'LineWidth', 1);
        end

        pause(3)
        figure;
        test_pred = zeros(784, size(propied,1));
        %% Plot bounded digits
        for n = 1:size(propied,1)
            coord = propied(n).BoundingBox;
            subImage = imcrop(L, [coord(1), coord(2), coord(3), coord(4)]);
            subImage = imresize(subImage, [28, 28]);
            test_pred(:,n) = im2col(subImage, [28 28]);
            subplot(1, size(propied,1), n), imshow(subImage);
            sgtitle('Bounded resized (28x28) digits')
        end
        pause(3)

        %% Testing the network
        num_digits = 10;
        prediction = zeros(num_digits, size(propied,1));
        for i = 1:size(propied,1)
            im = test_pred(:,i);
            im = im(:);
            im = im ./ max(im);
            [output, P] = convnet_forward(params, layers, im);
            prediction(:, i) = P;
        end

        [val, ind] = max(prediction);
        fprintf('\n\n'); disp(img_name); fprintf('output\n')
        disp(ind)
    catch
        % Skip directory entries ('.', '..') and unreadable files
    end
end

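One detail worth calling out in the script above: because the block size passed to im2col matches the crop size, the call simply flattens each 28x28 crop into the single 784x1 column that convnet_forward expects. A minimal illustration:

% im2col with a block the size of the image is column-major flattening:
A = magic(28);
isequal(im2col(A, [28 28]), A(:))   % returns true
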
images/image1.JPG (56.9 KB)

images/image2.JPG (48.5 KB)

images/image3.png (24 KB)

images/image4.jpg (12.7 KB)

matlab/col2im_conv.m

+11
@@ -0,0 +1,11 @@
function im = col2im_conv(col, input, layer, h_out, w_out)
% Wrapper: scatter columns back into an image. Dispatches to the
% pure-MATLAB implementation (a compiled col2im_conv_c is commented out).
h_in = input.height;
w_in = input.width;
c = input.channel;
k = layer.k;
pad = layer.pad;
stride = layer.stride;

% im = col2im_conv_c(col, h_in, w_in, c, k, pad, stride, h_out, w_out);
im = col2im_conv_matlab(col, input, layer, h_out, w_out);
end

matlab/col2im_conv_matlab.m

+21
@@ -0,0 +1,21 @@
function im = col2im_conv_matlab(col, input, layer, h_out, w_out)
% Scatter-add each k-by-k column patch back to its image location
% (the adjoint of im2col_conv_matlab); overlapping patches accumulate.
h_in = input.height;
w_in = input.width;
c = input.channel;
k = layer.k;
pad = layer.pad;
stride = layer.stride;

im = zeros(h_in, w_in, c);
assert(pad == 0, 'pad must be 0');
% im = padarray(im, [pad, pad], 0);
col = reshape(col, [k*k*c, h_out*w_out]);
for h = 1:h_out
    for w = 1:w_out
        im((h-1)*stride + 1 : (h-1)*stride + k, (w-1)*stride + 1 : (w-1)*stride + k, :) ...
            = im((h-1)*stride + 1 : (h-1)*stride + k, (w-1)*stride + 1 : (w-1)*stride + k, :) + ...
            reshape(col(:, h + (w-1)*h_out), [k, k, c]);
    end
end
im = im(pad+1:end-pad, pad+1:end-pad, :);   % no-op while pad == 0
end

matlab/conv_layer_backward.m

+40
@@ -0,0 +1,40 @@
function [param_grad, input_od] = conv_layer_backward(output, input, layer, param)
% Conv layer backward: gradients w.r.t. the weights, biases, and input
h_in = input.height;
w_in = input.width;
c = input.channel;
batch_size = input.batch_size;
k = layer.k;
group = layer.group;
num = layer.num;

h_out = output.height;
w_out = output.width;
input_n.height = h_in;
input_n.width = w_in;
input_n.channel = c;
input_od = zeros(size(input.data));
param_grad.b = zeros(size(param.b));
param_grad.w = zeros(size(param.w));

for n = 1:batch_size
    input_n.data = input.data(:, n);
    col = im2col_conv(input_n, layer, h_out, w_out);
    col = reshape(col, k*k*c, h_out*w_out);
    col_diff = zeros(size(col));
    temp_data_diff = reshape(output.diff(:, n), [h_out*w_out, num]);
    for g = 1:group
        g_c_idx = (g-1)*k*k*c/group + 1 : g*k*k*c/group;
        g_num_idx = (g-1)*num/group + 1 : g*num/group;
        col_g = col(g_c_idx, :);
        weight = param.w(:, g_num_idx);
        % gradients of the parameters
        param_grad.b(:, g_num_idx) = param_grad.b(:, g_num_idx) + sum(temp_data_diff(:, g_num_idx));
        param_grad.w(:, g_num_idx) = param_grad.w(:, g_num_idx) + col_g*temp_data_diff(:, g_num_idx);
        col_diff(g_c_idx, :) = weight*temp_data_diff(:, g_num_idx)';
    end
    im = col2im_conv(col_diff(:), input, layer, h_out, w_out);
    % gradient w.r.t. input.data
    input_od(:, n) = im(:);
end
end

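A quick way to sanity-check this backward pass is a finite-difference comparison on a tiny layer. The sketch below uses made-up shapes and takes the loss to be the plain sum of the outputs, so the upstream diff is a vector of ones:

% Finite-difference check of one weight gradient (illustrative shapes).
input.height = 6; input.width = 6; input.channel = 1; input.batch_size = 1;
input.data = rand(36, 1);
layer.k = 3; layer.stride = 1; layer.pad = 0; layer.num = 2; layer.group = 1;
param.w = rand(9, 2); param.b = rand(1, 2);
out = conv_layer_forward(input, layer, param);
out.diff = ones(numel(out.data), 1);        % d(sum of outputs)/d(output)
[grad, ~] = conv_layer_backward(out, input, layer, param);
delta = 1e-6; p2 = param; p2.w(1) = p2.w(1) + delta;
out2 = conv_layer_forward(input, layer, p2);
fprintf('analytic %.6f vs numeric %.6f\n', grad.w(1), ...
    (sum(out2.data(:)) - sum(out.data(:))) / delta);
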
matlab/conv_layer_forward.m

+47
@@ -0,0 +1,47 @@
function [output] = conv_layer_forward(input, layer, param)
% Conv layer forward
% input: struct with the input data
% layer: convolution layer struct
% param: weights for the convolution layer
% output: struct with the convolved responses

h_in = input.height;
w_in = input.width;
c = input.channel;
batch_size = input.batch_size;
k = layer.k;
pad = layer.pad;
stride = layer.stride;
num = layer.num;
% resolve output shape
h_out = (h_in + 2*pad - k) / stride + 1;
w_out = (w_in + 2*pad - k) / stride + 1;

assert(h_out == floor(h_out), 'h_out is not an integer')
assert(w_out == floor(w_out), 'w_out is not an integer')
input_n.height = h_in;
input_n.width = w_in;
input_n.channel = c;
group = 1;
for n = 1:batch_size
    input_n.data = input.data(:, n);
    col = im2col_conv(input_n, layer, h_out, w_out);
    col = reshape(col, k*k*c, h_out*w_out);
    for g = 1:group
        col_g = col((g-1)*k*k*c/group + 1 : g*k*k*c/group, :);
        weight = param.w(:, (g-1)*num/group + 1 : g*num/group);
        b = param.b(:, (g-1)*num/group + 1 : g*num/group);
        tempoutput(:, (g-1)*num/group + 1 : g*num/group) = bsxfun(@plus, col_g'*weight, b);
    end
    output.data(:, n) = tempoutput(:);
    clear tempoutput;
end

output.height = h_out;
output.width = w_out;
output.channel = num;
output.batch_size = batch_size;

end

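For shape intuition, here is a minimal usage sketch with random (untrained) weights; the field values below are illustrative:

% Shapes only: a 28x28 single-channel batch through a 5x5, 20-filter conv.
input.height = 28; input.width = 28; input.channel = 1; input.batch_size = 2;
input.data = rand(28*28, 2);               % one column per image
layer.k = 5; layer.stride = 1; layer.pad = 0; layer.num = 20; layer.group = 1;
param.w = rand(5*5*1, 20); param.b = rand(1, 20);
out = conv_layer_forward(input, layer, param);
% out.height == 24, out.width == 24, out.channel == 20
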
matlab/conv_net.m

+55
@@ -0,0 +1,55 @@
function [cp, param_grad] = conv_net(params, layers, data, labels)

l = length(layers);
batch_size = layers{1}.batch_size;
%% Forward pass
output = convnet_forward(params, layers, data);

%% Loss layer
i = l;
assert(strcmp(layers{i}.type, 'LOSS') == 1, 'last layer must be loss layer');

wb = [params{i-1}.w(:); params{i-1}.b(:)];
[cost, grad, input_od, percent] = mlrloss(wb, output{i-1}.data, labels, layers{i}.num, 0, 1);

%% Back prop
if nargout >= 2
    param_grad{i-1}.w = reshape(grad(1:length(params{i-1}.w(:))), size(params{i-1}.w));
    param_grad{i-1}.b = reshape(grad(end - length(params{i-1}.b(:)) + 1 : end), size(params{i-1}.b));
    param_grad{i-1}.w = param_grad{i-1}.w / batch_size;
    param_grad{i-1}.b = param_grad{i-1}.b / batch_size;
end

cp.cost = cost / batch_size;
cp.percent = percent;

if nargout >= 2
    for i = l-1:-1:2
        switch layers{i}.type
            case 'CONV'
                output{i}.diff = input_od;
                [param_grad{i-1}, input_od] = conv_layer_backward(output{i}, output{i-1}, layers{i}, params{i-1});
            case 'POOLING'
                output{i}.diff = input_od;
                [input_od] = pooling_layer_backward(output{i}, output{i-1}, layers{i});
                param_grad{i-1}.w = [];
                param_grad{i-1}.b = [];
            case 'IP'
                output{i}.diff = input_od;
                [param_grad{i-1}, input_od] = inner_product_backward(output{i}, output{i-1}, layers{i}, params{i-1});
            case 'RELU'
                output{i}.diff = input_od;
                [input_od] = relu_backward(output{i}, output{i-1}, layers{i});
                param_grad{i-1}.w = [];
                param_grad{i-1}.b = [];
            case 'ELU'
                output{i}.diff = input_od;
                [input_od] = elu_backward(output{i}, output{i-1}, layers{i});
                param_grad{i-1}.w = [];
                param_grad{i-1}.b = [];
        end
        param_grad{i-1}.w = param_grad{i-1}.w / batch_size;
        param_grad{i-1}.b = param_grad{i-1}.b / batch_size;
    end
end
end

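A hedged usage sketch, assuming lenet.mat provides the trained params cell array (as in ec/ec.m) and that mlrloss accepts labels as a 1 x batch_size vector; the data and label values here are stand-ins:

% Compute cost, accuracy, and per-layer gradients on one stand-in batch.
layers = get_lenet();                 % batch_size is 100 by default
load lenet.mat                        % trained weights: params (assumed)
data   = rand(784, 100);              % stand-in images, one per column
labels = randi(10, 1, 100);           % stand-in labels (assumed 1..10)
[cp, param_grad] = conv_net(params, layers, data, labels);
fprintf('cost = %.4f, percent = %.4f\n', cp.cost, cp.percent);
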
matlab/convnet_forward.m

+34
@@ -0,0 +1,34 @@
function [output, P] = convnet_forward(params, layers, data)
l = length(layers);
%batch_size = layers{1}.batch_size;
assert(strcmp(layers{1}.type, 'DATA') == 1, 'first layer must be data layer');
output{1}.data = data;
output{1}.height = layers{1}.height;
output{1}.width = layers{1}.width;
output{1}.channel = layers{1}.channel;
output{1}.batch_size = layers{1}.batch_size;
output{1}.diff = 0;
for i = 2:l-1
    switch layers{i}.type
        case 'CONV'
            output{i} = conv_layer_forward(output{i-1}, layers{i}, params{i-1});
        case 'POOLING'
            output{i} = pooling_layer_forward(output{i-1}, layers{i});
        case 'IP'
            output{i} = inner_product_forward(output{i-1}, layers{i}, params{i-1});
        case 'RELU'
            output{i} = relu_forward(output{i-1});
        case 'ELU'
            output{i} = elu_forward(output{i-1}, layers{i});
    end
end
if nargout > 1
    W = bsxfun(@plus, params{l-1}.w * output{l-1}.data, params{l-1}.b);
    W = [W; zeros(1, size(W, 2))];     % implicit last-class logit of 0
    W = bsxfun(@minus, W, max(W));     % subtract column max for stability
    W = exp(W);

    % Convert to probabilities by normalizing
    P = bsxfun(@rdivide, W, sum(W));
end
end

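The nargout > 1 branch above is a numerically stabilized softmax: a zero row stands in for the implicit last class, the column-wise max is subtracted before exponentiating to avoid overflow, and each column is normalized to sum to 1. The same computation on toy logits:

% Stabilized softmax over columns, as in the P branch above.
W = [2 1; -1 0; 3 5];                 % toy logits, one column per sample
W = bsxfun(@minus, W, max(W));        % stability shift, cancels in the ratio
P = bsxfun(@rdivide, exp(W), sum(exp(W)));
disp(sum(P))                          % each column sums to 1
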
matlab/get_lenet.m

+47
@@ -0,0 +1,47 @@
function layers = get_lenet()

layers{1}.type = 'DATA';
layers{1}.height = 28;
layers{1}.width = 28;
layers{1}.channel = 1;
layers{1}.batch_size = 100;

layers{2}.type = 'CONV';
layers{2}.num = 20;
layers{2}.k = 5;
layers{2}.stride = 1;
layers{2}.pad = 0;
layers{2}.group = 1;

layers{3}.type = 'RELU';

layers{4}.type = 'POOLING';
layers{4}.k = 2;
layers{4}.stride = 2;
layers{4}.pad = 0;

layers{5}.type = 'CONV';
layers{5}.k = 5;
layers{5}.stride = 1;
layers{5}.pad = 0;
layers{5}.group = 1;
layers{5}.num = 50;

layers{6}.type = 'RELU';

layers{7}.type = 'POOLING';
layers{7}.k = 2;
layers{7}.stride = 2;
layers{7}.pad = 0;

layers{8}.type = 'IP';
layers{8}.num = 500;
layers{8}.init_type = 'uniform';

layers{9}.type = 'RELU';

layers{10}.type = 'LOSS';
layers{10}.num = 10;

end

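Tracing spatial sizes through this stack with the same output-size formula used in conv_layer_forward shows how a 28x28 input shrinks before the fully connected layers; with 50 channels at the end, the IP layer sees 4*4*50 = 800 features:

% 28 -> conv 5x5 -> 24 -> pool 2/2 -> 12 -> conv 5x5 -> 8 -> pool 2/2 -> 4
h = 28;
for spec = [5 1; 2 2; 5 1; 2 2]'        % columns are [k; stride], pad = 0
    h = (h - spec(1)) / spec(2) + 1;
    fprintf('spatial size: %dx%d\n', h, h);
end
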
matlab/get_lr.m

+5
@@ -0,0 +1,5 @@
function lr_t = get_lr(iter, epsilon, gamma, power)
% get the learning rate at step iter

lr_t = epsilon / (1 + gamma * iter)^power;
end

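This is an inverse-time decay schedule; a quick sketch of how it behaves (the epsilon, gamma, and power values below are illustrative, not taken from any training script in this commit):

% Illustrative decay: lr shrinks as (1 + gamma*iter)^(-power).
epsilon = 0.01; gamma = 0.0001; power = 0.75;   % assumed hyperparameters
for t = [0 100 1000 10000]
    fprintf('iter %5d: lr = %.6f\n', t, get_lr(t, epsilon, gamma, power));
end
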
matlab/im2col_conv.m

+13
@@ -0,0 +1,13 @@
function col = im2col_conv(input_n, layer, h_out, w_out)
% h_in = input_n.height;
% w_in = input_n.width;
% c = input_n.channel;
% k = layer.k;
% pad = layer.pad;
% stride = layer.stride;

% col = im2col_conv_c(input_n.data, h_in, w_in, c, k, pad, stride, h_out, w_out);
% col = im2col_conv_c(input_n.data, input_n.height, input_n.width, input_n.channel,...
%     layer.k, layer.pad, layer.stride, h_out, w_out);
col = im2col_conv_matlab(input_n, layer, h_out, w_out);
end

matlab/im2col_conv_matlab.m

+19
@@ -0,0 +1,19 @@
function col = im2col_conv_matlab(input_n, layer, h_out, w_out)
h_in = input_n.height;
w_in = input_n.width;
c = input_n.channel;
k = layer.k;
pad = layer.pad;
stride = layer.stride;

im = reshape(input_n.data, [h_in, w_in, c]);
im = padarray(im, [pad, pad], 0);
col = zeros(k*k*c, h_out*w_out);
for h = 1:h_out
    for w = 1:w_out
        matrix_hw = im((h-1)*stride + 1 : (h-1)*stride + k, (w-1)*stride + 1 : (w-1)*stride + k, :);
        col(:, h + (w-1)*h_out) = matrix_hw(:);
    end
end
col = col(:);
end

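A small sanity check tying this to col2im_conv_matlab above: with non-overlapping patches (stride equal to k, pad 0) the scatter-add touches each pixel exactly once, so the round trip reproduces the image. The 4x4 test image below is made up:

% im2col followed by col2im is the identity when patches don't overlap.
input_n.height = 4; input_n.width = 4; input_n.channel = 1;
input_n.data = (1:16)';                  % 4x4 test image, column-major
layer.k = 2; layer.stride = 2; layer.pad = 0;
h_out = 2; w_out = 2;
col = im2col_conv_matlab(input_n, layer, h_out, w_out);
im  = col2im_conv_matlab(col, input_n, layer, h_out, w_out);
assert(isequal(im, reshape(input_n.data, 4, 4)));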