Add files via upload
Daneshpajouh authored Sep 13, 2022
1 parent daf32de commit 0c13a1e
Showing 42 changed files with 1,175 additions and 0 deletions.
93 changes: 93 additions & 0 deletions ec/ec.m
@@ -0,0 +1,93 @@
clc

%% Network definition
layers = get_lenet();
layers{1}.batch_size = 1;

% load the trained weights
load lenet.mat

path = '../images/';
% Read images from Images folder
all_img = dir(path);
for j=1:length(all_img)
img_name = all_img(j).name;
img_file = fullfile(path, img_name);
try
%Read image
img = imread(img_file);

%Convert RGB to gray before the single-channel steps below
if size(img,3)==3 % RGB image
img = rgb2gray(img);
end

%Upscale by 3x
im = imresize(img, 3);

%Show image
figure(1)
imshow(im);
title('Input image')

%Binarize image
threshold = graythresh(im);
im = ~imbinarize(im, threshold);

%Remove all object containing fewer than 30 pixels
im = bwareaopen(im,30);
pause(3)

%Display binary image
figure(2)
imshow(~im);
title('Binarized image with bounding boxes')

%Label connected components
[L, Ne]=bwlabel(im);

%Measure properties of image regions
propied=regionprops(L,'BoundingBox');
hold off

%Plot Bounding Box
for n=1:size(propied,1)
rectangle('Position',propied(n).BoundingBox,'EdgeColor','r','LineWidth',1);
end

pause(3)
figure;
test_pred = zeros(784, size(propied,1));
%% Plot bounded, resized digits
for n=1:size(propied,1)
coord = propied(n).BoundingBox;
subImage = imcrop(L, [coord(1), coord(2), coord(3), coord(4)]);
subImage = imresize(subImage, [28, 28]);
test_pred(:,n) = im2col(subImage, [28 28]);
subplot(1,size(propied,1),n), imshow(subImage);
sgtitle('Bounded resized (28x28) digits')
end

%hold off
pause(3)


%% Testing the network
num_digits = 10;
prediction = zeros(num_digits, size(propied,1));
for i=1:size(propied,1)
im = test_pred(:,i);
im = im(:);
im = im./max(im);
[output, P] = convnet_forward(params, layers, im);
prediction(:, i) = P;
end

[val, ind] = max(prediction);
fprintf('\n\n'); disp(img_name); fprintf('Predicted class indices:\n')
disp(ind)
catch
%Skip directory entries ('.', '..') and files that are not readable images
end
end
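
The same trained network can be queried on a single pre-cropped digit; a minimal sketch mirroring the steps above ('digit.png' is an assumed placeholder file, not part of this commit):

layers = get_lenet();
layers{1}.batch_size = 1;
load lenet.mat                         % trained weights (params), as in ec.m
digit = imresize(im2double(imread('digit.png')), [28 28]);  % assumed input file
x = digit(:) ./ max(digit(:));         % flatten and scale, as in ec.m
[~, P] = convnet_forward(params, layers, x);
[~, ind] = max(P);                     % index of the most probable class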



Binary file added images/image1.JPG
Binary file added images/image2.JPG
Binary file added images/image3.png
Binary file added images/image4.jpg
11 changes: 11 additions & 0 deletions matlab/col2im_conv.m
@@ -0,0 +1,11 @@
function im = col2im_conv(col, input, layer, h_out, w_out)
h_in = input.height;
w_in = input.width;
c = input.channel;
k = layer.k;
pad = layer.pad;
stride = layer.stride;

% im = col2im_conv_c(col, h_in, w_in, c, k, pad, stride, h_out, w_out);
im = col2im_conv_matlab(col, input, layer, h_out, w_out);
end
21 changes: 21 additions & 0 deletions matlab/col2im_conv_matlab.m
@@ -0,0 +1,21 @@
function im = col2im_conv_matlab(col, input, layer, h_out, w_out)
h_in = input.height;
w_in = input.width;
c = input.channel;
k = layer.k;
pad = layer.pad;
stride = layer.stride;

im = zeros(h_in, w_in, c);
assert(pad == 0, 'pad must be 0');
% im = padarray(im, [pad, pad], 0);
col = reshape(col, [k*k*c, h_out*w_out]);
for h = 1:h_out
for w = 1:w_out
im((h-1)*stride + 1 : (h-1)*stride + k, (w-1)*stride + 1 : (w-1)*stride + k, :) ...
= im((h-1)*stride + 1 : (h-1)*stride + k, (w-1)*stride + 1 : (w-1)*stride + k, :) + ...
reshape(col(:, h + (w-1)*h_out), [k, k, c]);
end
end
im = im(pad+1:end-pad, pad+1:end-pad, :);
end
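
col2im_conv_matlab scatters each column back to its patch location and sums overlapping contributions, i.e. it is the adjoint of im2col_conv_matlab (shown later in this diff). A quick sketch of the adjoint identity dot(im2col(x), y) == dot(x, col2im(y)), with assumed sizes:

input = struct('height', 4, 'width', 4, 'channel', 1);
layer = struct('k', 2, 'stride', 1, 'pad', 0);
h_out = 3; w_out = 3;                       % (4 - 2)/1 + 1
x = randn(16, 1);
input_n = input; input_n.data = x;
Ax = im2col_conv_matlab(input_n, layer, h_out, w_out);
y = randn(size(Ax));
Aty = col2im_conv_matlab(y, input, layer, h_out, w_out);
abs(dot(Ax, y) - dot(x, Aty(:)))            % ~0 up to rounding error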
40 changes: 40 additions & 0 deletions matlab/conv_layer_backward.m
@@ -0,0 +1,40 @@
function [param_grad, input_od] = conv_layer_backward(output, input, layer, param)
% conv layer backward
h_in = input.height;
w_in = input.width;
c = input.channel;
batch_size = input.batch_size;
k = layer.k;
group = layer.group;
num = layer.num;

h_out = output.height;
w_out = output.width;
input_n.height = h_in;
input_n.width = w_in;
input_n.channel = c;
input_od = zeros(size(input.data));
param_grad.b = zeros(size(param.b));
param_grad.w = zeros(size(param.w));

for n = 1:batch_size
input_n.data = input.data(:, n);
col = im2col_conv(input_n, layer, h_out, w_out);
col = reshape(col, k*k*c, h_out*w_out);
col_diff = zeros(size(col));
temp_data_diff = reshape(output.diff(:, n), [h_out*w_out, num]);
for g = 1:group
g_c_idx = (g-1)*k*k*c/group + 1: g*k*k*c/group;
g_num_idx = (g-1)*num/group + 1 : g*num/group;
col_g = col(g_c_idx, :);
weight = param.w(:, g_num_idx);
% get the gradient of param
param_grad.b(:, g_num_idx) = param_grad.b(:, g_num_idx) + sum(temp_data_diff(:, g_num_idx));
param_grad.w(:, g_num_idx) = param_grad.w(:, g_num_idx) + col_g*temp_data_diff(:, g_num_idx);
col_diff(g_c_idx, :) = weight*temp_data_diff(:, g_num_idx)';
end
im = col2im_conv(col_diff(:), input, layer, h_out, w_out);
% set the gradient w.r.t to input.data
input_od(:, n) = im(:);
end
end
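
A quick way to sanity-check this backward pass is a finite-difference probe against conv_layer_forward; the sketch below uses small assumed dimensions and treats the sum of all outputs as the loss:

layer = struct('k', 3, 'stride', 1, 'pad', 0, 'group', 1, 'num', 2);
input = struct('height', 5, 'width', 5, 'channel', 1, 'batch_size', 1);
input.data = randn(25, 1);
param.w = randn(3*3*1, 2); param.b = randn(1, 2);
out = conv_layer_forward(input, layer, param);
out.diff = ones(numel(out.data), 1);        % dL/d(output) for L = sum of outputs
[pg, ~] = conv_layer_backward(out, input, layer, param);
ep = 1e-5; param2 = param; param2.w(1) = param2.w(1) + ep;   % probe one weight
out2 = conv_layer_forward(input, layer, param2);
num_grad = (sum(out2.data(:)) - sum(out.data(:))) / ep;
fprintf('analytic %.6f vs numeric %.6f\n', pg.w(1), num_grad);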
47 changes: 47 additions & 0 deletions matlab/conv_layer_forward.m
@@ -0,0 +1,47 @@
function [output] = conv_layer_forward(input, layer, param)
% Conv layer forward
% input: struct with input data
% layer: convolution layer struct
% param: weights for the convolution layer

% output: struct with the convolved data and its height/width/channel/batch_size

h_in = input.height;
w_in = input.width;
c = input.channel;
batch_size = input.batch_size;
k = layer.k;
pad = layer.pad;
stride = layer.stride;
num = layer.num;
% resolve output shape
h_out = (h_in + 2*pad - k) / stride + 1;
w_out = (w_in + 2*pad - k) / stride + 1;

assert(h_out == floor(h_out), 'h_out is not integer')
assert(w_out == floor(w_out), 'w_out is not integer')
input_n.height = h_in;
input_n.width = w_in;
input_n.channel = c;
group = layer.group;
for n = 1:batch_size
input_n.data = input.data(:, n);
col = im2col_conv(input_n, layer, h_out, w_out);
col = reshape(col, k*k*c, h_out*w_out);
tempoutput = zeros(h_out*w_out, num);
for g = 1:group
col_g = col((g-1)*k*k*c/group + 1: g*k*k*c/group, :);
weight = param.w(:, (g-1)*num/group + 1 : g*num/group);
b = param.b(:, (g-1)*num/group + 1 : g*num/group);
tempoutput(:, (g-1)*num/group + 1 : g*num/group) = bsxfun(@plus, col_g'*weight, b);
end
output.data(:, n) = tempoutput(:);
end

output.height = h_out;
output.width = w_out;
output.channel = num;
output.batch_size = batch_size;

end
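
param.w is stored flattened as (k*k*c) x num and param.b as 1 x num, one column per output channel. A minimal sketch with assumed sizes, applying a single 3x3 box filter:

input = struct('height', 6, 'width', 6, 'channel', 1, 'batch_size', 1);
input.data = randn(36, 1);
layer = struct('k', 3, 'stride', 1, 'pad', 0, 'group', 1, 'num', 1);
param.w = ones(9, 1) / 9;   % one averaging kernel, (k*k*c) x num
param.b = 0;                % 1 x num
out = conv_layer_forward(input, layer, param);
size(out.data)              % 16x1: the 4x4 output map, column-major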

55 changes: 55 additions & 0 deletions matlab/conv_net.m
@@ -0,0 +1,55 @@
function [cp, param_grad] = conv_net(params, layers, data, labels)

l = length(layers);
batch_size = layers{1}.batch_size;
%% Forward pass
output = convnet_forward(params, layers, data);

%% Loss layer
i = l;
assert(strcmp(layers{i}.type, 'LOSS') == 1, 'last layer must be loss layer');

wb = [params{i-1}.w(:); params{i-1}.b(:)];
[cost, grad, input_od, percent] = mlrloss(wb, output{i-1}.data, labels, layers{i}.num, 0, 1);

%% Back prop
if nargout >= 2
param_grad{i-1}.w = reshape(grad(1:length(params{i-1}.w(:))), size(params{i-1}.w));
param_grad{i-1}.b = reshape(grad(end - length(params{i-1}.b(:)) + 1 : end), size(params{i-1}.b));
param_grad{i-1}.w = param_grad{i-1}.w / batch_size;
param_grad{i-1}.b = param_grad{i-1}.b / batch_size;
end

cp.cost = cost/batch_size;
cp.percent = percent;

if nargout >= 2
for i = l-1:-1:2
switch layers{i}.type
case 'CONV'
output{i}.diff = input_od;
[param_grad{i-1}, input_od] = conv_layer_backward(output{i}, output{i-1}, layers{i}, params{i-1});
case 'POOLING'
output{i}.diff = input_od;
[input_od] = pooling_layer_backward(output{i}, output{i-1}, layers{i});
param_grad{i-1}.w = [];
param_grad{i-1}.b = [];
case 'IP'
output{i}.diff = input_od;
[param_grad{i-1}, input_od] = inner_product_backward(output{i}, output{i-1}, layers{i}, params{i-1});
case 'RELU'
output{i}.diff = input_od;
[input_od] = relu_backward(output{i}, output{i-1}, layers{i});
param_grad{i-1}.w = [];
param_grad{i-1}.b = [];
case 'ELU'
output{i}.diff = input_od;
[input_od] = elu_backward(output{i}, output{i-1}, layers{i});
param_grad{i-1}.w = [];
param_grad{i-1}.b = [];
end
param_grad{i-1}.w = param_grad{i-1}.w / batch_size;
param_grad{i-1}.b = param_grad{i-1}.b / batch_size;
end
end
end
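
One full training step built from the pieces in this commit might look like the sketch below; init_convnet, xtrain, and ytrain are assumed names not shown in this diff:

layers = get_lenet();
params = init_convnet(layers);       % hypothetical initializer (not in this diff)
x = xtrain(:, 1:100);                % assumed 784x100 batch of flattened digits
y = ytrain(1:100);                   % assumed labels for that batch
[cp, param_grad] = conv_net(params, layers, x, y);
lr = get_lr(1, 0.01, 1e-4, 0.75);    % assumed schedule constants
for j = 1:numel(param_grad)          % plain SGD update (no momentum)
    params{j}.w = params{j}.w - lr * param_grad{j}.w;
    params{j}.b = params{j}.b - lr * param_grad{j}.b;
end
fprintf('cost %.4f, batch accuracy %.4f\n', cp.cost, cp.percent);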
34 changes: 34 additions & 0 deletions matlab/convnet_forward.m
@@ -0,0 +1,34 @@
function [output, P] = convnet_forward(params, layers, data)
l = length(layers);
%batch_size = layers{1}.batch_size;
assert(strcmp(layers{1}.type, 'DATA') == 1, 'first layer must be data layer');
output{1}.data = data;
output{1}.height = layers{1}.height;
output{1}.width = layers{1}.width;
output{1}.channel = layers{1}.channel;
output{1}.batch_size = layers{1}.batch_size;
output{1}.diff = 0;
for i = 2:l-1
switch layers{i}.type
case 'CONV'
output{i} = conv_layer_forward(output{i-1}, layers{i}, params{i-1});
case 'POOLING'
output{i} = pooling_layer_forward(output{i-1}, layers{i});
case 'IP'
output{i} = inner_product_forward(output{i-1}, layers{i}, params{i-1});
case 'RELU'
output{i} = relu_forward(output{i-1});
case 'ELU'
output{i} = elu_forward(output{i-1}, layers{i});
end
end
if nargout > 1
W = bsxfun(@plus, params{l-1}.w * output{l-1}.data, params{l-1}.b);
W = [W; zeros(1, size(W, 2))]; % fixed score of 0 for the last class
W = bsxfun(@minus, W, max(W)); % subtract the column max so exp() cannot overflow
W = exp(W);

% Convert to probabilities by normalizing each column
P = bsxfun(@rdivide, W, sum(W));
end
end
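
The zero row appended to W gives the last class a fixed score of 0 (presumably the K-1 parameterization used by mlrloss), and subtracting the column max keeps exp() from overflowing. A tiny standalone demo of that stabilization, with assumed scores:

scores = [1000; 1001; 1002];           % raw scores that would overflow exp()
scores = scores - max(scores);         % shift so the largest score is 0
p = exp(scores) ./ sum(exp(scores));   % valid probabilities, no overflow
disp(p')                               % ~[0.0900 0.2447 0.6652]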
47 changes: 47 additions & 0 deletions matlab/get_lenet.m
@@ -0,0 +1,47 @@
function layers = get_lenet()

layers{1}.type = 'DATA';
layers{1}.height = 28;
layers{1}.width = 28;
layers{1}.channel = 1;
layers{1}.batch_size = 100;

layers{2}.type = 'CONV';
layers{2}.num = 20;
layers{2}.k = 5;
layers{2}.stride = 1;
layers{2}.pad = 0;
layers{2}.group = 1;

layers{3}.type = 'RELU';

layers{4}.type = 'POOLING';
layers{4}.k = 2;
layers{4}.stride = 2;
layers{4}.pad = 0;


layers{5}.type = 'CONV';
layers{5}.k = 5;
layers{5}.stride = 1;
layers{5}.pad = 0;
layers{5}.group = 1;
layers{5}.num = 50;

layers{6}.type = 'RELU';

layers{7}.type = 'POOLING';
layers{7}.k = 2;
layers{7}.stride = 2;
layers{7}.pad = 0;

layers{8}.type = 'IP';
layers{8}.num = 500;
layers{8}.init_type = 'uniform';

layers{9}.type = 'RELU';

layers{10}.type = 'LOSS';
layers{10}.num = 10;

end
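
With pad = 0 throughout, each conv/pool stage shrinks the map by the rule h_out = (h_in - k)/stride + 1; a short sketch tracing the spatial sizes this architecture produces:

h = 28;                                   % input height (width is identical)
for spec = [5 1; 2 2; 5 1; 2 2]'          % [k stride] for conv, pool, conv, pool
    h = (h - spec(1)) / spec(2) + 1;
    fprintf('-> %dx%d\n', h, h);          % 24x24, 12x12, 8x8, 4x4
end
% the final 4*4*50 = 800 values feed the 500-unit IP layer, then the 10-way loss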
5 changes: 5 additions & 0 deletions matlab/get_lr.m
@@ -0,0 +1,5 @@
function lr_t = get_lr(iter, epsilon, gamma, power)
% get the learning rate at step iter

lr_t = epsilon / (1 + gamma * iter)^power;
end
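
This is an inverse-time decay; for example, with assumed settings epsilon = 0.01, gamma = 1e-4, power = 0.75:

lr0    = get_lr(0,    0.01, 1e-4, 0.75);   % 0.0100 (no decay yet)
lr1000 = get_lr(1000, 0.01, 1e-4, 0.75);   % 0.01 / 1.1^0.75 ≈ 0.0093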
13 changes: 13 additions & 0 deletions matlab/im2col_conv.m
@@ -0,0 +1,13 @@
function col = im2col_conv(input_n, layer, h_out, w_out)
% h_in = input_n.height;
% w_in = input_n.width;
% c = input_n.channel;
% k = layer.k;
% pad = layer.pad;
% stride = layer.stride;

% col = im2col_conv_c(input_n.data, h_in, w_in, c, k, pad, stride, h_out, w_out);
% col = im2col_conv_c(input_n.data, input_n.height, input_n.width, input_n.channel,...
% layer.k, layer.pad, layer.stride, h_out, w_out);
col = im2col_conv_matlab(input_n, layer, h_out, w_out);
end
19 changes: 19 additions & 0 deletions matlab/im2col_conv_matlab.m
@@ -0,0 +1,19 @@
function col = im2col_conv_matlab(input_n, layer, h_out, w_out)
h_in = input_n.height;
w_in = input_n.width;
c = input_n.channel;
k = layer.k;
pad = layer.pad;
stride = layer.stride;

im = reshape(input_n.data, [h_in, w_in, c]);
im = padarray(im, [pad, pad], 0);
col = zeros(k*k*c, h_out*w_out);
for h = 1:h_out
for w = 1:w_out
matrix_hw = im((h-1)*stride + 1 : (h-1)*stride + k, (w-1)*stride + 1 : (w-1)*stride + k, :);
col(:, h + (w-1)*h_out) = matrix_hw(:);
end
end
col = col(:);
end
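
A worked example on a tiny input makes the column layout concrete; the values here are assumed for illustration:

input_n = struct('height', 3, 'width', 3, 'channel', 1, 'data', (1:9)');
layer = struct('k', 2, 'stride', 1, 'pad', 0);
col = im2col_conv_matlab(input_n, layer, 2, 2);   % h_out = w_out = (3-2)/1+1 = 2
patches = reshape(col, 4, 4)   % each column is one 2x2 patch, column-major:
% 1 2 4 5
% 2 3 5 6
% 4 5 7 8
% 5 6 8 9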