diff --git a/README.md b/README.md
index 0eb977f..e7a8cca 100644
--- a/README.md
+++ b/README.md
@@ -49,30 +49,30 @@ This submission consists of various methods for video stitching from multi-camer
- `PositioningSystem.py` - Define the functions for point and box transformation based on the `trans_params.json` files. It transforms the local coordinates of each camera to global coordinates (a minimal sketch follows below).
- `PositioningSystem_Test.py` - Test the positioning system of the three farms by visualizing the panorama results of the position transformation from each camera
-
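+A minimal sketch of the local-to-global point transform (hypothetical: it assumes `trans_params.json` stores one 3x3 homography per camera; see `PositioningSystem.py` for the actual parameter format and API):
+
+```python
+import json
+import numpy as np
+import cv2 as cv
+
+# Illustrative only; the real schema of trans_params.json may differ.
+with open("trans_params.json") as f:
+    params = json.load(f)                           # assumed: {"camera_1": 3x3 list, ...}
+H = np.array(params["camera_1"], dtype=np.float64)  # assumed local-to-global homography
+
+pt_local = np.array([[[120.0, 240.0]]])             # (x, y) in one camera's local frame
+pt_global = cv.perspectiveTransform(pt_local, H)    # (x, y) in the global frame
+print(pt_global.ravel())
+```
+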
+----
## Usage
### Feature Extraction
-- **Image Feature Extraction Test**: Extract different kinds of features from the input image and visualize the result
-```bash
- $ python feature_extraction_test.py
-```
+- **Image Feature Extraction Test**: Extract various features (SIFT, BRISK, ORB) from input images and visualize results
+ ```bash
+ $ ./main.sh -t feature_extraction -img1 dataset/example_image/APAP-railtracks/1.JPG
+ ```
![](result/ROIs.png)
diff --git a/main.sh b/main.sh
new file mode 100755
index 0000000..420ebe8
--- /dev/null
+++ b/main.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+usage() {
+ echo "Usage: ${0} -t|--type <feature_extraction|feature_matching> -img1 <image1> [-img2 <image2>]" 1>&2
+ exit 1
+}
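+
+# Example invocations (image paths taken from dataset/example_image, as used by the test scripts):
+#   ./main.sh -t feature_extraction -img1 dataset/example_image/APAP-railtracks/1.JPG
+#   ./main.sh -t feature_matching -img1 dataset/example_image/APAP-railtracks/1.JPG -img2 dataset/example_image/APAP-railtracks/2.JPG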
+
+ROT=0
+while [[ $# -gt 0 ]];do
+ key=${1}
+ case ${key} in
+ -t|--type)
+ TYPE=${2}
+ echo "TYPE : $TYPE"
+ shift 2
+ ;;
+ -img1)
+ IMG1=${2}
+ echo "IMAGE 1 : $IMG1"
+ shift 2
+ ;;
+ -img2)
+ IMG2=${2}
+ echo "IMAGE 2 : $IMG2"
+ shift 2
+ ;;
+ *)
+ usage
+ shift
+ ;;
+ esac
+done
+
+if [[ $TYPE == "feature_extraction" ]];
+then
+ python3 src/feature_extraction_test.py --img "$IMG1" #--rotate
+elif [[ $TYPE == "feature_matching" ]];
+then
+ python3 src/feature_matching_test.py --img1 "$IMG1" --img2 "$IMG2" #--rotate
+fi
\ No newline at end of file
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/feature_extraction_test.py b/src/feature_extraction_test.py
similarity index 86%
rename from feature_extraction_test.py
rename to src/feature_extraction_test.py
index 5cd0aa9..663dc22 100644
--- a/feature_extraction_test.py
+++ b/src/feature_extraction_test.py
@@ -1,18 +1,32 @@
+import os
+import sys
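+# Make the top-level `stitch` package importable when this script is run from src/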
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+import argparse
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from stitch import utils, Image
-
if __name__ == '__main__':
- '''This script is used for feature extraction from each image'''
+ """
+ This script is used for feature extraction testing.
+ """
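+ # Example (run from the repository root):
+ #   python3 src/feature_extraction_test.py --img dataset/example_image/APAP-railtracks/1.JPG --rotate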
+ # Define parser arguments
+ parser = argparse.ArgumentParser(description="Image Stitching")
+ parser.add_argument("--img", type=str)
+ parser.add_argument("--rotate", action="store_true", help="Rotate the image to get better visualization")
+ args, _ = parser.parse_known_args()
+
# Load example image
- img = cv.imread("dataset/example_image/APAP-railtracks/1.JPG")
+ img = cv.imread(args.img)
+ # img = cv.imread("dataset/example_image/APAP-railtracks/1.JPG")
# img = cv.imread("dataset/Arie/lamp_01_Arie.PNG")
# img = cv.imread("dataset/example_image/park/1.jpg")
- # img = np.rot90(img,1) # Rotate the image to get better visualization
+ if args.rotate:
+ img = np.rot90(img,1) # Rotate the image to get better visualization
Img = Image(img)
diff --git a/feature_matching_test.py b/src/feature_matching_test.py
similarity index 82%
rename from feature_matching_test.py
rename to src/feature_matching_test.py
index 3039624..19fc425 100644
--- a/feature_matching_test.py
+++ b/src/feature_matching_test.py
@@ -1,28 +1,42 @@
-import matplotlib.pyplot as plt
+import os
+import sys
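+# Make the top-level `stitch` package importable when this script is run from src/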
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+import argparse
+import itertools
import numpy as np
import cv2 as cv
-import itertools
+import matplotlib.pyplot as plt
from stitch import Image, utils
if __name__ == '__main__':
'''This script tests feature matching'''
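+ # Example (run from the repository root):
+ #   python3 src/feature_matching_test.py --img1 dataset/example_image/APAP-railtracks/1.JPG --img2 dataset/example_image/APAP-railtracks/2.JPG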
+ # Define parser arguments
+ parser = argparse.ArgumentParser(description="Image Stitching")
+ parser.add_argument("--img1", type=str)
+ parser.add_argument("--img2", type=str)
+ parser.add_argument("--rotate", action="store_true", help="Rotate the image to get better visualization")
+ args, _ = parser.parse_known_args()
draw_params = dict(matchColor = (0,255,0),
singlePointColor = (255,0,0),
flags = cv.DrawMatchesFlags_DEFAULT)
# load the matching images
- #img1 = cv.imread("dataset/Arie/lamp_02_Arie.PNG")
- #img2 = cv.imread("dataset/Arie/lamp_01_Arie.PNG")
- img1 = cv.imread("dataset/example_image/APAP-railtracks/1.JPG")
- img2 = cv.imread("dataset/example_image/APAP-railtracks/2.JPG")
+ img1 = cv.imread(args.img1)
+ img2 = cv.imread(args.img2)
+ # img1 = cv.imread("dataset/Arie/lamp_02_Arie.PNG")
+ # img2 = cv.imread("dataset/Arie/lamp_01_Arie.PNG")
+ #img1 = cv.imread("dataset/example_image/APAP-railtracks/1.JPG")
+ #img2 = cv.imread("dataset/example_image/APAP-railtracks/2.JPG")
#img1 = cv.imread("dataset/example_image/NISwGSP-denny/denny02.jpg")
#img2 = cv.imread("dataset/example_image/NISwGSP-denny/denny03.jpg")
- #img1 = np.rot90(img1,1)
- #img2 = np.rot90(img2,1)
+ if args.rotate:
+ img1 = np.rot90(img1,1)
+ img2 = np.rot90(img2,1)
# Initialize the Stitch Class
Img1 = Image(img1)
@@ -39,9 +53,8 @@
matches_sift_knn = utils.featureMatch(des1_filter, des2_filter, 'sift', knn=True)
- img_sift = cv.drawMatches(Img1.img,kps1_filter,Img2.img,kps2_filter,matches_sift[:300],None,**draw_params)
- img_sift_knn = cv.drawMatches(Img1.img,kps1_filter,Img2.img,kps2_filter,matches_sift_knn[:300],None,**draw_params)
-
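+ # Visualize the top 300 SIFT matches (plain matching vs. KNN matching) side by side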
+ img_sift = cv.drawMatches(Img1.img, kps1_filter, Img2.img, kps2_filter, matches_sift[:300],None,**draw_params)
+ img_sift_knn = cv.drawMatches(Img1.img, kps1_filter, Img2.img, kps2_filter, matches_sift_knn[:300],None,**draw_params)
'''BRISK Features Matching Comparison'''
# Extract the BRISK features
diff --git a/stitch/ImageStitch.py b/stitch/ImageStitch.py
index eff9e93..64bc291 100644
--- a/stitch/ImageStitch.py
+++ b/stitch/ImageStitch.py
@@ -31,7 +31,7 @@ def __init__(self, img, nfeatures=0, method = 'sift'):
# Extract the features from image and update the feature property
kps, des = self.findFeatures(method)
- print("\nInitial Successfullys")
+ print("\nInitialized successfully!")
def equalizeHist(self):
self.img = utils.equalizeHist(self.img)