Adaptations to pyslam project
luigifreda committed Jul 20, 2024
1 parent 06e1174 commit 604fabb
Showing 10 changed files with 171 additions and 94 deletions.
12 changes: 6 additions & 6 deletions config.ini
@@ -75,13 +75,13 @@ groundtruth_file=auto
 [VIDEO_DATASET]
 type=video
 ;
-base_path=./videos/kitti00
-cam_settings=settings/KITTI00-02.yaml
-name=video.mp4
+;base_path=./videos/kitti00
+;cam_settings=settings/KITTI00-02.yaml
+;name=video.mp4
 ;
-; base_path=./videos/kitti06
-; cam_settings=settings/KITTI04-12.yaml
-; name=video_color.mp4
+base_path=./videos/kitti06
+cam_settings=settings/KITTI04-12.yaml
+name=video_color.mp4
 ;
 ;base_path=./videos/webcam
 ;cam_settings=settings/WEBCAM.yaml
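For reference, this hunk just switches which video/calibration pair the [VIDEO_DATASET] loader picks up (kitti00 is commented out, kitti06 becomes active). A minimal sketch of reading the section with Python's standard configparser; this is illustrative only, not pyslam's actual config loader:

```python
# Minimal sketch: read the active [VIDEO_DATASET] entries with the standard
# library. Lines starting with ';' are treated as comments by default.
from configparser import ConfigParser

config = ConfigParser()
config.read('config.ini')

section = config['VIDEO_DATASET']
print(section.get('type'))          # 'video'
print(section.get('base_path'))     # './videos/kitti06' after this commit
print(section.get('cam_settings'))  # 'settings/KITTI04-12.yaml'
print(section.get('name'))          # 'video_color.mp4'
```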
2 changes: 1 addition & 1 deletion dataset.py
@@ -336,7 +336,7 @@ def get_current_frame(self):
 class KittiDataset(Dataset):
     def __init__(self, path, name, associations=None, type=DatasetType.KITTI):
         super().__init__(path, name, 10, associations, type)
-        self.fps = 20
+        self.fps = 10
         self.image_left_path = '/image_0/'
         self.image_right_path = '/image_1/'
         self.timestamps = np.loadtxt(self.path + '/sequences/' + self.name + '/times.txt')
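The fps fix matters because KITTI sequences are recorded at roughly 10 Hz, which is also what the times.txt loaded above reports. A quick hedged check (the path is illustrative):

```python
# Sketch: confirm the frame rate implied by a KITTI times.txt, which stores
# one cumulative timestamp (in seconds) per frame.
import numpy as np

timestamps = np.loadtxt('./sequences/00/times.txt')
mean_dt = float(np.mean(np.diff(timestamps)))   # ~0.1 s for KITTI
print(f'mean frame period: {mean_dt:.3f} s -> ~{1.0/mean_dt:.1f} fps')
# With self.fps = 10, the nominal period 1/fps (0.1 s) agrees with the data,
# so timestamp-based playback is no longer twice as fast as the recording.
```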
57 changes: 29 additions & 28 deletions feature_manager.py
@@ -58,7 +58,7 @@
 KeyNetDescFeature2D = import_from('feature_keynet', 'KeyNetDescFeature2D')
 DiskFeature2D = import_from('feature_disk', 'DiskFeature2D')

-kVerbose = False
+kVerbose = True

 kNumFeatureDefault = Parameters.kNumFeatures

@@ -320,17 +320,17 @@ def __init__(self, num_features=kNumFeatureDefault,
             self.pyramid_type = PyramidType.GAUSS_PYRAMID
             self.pyramid_do_parallel = False # N.B.: SUPERPOINT interface class is not thread-safe!
             self.force_multiscale_detect_and_compute = True # force it since SUPERPOINT cannot compute descriptors separately from keypoints
-
+        #
         #
         elif self.detector_type == FeatureDetectorTypes.XFEAT:
             self.oriented_features = False
             self._feature_detector = XfeatFeature2D()
             if self.descriptor_type != FeatureDescriptorTypes.NONE:
                 self.use_pyramid_adaptor = self.num_levels > 1
                 self.need_nms = self.num_levels > 1
                 self.pyramid_type = PyramidType.GAUSS_PYRAMID
-                self.pyramid_do_parallel = False # N.B.: SUPERPOINT interface class is not thread-safe!
-                self.force_multiscale_detect_and_compute = True # force it since SUPERPOINT cannot compute descriptors separately from keypoints
+                self.pyramid_do_parallel = False # N.B.: XFEAT interface class is not thread-safe!
+                self.force_multiscale_detect_and_compute = True # force it since XFEAT cannot compute descriptors separately from keypoints
         #
         #
         elif self.detector_type == FeatureDetectorTypes.FAST:
@@ -562,11 +562,12 @@ def __init__(self, num_features=kNumFeatureDefault,
             if self.detector_type != FeatureDetectorTypes.SUPERPOINT:
                 raise ValueError("You cannot use SUPERPOINT descriptor without SUPERPOINT detector!\nPlease, select SUPERPOINT as both descriptor and detector!")
             self._feature_descriptor = self._feature_detector # reuse the same SuperPointDector object
-
+        #
+        #
         elif self.descriptor_type == FeatureDescriptorTypes.XFEAT:
             if self.detector_type != FeatureDetectorTypes.XFEAT:
                 raise ValueError("You cannot use XFEAT descriptor without XFEAT detector!\nPlease, select XFEAT as both descriptor and detector!")
-            self._feature_descriptor = self._feature_detector # reuse the same SuperPointDector object
+            self._feature_descriptor = self._feature_detector # reuse the same XFeat object
         #
         #
         elif self.descriptor_type == FeatureDescriptorTypes.TFEAT:
@@ -905,17 +905,17 @@ def detect(self, frame, mask=None, filter=True):
             # standard detection
             kps = self._feature_detector.detect(frame, mask)
         # filter keypoints
-        # filter_name = 'NONE'
-        # if filter:
-        #     kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
-        # # if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors
-        # if self.do_keypoints_size_rescaling:
-        #     self.rescale_keypoint_size(kps)
-        # if kDrawOriginalExtractedFeatures: # draw the original features
-        #     imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0,255,0), flags=0)
-        #     cv2.imshow('detected keypoints',imgDraw)
-        # if kVerbose:
-        #     print('detector:',self.detector_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')
+        filter_name = 'NONE'
+        if filter:
+            kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
+        # if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors
+        if self.do_keypoints_size_rescaling:
+            self.rescale_keypoint_size(kps)
+        if kDrawOriginalExtractedFeatures: # draw the original features
+            imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0,255,0), flags=0)
+            cv2.imshow('detected keypoints',imgDraw)
+        if kVerbose:
+            print('detector:',self.detector_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')
         return kps


@@ -974,16 +974,16 @@ def detectAndCompute(self, frame, mask=None, filter = True):
             #print('detector: ', self.detector_type.name, ', #features: ', len(kps))
             print('descriptor: ', self.descriptor_type.name, ', #features: ', len(kps))
         # filter keypoints
-        # filter_name = 'NONE'
-        # if filter:
-        #     kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
-        # if self.detector_type == FeatureDetectorTypes.SIFT or \
-        #    self.detector_type == FeatureDetectorTypes.ROOT_SIFT or \
-        #    self.detector_type == FeatureDetectorTypes.CONTEXTDESC :
-        #     unpackSiftOctaveKps(kps, method=UnpackOctaveMethod.INTRAL_LAYERS)
-        # if kVerbose:
-        #     print('detector:',self.detector_type.name,', descriptor:', self.descriptor_type.name,', #features:', len(kps),' (#ref:', self.num_features, '), [kp-filter:',filter_name,']')
-        # self.debug_print(kps)
+        filter_name = 'NONE'
+        if filter:
+            kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
+        if self.detector_type == FeatureDetectorTypes.SIFT or \
+           self.detector_type == FeatureDetectorTypes.ROOT_SIFT or \
+           self.detector_type == FeatureDetectorTypes.CONTEXTDESC :
+            unpackSiftOctaveKps(kps, method=UnpackOctaveMethod.INTRAL_LAYERS)
+        if kVerbose:
+            print('detector:',self.detector_type.name,', descriptor:', self.descriptor_type.name,', #features:', len(kps),' (#ref:', self.num_features, '), [kp-filter:',filter_name,']')
+        self.debug_print(kps)
         return kps, des


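Both hunks above re-enable the keypoint filtering path, which delegates to self.filter_keypoints(self.keypoint_filter_type, ...). As a rough illustration of what such a filter typically does (this is not pyslam's actual KeypointFilterTypes logic, just the general idea), a keep-the-N-strongest variant could look like:

```python
# Illustrative sketch of a keypoint filter in the spirit of the re-enabled
# filter_keypoints(...) call: keep the N strongest responses and keep the
# descriptor rows aligned with the surviving keypoints.
import numpy as np

def filter_keypoints_top_n(kps, des=None, num_features=2000):
    # kps: list of cv2.KeyPoint, des: (len(kps), D) array or None
    if len(kps) <= num_features:
        return kps, des, 'KP_FILTER_TOP_N'
    order = np.argsort([-kp.response for kp in kps])[:num_features]  # strongest first
    kps = [kps[i] for i in order]
    if des is not None:
        des = des[order]  # keep descriptors in sync with the kept keypoints
    return kps, des, 'KP_FILTER_TOP_N'
```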
2 changes: 1 addition & 1 deletion feature_manager_adaptors.py
@@ -192,7 +192,7 @@ def initSigmaLevels(self):
         self.scale_factors[0]=1.0

         # compute desired number of features per level (by using the scale factor)
-        self.num_features_per_level = np.zeros(num_levels,dtype=np.int8)
+        self.num_features_per_level = np.zeros(num_levels,dtype=np.int)
         num_desired_features_per_level = self.num_features*(1 - self.inv_scale_factor)/(1 - math.pow(self.inv_scale_factor, self.num_levels))
         sum_num_features = 0
         for level in range(self.num_levels-1):
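The dtype change is more than cosmetic: the per-level counts come from a geometric split of the total feature budget, and for typical settings the first levels receive several hundred features each, which wraps around in an int8. A self-contained check of the split (values are illustrative; recent NumPy removed np.int, so the sketch uses np.int32):

```python
# Verify that the geometric per-level split used in initSigmaLevels() sums
# back to the total budget. With these ORB-like settings the first level
# gets ~434 features, so np.int8 (max 127) would silently overflow.
import math
import numpy as np

num_features = 2000
num_levels = 8
inv_scale_factor = 1.0 / 1.2

num_features_per_level = np.zeros(num_levels, dtype=np.int32)  # wide enough, unlike np.int8
n_level = num_features * (1 - inv_scale_factor) / (1 - math.pow(inv_scale_factor, num_levels))
sum_num_features = 0
for level in range(num_levels - 1):
    num_features_per_level[level] = int(round(n_level))
    sum_num_features += num_features_per_level[level]
    n_level *= inv_scale_factor                      # geometric decay per level
num_features_per_level[num_levels - 1] = max(num_features - sum_num_features, 0)  # remainder

print(num_features_per_level, num_features_per_level.sum())  # counts sum to 2000
```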
123 changes: 103 additions & 20 deletions feature_matcher.py
@@ -18,6 +18,7 @@
"""
import numpy as np
import cv2
import torch
from parameters import Parameters
from enum import Enum
from collections import defaultdict
@@ -62,10 +63,8 @@ def feature_matcher_factory(norm_type=cv2.NORM_HAMMING, cross_check=False, ratio
     DMatch.imgIdx - Index of the train image.
     """

-#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-matcher = LightGlue(features="superpoint",n_layers=2).eval().to('cuda')

 # base class
-import torch
 class FeatureMatcher(object):
     def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=kRatioTest, type = FeatureMatcherTypes.BF):
         self.type = type
@@ -75,52 +74,59 @@ def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=k
         self.ratio_test = ratio_test
         self.matcher = None
         self.matcher_name = ''
+        self.matcherLG = None
+        self.deviceLG = None
+        if self.type == FeatureMatcherTypes.LG:
+            if self.matcherLG is None:
+                self.matcherLG = LightGlue(features="superpoint",n_layers=2).eval().to('cuda')
+            if self.deviceLG is None:
+                self.deviceLG = torch.device("cuda" if torch.cuda.is_available() else "cpu")


     # input: des1 = queryDescriptors, des2 = trainDescriptors
     # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively)
-    def match(self,frame, des1, des2,kps1 = None,kps2 = None, ratio_test=None):
+    def match(self, frame, des1, des2, kps1 = None, kps2 = None, ratio_test=None):
         if kVerbose:
             print(self.matcher_name,', norm ', self.norm_type)
-        print('des1.shape:',des1.shape,' des2.shape:',des2.shape)
+        #print('des1.shape:',des1.shape,' des2.shape:',des2.shape)
+        #print('des1.dtype:',des1.dtype,' des2.dtype:',des2.dtype)
+        #print(self.type)
         if self.type == FeatureMatcherTypes.LG:
             d1={
                 'keypoints': torch.tensor(kps1,device='cuda').unsqueeze(0),
                 'descriptors': torch.tensor(des2,device='cuda').unsqueeze(0),
                 'image_size': torch.tensor(frame.shape, device='cuda').unsqueeze(0)
             }
             d2={
                 'keypoints': torch.tensor(kps2,device='cuda').unsqueeze(0),
                 'descriptors': torch.tensor(des1,device='cuda').unsqueeze(0),
                 'image_size': torch.tensor(frame.shape, device='cuda').unsqueeze(0)
             }

-            matches01 = matcher({"image0": d1, "image1": d2})
+            matches01 = self.matcherLG({"image0": d1, "image1": d2})
             #print(matches01['matches'])
             idx0 = matches01['matches'][0][:, 0].cpu().tolist()
             idx1 = matches01['matches'][0][:, 1].cpu().tolist()
             #print(des1.shape,len(idx0),len(idx1))
             return idx1, idx0
             # print(d1['keypoints'].shape, d1['descriptors'].shape, d1['image_size'].shape)
             # print(d2['keypoints'].shape, d2['descriptors'].shape, d2['image_size'].shape)

-        if self.type == FeatureMatcherTypes.XFEAT:
+        elif self.type == FeatureMatcherTypes.XFEAT:
             d1_tensor = torch.tensor(des1, dtype=torch.float32) # Specify dtype if needed
             d2_tensor = torch.tensor(des2, dtype=torch.float32) # Specify dtype if needed
-            print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAaa")
             # If the original tensors were on a GPU, you should move the new tensors to GPU as well
             # d1_tensor = d1_tensor.to('cuda') # Use 'cuda' or 'cuda:0' if your device is a GPU
             # d2_tensor = d2_tensor.to('cuda')
             idx0, idx1 = self.matcher.match(d1_tensor, d2_tensor, 0.93)
             return idx0.cpu(), idx1.cpu()
-        matches = self.matcher.knnMatch(des1, des2, k=2) #knnMatch(queryDescriptors,trainDescriptors)
-        self.matches = matches
-        return self.goodMatches(matches, des1, des2, ratio_test)
+        else:
+            matches = self.matcher.knnMatch(des1, des2, k=2) #knnMatch(queryDescriptors,trainDescriptors)
+            self.matches = matches
+            return self.goodMatches(matches, des1, des2, ratio_test)

     # input: des1 = query-descriptors, des2 = train-descriptors
     # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively)
     # N.B.: this returns matches where each trainIdx index is associated to only one queryIdx index
     def goodMatchesOneToOne(self, matches, des1, des2, ratio_test=None):
         len_des2 = len(des2)
         idx1, idx2 = [], []
@@ -149,15 +155,92 @@ def goodMatchesOneToOne(self, matches, des1, des2, ratio_test=None):
                 assert(idx2[index] == m.trainIdx)
                 idx1[index]=m.queryIdx
                 idx2[index]=m.trainIdx
         return idx1, idx2


     def goodMatches(self, matches, des1, des2, ratio_test=None):
         #return self.goodMatchesSimple(matches, des1, des2, ratio_test) # <= N.B.: this generates problems in SLAM since it can produce matches where a trainIdx index is associated to two (or more) queryIdx indexes
         return self.goodMatchesOneToOne(matches, des1, des2, ratio_test)

+    # input: des1 = query-descriptors, des2 = train-descriptors, kps1 = query-keypoints, kps2 = train-keypoints
+    # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively)
+    # N.B.0: cross checking can also be enabled with the BruteForce Matcher below
+    # N.B.1: after matching there is a model fitting with fundamental matrix estimation
+    # N.B.2: fitting a fundamental matrix has problems in the following cases: [see Hartley/Zisserman Book]
+    # - 'geometrical degenerate correspondences', e.g. all the observed features lie on a plane (the correct model for the correspondences is an homography) or lie on a ruled quadric
+    # - degenerate motions such as a pure rotation (a sufficient parallax is required) or an infinitesimal viewpoint change (where the translation is almost zero)
+    # N.B.3: as reported above, in case of pure rotation, this algorithm will compute a useless fundamental matrix which cannot be decomposed to return a correct rotation
+    # Adapted from https://github.com/lzx551402/geodesc/blob/master/utils/opencvhelper.py
+    def matchWithCrossCheckAndModelFit(self, des1, des2, kps1, kps2, ratio_test=None, cross_check=True, err_thld=1, info=''):
+        """Compute putative and inlier matches.
+        Args:
+            feat: (n_kpts, 128) Local features.
+            cv_kpts: A list of keypoints represented as cv2.KeyPoint.
+            ratio_test: The threshold to apply ratio test.
+            cross_check: (True by default) Whether to apply cross check.
+            err_thld: Epipolar error threshold.
+            info: Info to print out.
+        Returns:
+            good_matches: Putative matches.
+            mask: The mask to distinguish inliers/outliers on putative matches.
+        """
+        idx1, idx2 = [], []
+        if ratio_test is None:
+            ratio_test = self.ratio_test
+
+        init_matches1 = self.matcher.knnMatch(des1, des2, k=2)
+        init_matches2 = self.matcher.knnMatch(des2, des1, k=2)
+
+        good_matches = []
+
+        for i,(m1,n1) in enumerate(init_matches1):
+            cond = True
+            if cross_check:
+                cond1 = cross_check and init_matches2[m1.trainIdx][0].trainIdx == i
+                cond *= cond1
+            if ratio_test is not None:
+                cond2 = m1.distance <= ratio_test * n1.distance
+                cond *= cond2
+            if cond:
+                good_matches.append(m1)
+                idx1.append(m1.queryIdx)
+                idx2.append(m1.trainIdx)
+
+        if type(kps1) is list and type(kps2) is list:
+            good_kps1 = np.array([kps1[m.queryIdx].pt for m in good_matches])
+            good_kps2 = np.array([kps2[m.trainIdx].pt for m in good_matches])
+        elif type(kps1) is np.ndarray and type(kps2) is np.ndarray:
+            good_kps1 = np.array([kps1[m.queryIdx] for m in good_matches])
+            good_kps2 = np.array([kps2[m.trainIdx] for m in good_matches])
+        else:
+            raise Exception("Keypoint type error!")
+
+        ransac_method = None
+        try:
+            ransac_method = cv2.USAC_MSAC
+        except:
+            ransac_method = cv2.RANSAC
+        _, mask = cv2.findFundamentalMat(good_kps1, good_kps2, ransac_method, err_thld, confidence=0.999)
+        n_inlier = np.count_nonzero(mask)
+        print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier)
+        return idx1, idx2, good_matches, mask
+
+    # input: des1 = query-descriptors, des2 = train-descriptors
+    # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively)
+    # N.B.: this may return matches where a trainIdx index is associated to two (or more) queryIdx indexes
+    def goodMatchesSimple(self, matches, des1, des2, ratio_test=None):
+        idx1, idx2 = [], []
+        #good_matches = []
+        if ratio_test is None:
+            ratio_test = self.ratio_test
+        if matches is not None:
+            for m,n in matches:
+                if m.distance < ratio_test * n.distance:
+                    idx1.append(m.queryIdx)
+                    idx2.append(m.trainIdx)
+        return idx1, idx2



 # Brute-Force Matcher
 class BfFeatureMatcher(FeatureMatcher):
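One caveat worth flagging in the new LG path: the constructor computes self.deviceLG from torch.cuda.is_available(), but the LightGlue model and the tensors in match() are still created with a hardcoded 'cuda', so this fails on CPU-only machines; the dicts also appear to pair kps1 with des2 (and vice versa) and return the index lists swapped. A device-agnostic sketch of the same matching step, assuming the lightglue package (https://github.com/cvg/LightGlue) and SuperPoint features; this is not the committed code:

```python
# Hedged sketch: device-agnostic LightGlue matching. Tensor keys follow the
# dicts built in FeatureMatcher.match() above; keypoints are paired with
# their own descriptors and indices are returned in (query, train) order.
import torch
from lightglue import LightGlue

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
matcher = LightGlue(features="superpoint", n_layers=2).eval().to(device)

def match_lightglue(frame_shape, kps1, des1, kps2, des2):
    def to_batch(kps, des):
        return {
            'keypoints': torch.tensor(kps, dtype=torch.float32, device=device).unsqueeze(0),
            'descriptors': torch.tensor(des, dtype=torch.float32, device=device).unsqueeze(0),
            'image_size': torch.tensor(frame_shape[:2], dtype=torch.float32, device=device).unsqueeze(0),
        }
    with torch.no_grad():
        out = matcher({"image0": to_batch(kps1, des1), "image1": to_batch(kps2, des2)})
    matches = out['matches'][0].cpu().numpy()   # (M, 2) index pairs
    return matches[:, 0].tolist(), matches[:, 1].tolist()
```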
1 change: 0 additions & 1 deletion feature_shitomasi.py
@@ -31,7 +31,6 @@ def __init__(self, num_features=Parameters.kNumFeatures, quality_level = 0.01, m
         self.quality_level = quality_level
         self.min_coner_distance = min_coner_distance
         self.blockSize=5 # 3 is the default block size
-        print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")

     def detect(self, frame, mask=None):
         pts = cv2.goodFeaturesToTrack(frame, self.num_features, self.quality_level, self.min_coner_distance, blockSize=self.blockSize, mask=mask)
