diff --git a/config.ini b/config.ini
index 09023b91..5fe15aa5 100644
--- a/config.ini
+++ b/config.ini
@@ -75,13 +75,13 @@ groundtruth_file=auto
 [VIDEO_DATASET]
 type=video
 ;
-base_path=./videos/kitti00
-cam_settings=settings/KITTI00-02.yaml
-name=video.mp4
+;base_path=./videos/kitti00
+;cam_settings=settings/KITTI00-02.yaml
+;name=video.mp4
 ;
-; base_path=./videos/kitti06
-; cam_settings=settings/KITTI04-12.yaml
-; name=video_color.mp4
+base_path=./videos/kitti06
+cam_settings=settings/KITTI04-12.yaml
+name=video_color.mp4
 ;
 ;base_path=./videos/webcam
 ;cam_settings=settings/WEBCAM.yaml
diff --git a/dataset.py b/dataset.py
index 0a7d882a..b3645a76 100644
--- a/dataset.py
+++ b/dataset.py
@@ -336,7 +336,7 @@ def get_current_frame(self):
 class KittiDataset(Dataset):
     def __init__(self, path, name, associations=None, type=DatasetType.KITTI):
         super().__init__(path, name, 10, associations, type)
-        self.fps = 20
+        self.fps = 10
         self.image_left_path = '/image_0/'
         self.image_right_path = '/image_1/'
         self.timestamps = np.loadtxt(self.path + '/sequences/' + self.name + '/times.txt')
diff --git a/feature_manager.py b/feature_manager.py
index d2c65387..3edf797e 100644
--- a/feature_manager.py
+++ b/feature_manager.py
@@ -58,7 +58,7 @@ KeyNetDescFeature2D = import_from('feature_keynet', 'KeyNetDescFeature2D')
 DiskFeature2D = import_from('feature_disk', 'DiskFeature2D')
 
-kVerbose = False
+kVerbose = True
 
 kNumFeatureDefault = Parameters.kNumFeatures
 
@@ -320,7 +320,8 @@ def __init__(self, num_features=kNumFeatureDefault,
             self.pyramid_type = PyramidType.GAUSS_PYRAMID
             self.pyramid_do_parallel = False                    # N.B.: SUPERPOINT interface class is not thread-safe!
             self.force_multiscale_detect_and_compute = True     # force it since SUPERPOINT cannot compute descriptors separately from keypoints
-
+            #
+            #
         elif self.detector_type == FeatureDetectorTypes.XFEAT:
             self.oriented_features = False
             self._feature_detector = XfeatFeature2D()
@@ -328,9 +329,8 @@ def __init__(self, num_features=kNumFeatureDefault,
             self.use_pyramid_adaptor = self.num_levels > 1
             self.need_nms = self.num_levels > 1
             self.pyramid_type = PyramidType.GAUSS_PYRAMID
-            self.pyramid_do_parallel = False                    # N.B.: SUPERPOINT interface class is not thread-safe!
-            self.force_multiscale_detect_and_compute = True     # force it since SUPERPOINT cannot compute descriptors separately from keypoints
-            #
+            self.pyramid_do_parallel = False                    # N.B.: XFEAT interface class is not thread-safe!
+            self.force_multiscale_detect_and_compute = True     # force it since XFEAT cannot compute descriptors separately from keypoints
             #
             #
         elif self.detector_type == FeatureDetectorTypes.FAST:
@@ -562,11 +562,12 @@ def __init__(self, num_features=kNumFeatureDefault,
             if self.detector_type != FeatureDetectorTypes.SUPERPOINT:
                 raise ValueError("You cannot use SUPERPOINT descriptor without SUPERPOINT detector!\nPlease, select SUPERPOINT as both descriptor and detector!")
             self._feature_descriptor = self._feature_detector    # reuse the same SuperPointDector object
-
+            #
+            #
         elif self.descriptor_type == FeatureDescriptorTypes.XFEAT:
             if self.detector_type != FeatureDetectorTypes.XFEAT:
                 raise ValueError("You cannot use XFEAT descriptor without XFEAT detector!\nPlease, select XFEATas both descriptor and detector!")
-            self._feature_descriptor = self._feature_detector    # reuse the same SuperPointDector object
+            self._feature_descriptor = self._feature_detector    # reuse the same XFeat object
             #
             #
         elif self.descriptor_type == FeatureDescriptorTypes.TFEAT:
@@ -905,17 +906,17 @@ def detect(self, frame, mask=None, filter=True):
             # standard detection
             kps = self._feature_detector.detect(frame, mask)
         # filter keypoints
-        # filter_name = 'NONE'
-        # if filter:
-        #     kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
-        # # if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors
-        # if self.do_keypoints_size_rescaling:
-        #     self.rescale_keypoint_size(kps)
-        # if kDrawOriginalExtractedFeatures: # draw the original features
-        #     imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0,255,0), flags=0)
-        #     cv2.imshow('detected keypoints',imgDraw)
-        # if kVerbose:
-        #     print('detector:',self.detector_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')
+        filter_name = 'NONE'
+        if filter:
+            kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
+        # if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors
+        if self.do_keypoints_size_rescaling:
+            self.rescale_keypoint_size(kps)
+        if kDrawOriginalExtractedFeatures: # draw the original features
+            imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0,255,0), flags=0)
+            cv2.imshow('detected keypoints',imgDraw)
+        if kVerbose:
+            print('detector:',self.detector_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')
         return kps
@@ -974,16 +975,16 @@ def detectAndCompute(self, frame, mask=None, filter = True):
             #print('detector: ', self.detector_type.name, ', #features: ', len(kps))
             print('descriptor: ', self.descriptor_type.name, ', #features: ', len(kps))
         # filter keypoints
-        # filter_name = 'NONE'
-        # if filter:
-        #     kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
-        # if self.detector_type == FeatureDetectorTypes.SIFT or \
-        #    self.detector_type == FeatureDetectorTypes.ROOT_SIFT or \
-        #    self.detector_type == FeatureDetectorTypes.CONTEXTDESC :
-        #     unpackSiftOctaveKps(kps, method=UnpackOctaveMethod.INTRAL_LAYERS)
-        # if kVerbose:
-        #     print('detector:',self.detector_type.name,', descriptor:', self.descriptor_type.name,', #features:', len(kps),' (#ref:', self.num_features, '), [kp-filter:',filter_name,']')
-        # self.debug_print(kps)
+        filter_name = 'NONE'
+        if filter:
+            kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
+        if self.detector_type == FeatureDetectorTypes.SIFT or \
+           self.detector_type == FeatureDetectorTypes.ROOT_SIFT or \
+           self.detector_type == FeatureDetectorTypes.CONTEXTDESC :
+            unpackSiftOctaveKps(kps, method=UnpackOctaveMethod.INTRAL_LAYERS)
+        if kVerbose:
+            print('detector:',self.detector_type.name,', descriptor:', self.descriptor_type.name,', #features:', len(kps),' (#ref:', self.num_features, '), [kp-filter:',filter_name,']')
+        self.debug_print(kps)
         return kps, des
diff --git a/feature_manager_adaptors.py b/feature_manager_adaptors.py
index ce0b2ec2..975d7557 100644
--- a/feature_manager_adaptors.py
+++ b/feature_manager_adaptors.py
@@ -192,7 +192,7 @@ def initSigmaLevels(self):
         self.scale_factors[0]=1.0
 
         # compute desired number of features per level (by using the scale factor)
-        self.num_features_per_level = np.zeros(num_levels,dtype=np.int8)
+        self.num_features_per_level = np.zeros(num_levels,dtype=int)   # wide integer type: np.int8 overflows here, and np.int has been removed from recent NumPy
         num_desired_features_per_level = self.num_features*(1 - self.inv_scale_factor)/(1 - math.pow(self.inv_scale_factor, self.num_levels))
         sum_num_features = 0
         for level in range(self.num_levels-1):
diff --git a/feature_matcher.py b/feature_matcher.py
index 305aa0dd..c9ee677e 100644
--- a/feature_matcher.py
+++ b/feature_matcher.py
@@ -18,6 +18,7 @@
 """
 import numpy as np
 import cv2
+import torch
 from parameters import Parameters
 from enum import Enum
 from collections import defaultdict
@@ -62,10 +63,8 @@ def feature_matcher_factory(norm_type=cv2.NORM_HAMMING, cross_check=False, ratio
         DMatch.imgIdx - Index of the train image.
""" -#device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -matcher = LightGlue(features="superpoint",n_layers=2).eval().to('cuda') + # base class -import torch class FeatureMatcher(object): def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=kRatioTest, type = FeatureMatcherTypes.BF): self.type = type @@ -75,14 +74,21 @@ def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=k self.ratio_test = ratio_test self.matcher = None self.matcher_name = '' + self.matcherLG = None + self.deviceLG = None + if self.type == FeatureMatcherTypes.LG: + if self.matcherLG is None: + self.matcherLG = LightGlue(features="superpoint",n_layers=2).eval().to('cuda') + if self.deviceLG is None: + self.deviceLG = torch.device("cuda" if torch.cuda.is_available() else "cpu") # input: des1 = queryDescriptors, des2= trainDescriptors # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively) - def match(self,frame, des1, des2,kps1 = None,kps2 = None, ratio_test=None): + def match(self, frame, des1, des2, kps1 = None,kps2 = None, ratio_test=None): if kVerbose: print(self.matcher_name,', norm ', self.norm_type) - print('des1.shape:',des1.shape,' des2.shape:',des2.shape) + #print('des1.shape:',des1.shape,' des2.shape:',des2.shape) #print('des1.dtype:',des1.dtype,' des2.dtype:',des2.dtype) #print(self.type) if self.type == FeatureMatcherTypes.LG: @@ -90,14 +96,14 @@ def match(self,frame, des1, des2,kps1 = None,kps2 = None, ratio_test=None): 'keypoints': torch.tensor(kps1,device='cuda').unsqueeze(0), 'descriptors': torch.tensor(des2,device='cuda').unsqueeze(0), 'image_size': torch.tensor(frame.shape, device='cuda').unsqueeze(0) - } + } d2={ 'keypoints': torch.tensor(kps2,device='cuda').unsqueeze(0), 'descriptors': torch.tensor(des1,device='cuda').unsqueeze(0), 'image_size': torch.tensor(frame.shape, device='cuda').unsqueeze(0) - } + } - matches01 = matcher({"image0": d1, "image1": d2}) + matches01 = self.matcherLG({"image0": d1, "image1": d2}) #print(matches01['matches']) idx0 = matches01['matches'][0][:, 0].cpu().tolist() idx1 = matches01['matches'][0][:, 1].cpu().tolist() @@ -105,22 +111,22 @@ def match(self,frame, des1, des2,kps1 = None,kps2 = None, ratio_test=None): return idx1, idx0 # print(d1['keypoints'].shape, d1['descriptors'].shape, d1['image_size'].shape) # print(d2['keypoints'].shape, d2['descriptors'].shape, d2['image_size'].shape) - - if self.type == FeatureMatcherTypes.XFEAT: + elif self.type == FeatureMatcherTypes.XFEAT: d1_tensor = torch.tensor(des1, dtype=torch.float32) # Specify dtype if needed d2_tensor = torch.tensor(des2, dtype=torch.float32) # Specify dtype if needed - print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAaa") # If the original tensors were on a GPU, you should move the new tensors to GPU as well # d1_tensor = d1_tensor.to('cuda') # Use 'cuda' or 'cuda:0' if your device is a GPU # d2_tensor = d2_tensor.to('cuda') idx0, idx1 = self.matcher.match(d1_tensor, d2_tensor, 0.93) return idx0.cpu(), idx1.cpu() - matches = self.matcher.knnMatch(des1, des2, k=2) #knnMatch(queryDescriptors,trainDescriptors) - self.matches = matches - return self.goodMatches(matches, des1, des2, ratio_test) - - - + else: + matches = self.matcher.knnMatch(des1, des2, k=2) #knnMatch(queryDescriptors,trainDescriptors) + self.matches = matches + return self.goodMatches(matches, des1, des2, ratio_test) + + # input: des1 = query-descriptors, des2 = train-descriptors + # output: idx1, idx2 (vectors of corresponding indexes in des1 and 
des2, respectively) + # N.B.: this returns matches where each trainIdx index is associated to only one queryIdx index def goodMatchesOneToOne(self, matches, des1, des2, ratio_test=None): len_des2 = len(des2) idx1, idx2 = [], [] @@ -149,15 +155,92 @@ def goodMatchesOneToOne(self, matches, des1, des2, ratio_test=None): assert(idx2[index] == m.trainIdx) idx1[index]=m.queryIdx idx2[index]=m.trainIdx - return idx1, idx2 - - def goodMatches(self, matches, des1, des2, ratio_test=None): #return self.goodMatchesSimple(matches, des1, des2, ratio_test) # <= N.B.: this generates problem in SLAM since it can produce matches where a trainIdx index is associated to two (or more) queryIdx indexes return self.goodMatchesOneToOne(matches, des1, des2, ratio_test) + # input: des1 = query-descriptors, des2 = train-descriptors, kps1 = query-keypoints, kps2 = train-keypoints + # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively) + # N.B.0: cross checking can be also enabled with the BruteForce Matcher below + # N.B.1: after matching there is a model fitting with fundamental matrix estimation + # N.B.2: fitting a fundamental matrix has problems in the following cases: [see Hartley/Zisserman Book] + # - 'geometrical degenerate correspondences', e.g. all the observed features lie on a plane (the correct model for the correspondences is an homography) or lie a ruled quadric + # - degenerate motions such a pure rotation (a sufficient parallax is required) or an infinitesimal viewpoint change (where the translation is almost zero) + # N.B.3: as reported above, in case of pure rotation, this algorithm will compute a useless fundamental matrix which cannot be decomposed to return a correct rotation + # Adapted from https://github.com/lzx551402/geodesc/blob/master/utils/opencvhelper.py + def matchWithCrossCheckAndModelFit(self, des1, des2, kps1, kps2, ratio_test=None, cross_check=True, err_thld=1, info=''): + """Compute putative and inlier matches. + Args: + feat: (n_kpts, 128) Local features. + cv_kpts: A list of keypoints represented as cv2.KeyPoint. + ratio_test: The threshold to apply ratio test. + cross_check: (True by default) Whether to apply cross check. + err_thld: Epipolar error threshold. + info: Info to print out. + Returns: + good_matches: Putative matches. + mask: The mask to distinguish inliers/outliers on putative matches. 
+ """ + idx1, idx2 = [], [] + if ratio_test is None: + ratio_test = self.ratio_test + + init_matches1 = self.matcher.knnMatch(des1, des2, k=2) + init_matches2 = self.matcher.knnMatch(des2, des1, k=2) + + good_matches = [] + + for i,(m1,n1) in enumerate(init_matches1): + cond = True + if cross_check: + cond1 = cross_check and init_matches2[m1.trainIdx][0].trainIdx == i + cond *= cond1 + if ratio_test is not None: + cond2 = m1.distance <= ratio_test * n1.distance + cond *= cond2 + if cond: + good_matches.append(m1) + idx1.append(m1.queryIdx) + idx2.append(m1.trainIdx) + + if type(kps1) is list and type(kps2) is list: + good_kps1 = np.array([kps1[m.queryIdx].pt for m in good_matches]) + good_kps2 = np.array([kps2[m.trainIdx].pt for m in good_matches]) + elif type(kps1) is np.ndarray and type(kps2) is np.ndarray: + good_kps1 = np.array([kps1[m.queryIdx] for m in good_matches]) + good_kps2 = np.array([kps2[m.trainIdx] for m in good_matches]) + else: + raise Exception("Keypoint type error!") + exit(-1) + + ransac_method = None + try: + ransac_method = cv2.USAC_MSAC + except: + ransac_method = cv2.RANSAC + _, mask = cv2.findFundamentalMat(good_kps1, good_kps2, ransac_method, err_thld, confidence=0.999) + n_inlier = np.count_nonzero(mask) + print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier) + return idx1, idx2, good_matches, mask + + # input: des1 = query-descriptors, des2 = train-descriptors + # output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively) + # N.B.: this may return matches where a trainIdx index is associated to two (or more) queryIdx indexes + def goodMatchesSimple(self, matches, des1, des2, ratio_test=None): + idx1, idx2 = [], [] + #good_matches = [] + if ratio_test is None: + ratio_test = self.ratio_test + if matches is not None: + for m,n in matches: + if m.distance < ratio_test * n.distance: + idx1.append(m.queryIdx) + idx2.append(m.trainIdx) + return idx1, idx2 + + # Brute-Force Matcher class BfFeatureMatcher(FeatureMatcher): diff --git a/feature_shitomasi.py b/feature_shitomasi.py index c494d0e3..f0a8aa6b 100644 --- a/feature_shitomasi.py +++ b/feature_shitomasi.py @@ -31,7 +31,6 @@ def __init__(self, num_features=Parameters.kNumFeatures, quality_level = 0.01, m self.quality_level = quality_level self.min_coner_distance = min_coner_distance self.blockSize=5 # 3 is the default block size - print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") def detect(self, frame, mask=None): pts = cv2.goodFeaturesToTrack(frame, self.num_features, self.quality_level, self.min_coner_distance, blockSize=self.blockSize, mask=mask) diff --git a/feature_superpoint.py b/feature_superpoint.py index 3d4c1f8e..320ab23d 100644 --- a/feature_superpoint.py +++ b/feature_superpoint.py @@ -21,10 +21,11 @@ import os import cv2 import torch +import time import config config.cfg.set_lib('superpoint') -print(config) + from demo_superpoint import SuperPointFrontend from threading import RLock @@ -67,7 +68,7 @@ def transpose_des(des): else: return None -import time + # interface for pySLAM class SuperPointFeature2D: def __init__(self, do_cuda=True): @@ -95,37 +96,29 @@ def __init__(self, do_cuda=True): # compute both keypoints and descriptors def detectAndCompute(self, frame, mask=None): # mask is a fake input - Printer.purple("DETECTANDCOMPUTEEEE") with self.lock: - time0 = time.time() self.frame = frame self.frameFloat = (frame.astype('float32') / 255.) 
            self.pts, self.des, self.heatmap = self.fe.run(self.frameFloat)
-            #print(self.des.shape)
             # N.B.: pts are - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
-            #print('pts: ', self.pts.T.shape)
+            #print('pts: ', self.pts.T)
            self.kps = convert_superpts_to_keypoints(self.pts.T, size=self.keypoint_size)
-            # print(self.kps)
-            #print(1/(time.time()-time0))
-            #if kVerbose:
-                #print('detector: SUPERPOINT, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
-            #print (self.kps,self.des.T.shape)
-            return self.kps, self.des.T
+            if kVerbose:
+                print('detector: SUPERPOINT, #features: ', len(self.kps), ', frame res: ', frame.shape[0:2])
+            return self.kps, transpose_des(self.des)
 
     # return keypoints if available otherwise call detectAndCompute()
-    # def detect(self, frame, mask=None):  # mask is a fake input
-    #     print("DETECTTTT")
-    #     with self.lock:
-    #         #if self.frame is not frame:
-    #         self.detectAndCompute(frame)
-    #         return self.kps
+    def detect(self, frame, mask=None):  # mask is a fake input
+        with self.lock:
+            #if self.frame is not frame:
+            self.detectAndCompute(frame)
+            return self.kps
 
-    # # return descriptors if available otherwise call detectAndCompute()
-    # def compute(self, frame, kps=None, mask=None):  # kps is a fake input, mask is a fake input
-    #     print("COMPUTEEEE")
-    #     with self.lock:
-    #         if self.frame is not frame:
-    #             Printer.orange('WARNING: SUPERPOINT is recomputing both kps and des on last input frame', frame.shape)
-    #             self.detectAndCompute(frame)
-    #         return self.kps, transpose_des(self.des)
+    # return descriptors if available otherwise call detectAndCompute()
+    def compute(self, frame, kps=None, mask=None):  # kps is a fake input, mask is a fake input
+        with self.lock:
+            if self.frame is not frame:
+                Printer.orange('WARNING: SUPERPOINT is recomputing both kps and des on last input frame', frame.shape)
+                self.detectAndCompute(frame)
+            return self.kps, transpose_des(self.des)
diff --git a/feature_types.py b/feature_types.py
index 6c809346..327456f9 100644
--- a/feature_types.py
+++ b/feature_types.py
@@ -59,7 +59,7 @@ class FeatureDetectorTypes(Enum):
     R2D2 = 22       # [end-to-end] joint detector-descriptor - "R2D2: Repeatable and Reliable Detector and Descriptor"
     KEYNET = 23     # "Key.Net: Keypoint Detection by Handcrafted and Learned CNN Filters"
     DISK = 24       # [end-to-end] joint detector-descriptor - "DISK: Learning local features with policy gradient"
-    XFEAT = 25
+    XFEAT = 25      # [end-to-end] joint detector-descriptor - "XFeat: Accelerated Features for Lightweight Image Matching"
 
 class FeatureDescriptorTypes(Enum):
     NONE = 0        # used for LK tracker (in main_vo.py)
@@ -92,7 +92,7 @@ class FeatureDescriptorTypes(Enum):
     KEYNET = 27     # keynet descriptor is HARDNET (only with KEYNET detector) - "Key.Net: Keypoint Detection by Handcrafted and Learned CNN Filters"
     BEBLID = 28     # [binary] only descriptor - " BEBLID: Boosted Efficient Binary Local Image Descriptor"
     DISK = 29       # [end-to-end] joint detector-descriptor - "DISK: Learning local features with policy gradient"
-    XFEAT = 30
+    XFEAT = 30      # [end-to-end] joint detector-descriptor - "XFeat: Accelerated Features for Lightweight Image Matching"
 
 class FeatureInfo(object):
     norm_type = dict()
     max_descriptor_distance = dict()    # initial reference max descriptor distances used by SLAM for locally searching matches around frame keypoints;
@@ -133,7 +133,7 @@ class FeatureInfo(object):
     norm_type[FeatureDescriptorTypes.SUPERPOINT] = cv2.NORM_L2
     max_descriptor_distance[FeatureDescriptorTypes.SUPERPOINT] = 1.30    # SUPERPOINT
     #
-    norm_type[FeatureDescriptorTypes.XFEAT] = cv2.NORM_L2
+    norm_type[FeatureDescriptorTypes.XFEAT] = cv2.NORM_L2                # XFEAT
     max_descriptor_distance[FeatureDescriptorTypes.XFEAT] = 1.3
     #
     norm_type[FeatureDescriptorTypes.TFEAT] = cv2.NORM_L2
diff --git a/utils_geom.py b/utils_geom.py
index 1e635a28..8aa25630 100644
--- a/utils_geom.py
+++ b/utils_geom.py
@@ -62,7 +62,8 @@ def s1_dist_rad(angle1,angle2):
 def poseRt(R, t):
     ret = np.eye(4)
     ret[:3, :3] = R
-    ret[:3, 3] = np.array([t[0][0], t[1][0], t[2][0]])
+    #ret[:3, 3] = np.array([t[0][0], t[1][0], t[2][0]])
+    ret[:3, 3] = t
     return ret
 
 # [4x4] homogeneous inverse T^-1 from T represented with [3x3] R and [3x1] t
diff --git a/visual_odometry.py b/visual_odometry.py
index 2cded0ca..371508e5 100644
--- a/visual_odometry.py
+++ b/visual_odometry.py
@@ -96,7 +96,7 @@ def getAbsoluteScale(self, frame_id):
             return 1
 
     def computeFundamentalMatrix(self, kps_ref, kps_cur):
-        F, mask = cv2.findFundamentalMat(kps_ref, kps_cur, cv2.FM_RANSAC, param1=kRansacThresholdPixels, param2=kRansacProb)
+        F, mask = cv2.findFundamentalMat(kps_ref, kps_cur, cv2.FM_RANSAC, kRansacThresholdPixels, kRansacProb)
         if F is None or F.shape == (1, 1):
             # no fundamental matrix found
             raise Exception('No fundamental matrix found')
@@ -182,11 +182,11 @@ def processFrame(self, frame_id):
         # draw image
         self.draw_img = self.drawFeatureTracks(self.cur_image)
         # check if we have enough features to track otherwise detect new ones and start tracking from them (used for LK tracker)
-        # if (self.feature_tracker.tracker_type == FeatureTrackerTypes.LK) and (self.kps_ref.shape[0] < self.feature_tracker.num_features):
-        #     self.kps_cur, self.des_cur = self.feature_tracker.detectAndCompute(self.cur_image)
-        #     self.kps_cur = np.array([x.pt for x in self.kps_cur], dtype=np.float32) # convert from list of keypoints to an array of points
-        #     if kVerbose:
-        #         print('# new detected points: ', self.kps_cur.shape[0])
+        if (self.feature_tracker.tracker_type == FeatureTrackerTypes.LK) and (self.kps_ref.shape[0] < self.feature_tracker.num_features):
+            self.kps_cur, self.des_cur = self.feature_tracker.detectAndCompute(self.cur_image)
+            self.kps_cur = np.array([x.pt for x in self.kps_cur], dtype=np.float32) # convert from list of keypoints to an array of points
+            if kVerbose:
+                print('# new detected points: ', self.kps_cur.shape[0])
         self.kps_ref = self.kps_cur
         self.des_ref = self.des_cur
         self.updateHistory()
@@ -200,7 +200,7 @@ def track(self, img, frame_id):
         if img.ndim>2:
             img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
         # check coherence of image size with camera settings
-        #assert(img.ndim==2 and img.shape[0]==self.cam.height and img.shape[1]==self.cam.width), "Frame: provided image has not the same size as the camera model or image is not grayscale"
+        assert(img.ndim==2 and img.shape[0]==self.cam.height and img.shape[1]==self.cam.width), "Frame: provided image has not the same size as the camera model or image is not grayscale"
         self.cur_image = img
         # manage and check stage
         if(self.stage == VoStage.GOT_FIRST_IMAGE):
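
Editor's note, not part of the patch: the snippet below is a minimal usage sketch of the LightGlue matching path that this diff adds to FeatureMatcher in feature_matcher.py. It assumes the modified pySLAM modules shown above are importable, that FeatureMatcherTypes.LG is defined in this fork, that the LightGlue package used by the patch is installed, and that a CUDA device is available (the patch hard-codes .to('cuda')); the image file names are hypothetical.

    import cv2
    import numpy as np
    from feature_matcher import FeatureMatcher, FeatureMatcherTypes
    from feature_superpoint import SuperPointFeature2D

    # two consecutive grayscale frames (hypothetical file names)
    img1 = cv2.imread('frame_000000.png', cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread('frame_000001.png', cv2.IMREAD_GRAYSCALE)

    # SuperPoint keypoints and descriptors (the patch builds LightGlue with features="superpoint")
    sp = SuperPointFeature2D()
    kps1, des1 = sp.detectAndCompute(img1)
    kps2, des2 = sp.detectAndCompute(img2)

    # the LG branch of match() feeds kps/des straight into torch.tensor(),
    # so pass plain (N,2) coordinate arrays rather than cv2.KeyPoint lists
    pts1 = np.array([kp.pt for kp in kps1], dtype=np.float32)
    pts2 = np.array([kp.pt for kp in kps2], dtype=np.float32)

    matcher = FeatureMatcher(norm_type=cv2.NORM_L2, type=FeatureMatcherTypes.LG)
    idx1, idx2 = matcher.match(img1, des1, des2, kps1=pts1, kps2=pts2)
    print('LightGlue matches:', len(idx1))

Note that inside the LG branch d1 pairs kps1 with des2 while d2 pairs kps2 with des1, and the method returns (idx1, idx0); it is worth verifying that this index order matches the query/train convention used by the rest of the pipeline.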
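
Editor's note, not part of the patch: the poseRt() change in utils_geom.py replaces the explicit t[i][0] indexing with a direct slice assignment, so the function now expects a flat (3,) translation vector; callers still holding a (3,1) column vector need to flatten it first, since NumPy cannot broadcast a (3,1) array into ret[:3, 3]. A quick check of that behaviour, assuming utils_geom and NumPy are importable:

    import numpy as np
    from utils_geom import poseRt

    R = np.eye(3)
    t = np.array([0.1, 0.2, 0.3])     # flat (3,) translation, as the patched poseRt expects

    T = poseRt(R, t)
    assert np.allclose(T[:3, :3], R) and np.allclose(T[:3, 3], t)

    # a (3,1) column vector must be flattened before calling the patched function
    t_col = t.reshape(3, 1)
    T2 = poseRt(R, t_col.ravel())
    assert np.allclose(T, T2)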