import numpy as np
import cv2

# Pipeline stages: the first two frames bootstrap the pose, later frames are tracked incrementally.
STAGE_FIRST_FRAME = 0
STAGE_SECOND_FRAME = 1
STAGE_DEFAULT_FRAME = 2
kMinNumFeature = 1500  # redetect features once the tracked set falls below this count

# Parameters for pyramidal Lucas-Kanade (KLT) optical flow.
lk_params = dict(winSize=(21, 21),
                 # maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))


def featureTracking(image_ref, image_cur, px_ref):
    # Track px_ref from image_ref into image_cur with KLT; keep only successfully tracked points.
    kp2, st, err = cv2.calcOpticalFlowPyrLK(image_ref, image_cur, px_ref, None, **lk_params)  # shapes: [k,2] [k,1] [k,1]

    st = st.reshape(st.shape[0])
    kp1 = px_ref[st == 1]
    kp2 = kp2[st == 1]

    return kp1, kp2


class PinholeCamera:
    def __init__(self, width, height, fx, fy, cx, cy,
                 k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0):
        self.width = width
        self.height = height
        self.fx = fx
        self.fy = fy
        self.cx = cx
        self.cy = cy
        self.distortion = (abs(k1) > 0.0000001)
        self.d = [k1, k2, p1, p2, k3]


class VisualOdometry:
    def __init__(self, cam, annotations):
        self.frame_stage = STAGE_FIRST_FRAME
        self.cam = cam
        self.new_frame = None
        self.last_frame = None
        self.cur_R = None  # accumulated rotation
        self.cur_t = None  # accumulated translation
        self.px_ref = None
        self.px_cur = None
        self.focal = cam.fx
        self.pp = (cam.cx, cam.cy)
        self.trueX, self.trueY, self.trueZ = 0, 0, 0
        self.detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
        with open(annotations) as f:
            self.annotations = f.readlines()

    def getAbsoluteScale(self, frame_id):  # specialized for the KITTI odometry dataset
        # Ground-truth poses are 3x4 matrices flattened row-major; columns 3, 7, 11 hold the translation.
        ss = self.annotations[frame_id - 1].strip().split()
        x_prev = float(ss[3])
        y_prev = float(ss[7])
        z_prev = float(ss[11])
        ss = self.annotations[frame_id].strip().split()
        x = float(ss[3])
        y = float(ss[7])
        z = float(ss[11])
        self.trueX, self.trueY, self.trueZ = x, y, z
        return np.sqrt((x - x_prev)**2 + (y - y_prev)**2 + (z - z_prev)**2)

    def processFirstFrame(self):
        # Detect FAST keypoints on the first frame; pose estimation starts with the second frame.
        self.px_ref = self.detector.detect(self.new_frame)
        self.px_ref = np.array([x.pt for x in self.px_ref], dtype=np.float32)
        self.frame_stage = STAGE_SECOND_FRAME

    def processSecondFrame(self):
        # Bootstrap: estimate the essential matrix between the first two frames and recover the initial pose.
        self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
        E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
        self.frame_stage = STAGE_DEFAULT_FRAME
        self.px_ref = self.px_cur

    def processFrame(self, frame_id):
        # Track features, estimate the frame-to-frame motion, and accumulate it using the ground-truth scale.
        self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
        E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        _, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
        absolute_scale = self.getAbsoluteScale(frame_id)
        if absolute_scale > 0.1:  # skip the update when the motion is too small to be reliable
            self.cur_t = self.cur_t + absolute_scale * self.cur_R.dot(t)
            self.cur_R = R.dot(self.cur_R)
        if self.px_ref.shape[0] < kMinNumFeature:
            # Too few features survived tracking; redetect on the current frame.
            self.px_cur = self.detector.detect(self.new_frame)
            self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
        self.px_ref = self.px_cur

    def update(self, img, frame_id):
        assert img.ndim == 2 and img.shape[0] == self.cam.height and img.shape[1] == self.cam.width, \
            "Frame: provided image does not match the camera model size or is not grayscale"
        self.new_frame = img
        if self.frame_stage == STAGE_DEFAULT_FRAME:
            self.processFrame(frame_id)
        elif self.frame_stage == STAGE_SECOND_FRAME:
            self.processSecondFrame()
        elif self.frame_stage == STAGE_FIRST_FRAME:
            self.processFirstFrame()
        self.last_frame = self.new_frame
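

# --- Hypothetical usage sketch, not part of the class above. ---
# It shows how the pipeline could be driven end to end on a KITTI odometry sequence.
# The paths, the sequence length, and the intrinsics passed to PinholeCamera are
# assumptions for illustration; adjust them to your dataset layout and calibration.
if __name__ == '__main__':
    cam = PinholeCamera(1241.0, 376.0, 718.8560, 718.8560, 607.1928, 185.2157)
    vo = VisualOdometry(cam, './dataset/poses/00.txt')

    for img_id in range(4541):  # assumed number of frames in the sequence
        img = cv2.imread('./dataset/sequences/00/image_0/' + str(img_id).zfill(6) + '.png', 0)
        vo.update(img, img_id)
        if img_id > 1:  # cur_t is only defined after the second frame has been processed
            x, y, z = vo.cur_t[0, 0], vo.cur_t[1, 0], vo.cur_t[2, 0]
            print('frame %d: estimated position (%.2f, %.2f, %.2f), ground truth (%.2f, %.2f, %.2f)'
                  % (img_id, x, y, z, vo.trueX, vo.trueY, vo.trueZ))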