
Commit

update
TruongKhang committed Dec 9, 2021
1 parent 664b9f5 commit 11153e7
Showing 4 changed files with 40 additions and 23 deletions.
4 changes: 2 additions & 2 deletions droid_slam/droid.py
@@ -22,10 +22,10 @@ def __init__(self, args):
self.disable_vis = args.disable_vis

# dense depth prediction
self.mvsnet = CDSMVSNet(refine=True, ndepths=(64, 32, 8), depth_interals_ratio=(4, 2, 1))
self.mvsnet = CDSMVSNet(refine=True, ndepths=(96, 32, 8), depth_interals_ratio=(4, 2, 1))
mvsnet_ckpt = torch.load(args.mvsnet_ckpt)
state_dict = OrderedDict([
(k.replace("module.", ""), v) for (k, v) in torch.load(mvsnet_ckpt["state_dict"]).items()
(k.replace("module.", ""), v) for (k, v) in mvsnet_ckpt["state_dict"].items()
])
self.mvsnet.load_state_dict(state_dict)
self.mvsnet.to("cuda:0").eval()
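
The droid.py change fixes a double load: args.mvsnet_ckpt is read once by torch.load, and its "state_dict" entry is already an in-memory dict of tensors, so only the nn.DataParallel "module." prefix needs stripping before load_state_dict. A minimal sketch of that pattern (the helper name and checkpoint layout are assumptions, not code from this repository):

from collections import OrderedDict
import torch
import torch.nn as nn

def load_dataparallel_checkpoint(model: nn.Module, ckpt_path: str) -> nn.Module:
    # Hypothetical helper: torch.load is called once; ckpt["state_dict"] is already
    # an in-memory dict of tensors, so it must not be passed to torch.load again.
    ckpt = torch.load(ckpt_path, map_location="cpu")
    # Checkpoints saved through nn.DataParallel prefix every key with "module.";
    # strip it so the keys match a bare module.
    state_dict = OrderedDict(
        (k.replace("module.", "", 1), v) for k, v in ckpt["state_dict"].items()
    )
    model.load_state_dict(state_dict)
    return model.eval()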
37 changes: 27 additions & 10 deletions droid_slam/droid_frontend.py
@@ -1,6 +1,7 @@
import torch
import lietorch
import numpy as np
import torch.nn.functional as F

from lietorch import SE3
from factor_graph import FactorGraph
@@ -71,26 +72,43 @@ def __update(self):
if self.mvsnet is not None:
ref_id, src_ids = self.t1 - 3, [self.t1-5, self.t1-4, self.t1-2, self.t1-1]
img_ids = [ref_id] + src_ids
intrinsics = self.video.intrinsics[img_ids]
intrinsics = self.video.intrinsics[img_ids] * 8
poses = SE3(self.video.poses[img_ids]).matrix()
proj_matrices = torch.zeros_like(poses)
proj_matrices[:, 0, 0], proj_matrices[:, 1, 1], proj_matrices[:, :2, 2] = intrinsics[:, 0], intrinsics[:, 1], intrinsics[:, 2:]
proj_matrices = torch.stack((poses, proj_matrices), dim=1)

ref_depth = 1 / self.video.disp[ref_id]
intr_matrices = torch.zeros_like(poses)
intr_matrices[:, 0, 0], intr_matrices[:, 1, 1] = intrinsics[:, 0], intrinsics[:, 1],
intr_matrices[:, :2, 2], intr_matrices[:, 2, 2] = intrinsics[:, 2:], 1.0
proj_stage3 = torch.stack((poses, intr_matrices), dim=1)
proj_stage2 = proj_stage3.clone()
proj_stage2[:, 1, :2] *= 0.5
proj_stage1 = proj_stage2.clone()
proj_stage1[:, 1, :2] *= 0.5
proj_stage0 = proj_stage1.clone()
proj_stage0[:, 1, :2] *= 0.5
proj_matrices = {"stage1": proj_stage0.unsqueeze(0),
"stage2": proj_stage1.unsqueeze(0),
"stage3": proj_stage2.unsqueeze(0),
"stage4": proj_stage3.unsqueeze(0)}

ref_depth = 1 / self.video.disps[ref_id]
val_depths = ref_depth[(ref_depth > 0.001) & (ref_depth < 1000)]
min_d, max_d = val_depths.min(), val_depths.max()
d_interval = (max_d - min_d) / 192
depth_values = torch.arange(min_d, max_d, d_interval).unsqueeze(0)
d_interval = 0.05 #(max_d - min_d) / 256
depth_values = torch.arange(0, 384, dtype=torch.float32, device=min_d.device).unsqueeze(0) * d_interval + min_d

images = self.video.images[img_ids]
images = self.video.images[img_ids].unsqueeze(0) / 255.
with torch.no_grad():
final_depth = self.mvsnet(images, proj_matrices, depth_values.cuda(), temperature=0.01)["refined_depth"]
disp_up = 1 / (final_depth.squeeze(0) + 1e-6)
self.video.disps_up[ref_id] = disp_up.clamp(min=0.001)

# set pose for next iteration
self.video.poses[self.t1] = self.video.poses[self.t1-1]
self.video.disps[self.t1] = self.video.disps[self.t1-1].mean()

# update visualization
self.video.dirty[self.graph.ii.min():self.t1] = True
# self.video.dirty[self.graph.ii.min():self.t1] = True
self.video.dirty[self.graph.ii.min():(self.t1 - 2)] = True

def __initialize(self):
""" initialize the SLAM system """
@@ -127,7 +145,6 @@ def __initialize(self):

def __call__(self):
""" main update """

# do initialization
if not self.is_initialized and self.video.counter.value == self.warmup:
self.__initialize()
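
The depth sweep in the __update hunk above also moves from an adaptive (max - min) / 192 spacing to a fixed 0.05 step: 384 hypotheses starting at the smallest valid depth recovered from DROID's current disparities, with the network's refined depth inverted (1 / (d + 1e-6)), clamped, and written into the full-resolution disps_up buffer. A small sketch of the hypothesis construction under those assumptions (function name is hypothetical):

import torch

def depth_hypotheses(ref_disp: torch.Tensor, n_planes: int = 384, step: float = 0.05) -> torch.Tensor:
    # Hypothetical helper mirroring the fixed-interval sweep in the hunk above.
    depth = 1.0 / ref_disp
    valid = depth[(depth > 0.001) & (depth < 1000)]   # drop degenerate depths
    min_d = valid.min()
    # planes at min_d, min_d + step, ..., min_d + (n_planes - 1) * step
    planes = torch.arange(n_planes, dtype=torch.float32, device=ref_disp.device) * step + min_d
    return planes.unsqueeze(0)                        # (1, n_planes), as the network expects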
2 changes: 1 addition & 1 deletion droid_slam/modules/cds_modules.py
@@ -3,7 +3,7 @@
import torch.nn as nn
import torch.nn.functional as F

from dynamic_conv import DynamicConv
from droid_slam.modules.dynamic_conv import DynamicConv


def init_bn(module):
20 changes: 10 additions & 10 deletions droid_slam/visualization.py
@@ -87,21 +87,21 @@ def animation_callback(vis):
return

video.dirty[dirty_index] = False

# convert poses to 4x4 matrix
poses = torch.index_select(video.poses, 0, dirty_index)
disps = torch.index_select(video.disps, 0, dirty_index)
poses = torch.index_select(video.poses, 0, dirty_index).clone()
# disps = torch.index_select(video.disps, 0, dirty_index)
disps = torch.index_select(video.disps_up, 0, dirty_index).clone()
Ps = SE3(poses).inv().matrix().cpu().numpy()

images = torch.index_select(video.images, 0, dirty_index)
images = images.cpu()[:,[2,1,0],3::8,3::8].permute(0,2,3,1) / 255.0
points = droid_backends.iproj(SE3(poses).inv().data, disps, video.intrinsics[0]).cpu()
images = torch.index_select(video.images, 0, dirty_index).clone()
# images = images.cpu()[:,[2,1,0],3::8,3::8].permute(0,2,3,1) / 255.0
images = images.cpu()[:, [2, 1, 0]].permute(0, 2, 3, 1) / 255.0
points = droid_backends.iproj(SE3(poses).inv().data, disps, video.intrinsics[0]*8).cpu()

thresh = droid_visualization.filter_thresh * torch.ones_like(disps.mean(dim=[1,2]))

count = droid_backends.depth_filter(
video.poses, video.disps, video.intrinsics[0], dirty_index, thresh)

video.poses.clone(), video.disps_up.clone(), video.intrinsics[0]*8, dirty_index, thresh)
count = count.cpu()
disps = disps.cpu()
masks = ((count >= 2) & (disps > .5*disps.mean(dim=[1,2], keepdim=True)))
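
droid_backends.depth_filter is the CUDA multi-view consistency check; a point then survives when at least two other keyframes confirm its depth and its disparity is not far below the per-frame mean, which drops distant and low-confidence geometry. A sketch of just the mask combination, treating the filter output as given (helper name is hypothetical):

import torch

def point_mask(count: torch.Tensor, disps: torch.Tensor, min_views: int = 2) -> torch.Tensor:
    # Hypothetical helper. count, disps: (N, H, W); keep pixels confirmed by at least
    # min_views other keyframes whose disparity is above half the per-frame mean.
    mean_disp = disps.mean(dim=[1, 2], keepdim=True)
    return (count >= min_views) & (disps > 0.5 * mean_disp)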
@@ -125,8 +125,8 @@ def animation_callback(vis):
droid_visualization.cameras[ix] = cam_actor

mask = masks[i].reshape(-1)
pts = points[i].reshape(-1, 3)[mask].cpu().numpy()
clr = images[i].reshape(-1, 3)[mask].cpu().numpy()
pts = points[i].reshape(-1, 3)[mask].numpy()
clr = images[i].reshape(-1, 3)[mask].numpy()

## add point actor ###
point_actor = create_point_actor(pts, clr)
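
create_point_actor itself is not part of this diff; a minimal Open3D helper of that kind, consuming the masked (M, 3) point and color arrays produced above, might look like the following sketch (an assumption, not the repository's implementation):

import numpy as np
import open3d as o3d

def create_point_actor_sketch(points: np.ndarray, colors: np.ndarray) -> o3d.geometry.PointCloud:
    # Hypothetical helper: points and colors are float arrays of shape (M, 3), colors in [0, 1].
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    pcd.colors = o3d.utility.Vector3dVector(colors)
    return pcd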
