"""Fuse 1000 RGB-D images from the 7-scenes dataset into a TSDF voxel volume with 2cm resolution.
"""
import time

import cv2
import numpy as np

import fusion


if __name__ == "__main__":
  # ======================================================================================================== #
  # (Optional) This is an example of how to compute the 3D bounds
  # in world coordinates of the convex hull of all camera view
  # frustums in the dataset
  # ======================================================================================================== #
  print("Estimating voxel volume bounds...")
  n_imgs = 1000
  cam_intr = np.loadtxt("data/camera-intrinsics.txt", delimiter=' ')
  vol_bnds = np.zeros((3, 2))
  for i in range(n_imgs):
    # Read depth image and camera pose
    depth_im = cv2.imread("data/frame-%06d.depth.png" % (i), -1).astype(float)
    depth_im /= 1000.  # depth is saved in 16-bit PNG in millimeters
    depth_im[depth_im == 65.535] = 0  # set invalid depth to 0 (7-scenes marks missing depth as 65535, i.e. 65.535 after the mm-to-m conversion)
    cam_pose = np.loadtxt("data/frame-%06d.pose.txt" % (i))  # 4x4 rigid transformation matrix

    # Compute camera view frustum and extend convex hull
    view_frust_pts = fusion.get_view_frustum(depth_im, cam_intr, cam_pose)
    vol_bnds[:, 0] = np.minimum(vol_bnds[:, 0], np.amin(view_frust_pts, axis=1))
    vol_bnds[:, 1] = np.maximum(vol_bnds[:, 1], np.amax(view_frust_pts, axis=1))
  # ======================================================================================================== #
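
  # For reference, a minimal sketch of how one view frustum's corner points can
  # be derived from the intrinsics and pose (fusion.get_view_frustum is the real
  # implementation; this helper and its name are illustrative, not part of the API):
  def _frustum_corners_sketch(depth_im, cam_intr, cam_pose):
    h, w = depth_im.shape
    max_depth = np.max(depth_im)
    # Back-project the four image corners at max depth with the pinhole model,
    # plus the camera center itself (first column, all zeros)
    pix = np.array([[0., 0., w, w], [0., h, 0., h]])
    pts = np.zeros((3, 5))
    pts[0, 1:] = (pix[0] - cam_intr[0, 2]) * max_depth / cam_intr[0, 0]
    pts[1, 1:] = (pix[1] - cam_intr[1, 2]) * max_depth / cam_intr[1, 1]
    pts[2, 1:] = max_depth
    # Transform from camera to world coordinates with the 4x4 rigid pose
    return cam_pose[:3, :3] @ pts + cam_pose[:3, 3:4]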

  # ======================================================================================================== #
  # Integrate
  # ======================================================================================================== #
  # Initialize voxel volume
  print("Initializing voxel volume...")
  tsdf_vol = fusion.TSDFVolume(vol_bnds, voxel_size=0.02)
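
  # With 2 cm voxels, the grid size follows directly from the estimated bounds;
  # roughly (illustrative arithmetic, not a fusion.py API call):
  #   vol_dim = np.ceil((vol_bnds[:, 1] - vol_bnds[:, 0]) / 0.02).astype(int)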

  # Loop through RGB-D images and fuse them together
  t0_elapse = time.time()
  for i in range(n_imgs):
    print("Fusing frame %d/%d" % (i + 1, n_imgs))

    # Read RGB-D image and camera pose
    color_image = cv2.cvtColor(cv2.imread("data/frame-%06d.color.jpg" % (i)), cv2.COLOR_BGR2RGB)
    depth_im = cv2.imread("data/frame-%06d.depth.png" % (i), -1).astype(float)
    depth_im /= 1000.
    depth_im[depth_im == 65.535] = 0
    cam_pose = np.loadtxt("data/frame-%06d.pose.txt" % (i))

    # Integrate observation into voxel volume (assume color images are already aligned with depth)
    tsdf_vol.integrate(color_image, depth_im, cam_intr, cam_pose, obs_weight=1.)

  fps = n_imgs / (time.time() - t0_elapse)
  print("Average FPS: {:.2f}".format(fps))

  # Get mesh from voxel volume and save to disk (can be viewed with MeshLab)
  print("Saving mesh to mesh.ply...")
  verts, faces, norms, colors = tsdf_vol.get_mesh()
  fusion.meshwrite("mesh.ply", verts, faces, norms, colors)

  # Get point cloud from voxel volume and save to disk (can be viewed with MeshLab)
  print("Saving point cloud to pc.ply...")
  point_cloud = tsdf_vol.get_point_cloud()
  fusion.pcwrite("pc.ply", point_cloud)
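
  # get_mesh() extracts the zero-level isosurface of the TSDF. For reference, a
  # sketch of how such a mesh could be pulled from a raw TSDF grid with
  # scikit-image's marching cubes (grid/origin names here are illustrative and
  # fusion.py's internals may differ):
  #   from skimage import measure
  #   verts, faces, norms, vals = measure.marching_cubes(tsdf_grid, level=0)
  #   verts_world = verts * voxel_size + vol_origin  # voxel indices -> world coords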