add database.py, modify dnerf point init.
This commit is contained in:
parent 3507d2a509
commit c30cc414a6

database.py  (new file, 119 lines)
@@ -0,0 +1,119 @@
# This script is based on an original implementation by True Price.
# Created by liminghao
import sys
import numpy as np
import sqlite3

IS_PYTHON3 = sys.version_info[0] >= 3

def array_to_blob(array):
    if IS_PYTHON3:
        # Note: ndarray.tostring() is deprecated in recent NumPy; ndarray.tobytes() is the equivalent call.
        return array.tostring()
    else:
        return np.getbuffer(array)

def blob_to_array(blob, dtype, shape=(-1,)):
    if IS_PYTHON3:
        # Note: np.fromstring is deprecated for binary input; np.frombuffer is the equivalent call.
        return np.fromstring(blob, dtype=dtype).reshape(*shape)
    else:
        return np.frombuffer(blob, dtype=dtype).reshape(*shape)

class COLMAPDatabase(sqlite3.Connection):

    @staticmethod
    def connect(database_path):
        return sqlite3.connect(database_path, factory=COLMAPDatabase)

    def __init__(self, *args, **kwargs):
        super(COLMAPDatabase, self).__init__(*args, **kwargs)

        # Note: the CREATE_* SQL constants referenced below come from the full COLMAP
        # database.py script; they are not defined in this trimmed file, and the
        # camera-update workflow below does not call these helpers.
        self.create_tables = lambda: self.executescript(CREATE_ALL)
        self.create_cameras_table = \
            lambda: self.executescript(CREATE_CAMERAS_TABLE)
        self.create_descriptors_table = \
            lambda: self.executescript(CREATE_DESCRIPTORS_TABLE)
        self.create_images_table = \
            lambda: self.executescript(CREATE_IMAGES_TABLE)
        self.create_two_view_geometries_table = \
            lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE)
        self.create_keypoints_table = \
            lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
        self.create_matches_table = \
            lambda: self.executescript(CREATE_MATCHES_TABLE)
        self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)

    def update_camera(self, model, width, height, params, camera_id):
        params = np.asarray(params, np.float64)
        cursor = self.execute(
            "UPDATE cameras SET model=?, width=?, height=?, params=?, prior_focal_length=True WHERE camera_id=?",
            (model, width, height, array_to_blob(params), camera_id))
        return cursor.lastrowid

def camTodatabase():
    import os
    import argparse

    camModelDict = {'SIMPLE_PINHOLE': 0,
                    'PINHOLE': 1,
                    'SIMPLE_RADIAL': 2,
                    'RADIAL': 3,
                    'OPENCV': 4,
                    'FULL_OPENCV': 5,
                    'SIMPLE_RADIAL_FISHEYE': 6,
                    'RADIAL_FISHEYE': 7,
                    'OPENCV_FISHEYE': 8,
                    'FOV': 9,
                    'THIN_PRISM_FISHEYE': 10}
    parser = argparse.ArgumentParser()
    parser.add_argument("--database_path", type=str, default="database.db")
    parser.add_argument("--txt_path", type=str, default="colmap/sparse_cameras.txt")
    # breakpoint()
    args = parser.parse_args()
    if not os.path.exists(args.database_path):
        print("ERROR: database path doesn't exist -- please check database.db.")
        return
    # Open the database.
    db = COLMAPDatabase.connect(args.database_path)

    idList = list()
    modelList = list()
    widthList = list()
    heightList = list()
    paramsList = list()
    # Update real cameras from .txt
    with open(args.txt_path, "r") as cam:
        lines = cam.readlines()
        for i in range(len(lines)):
            if lines[i][0] != '#':
                strLists = lines[i].split()
                cameraId = int(strLists[0])
                cameraModel = camModelDict[strLists[1]]  # select camera model
                width = int(strLists[2])
                height = int(strLists[3])
                paramstr = np.array(strLists[4:12])
                params = paramstr.astype(np.float64)
                idList.append(cameraId)
                modelList.append(cameraModel)
                widthList.append(width)
                heightList.append(height)
                paramsList.append(params)
                camera_id = db.update_camera(cameraModel, width, height, params, cameraId)

    # Commit the data to the file.
    db.commit()
    # Read and check cameras.
    rows = db.execute("SELECT * FROM cameras")
    for i in range(len(idList)):
        camera_id, model, width, height, params, prior = next(rows)
        params = blob_to_array(params, np.float64)
        assert camera_id == idList[i]
        assert model == modelList[i] and width == widthList[i] and height == heightList[i]
        assert np.allclose(params, paramsList[i])

    # Close database.db.
    db.close()

if __name__ == "__main__":
    import sys, os
    camTodatabase()
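For context, the script above only updates camera intrinsics in an existing COLMAP database; it does not create one. A minimal usage sketch follows (not part of the commit): the camera values are made-up placeholders, and the import assumes the file above is saved as database.py next to the caller.

# Usage sketch -- either run the script with its argparse defaults:
#     python database.py --database_path database.db --txt_path colmap/sparse_cameras.txt
# or drive the class programmatically:
import numpy as np
from database import COLMAPDatabase  # assumes the file above is saved as database.py

db = COLMAPDatabase.connect("database.db")      # existing COLMAP database with a cameras table
params = np.array([1111.0, 400.0, 400.0])       # SIMPLE_PINHOLE params: f, cx, cy (placeholder values)
db.update_camera(model=0, width=800, height=800, params=params, camera_id=1)  # model 0 = SIMPLE_PINHOLE
db.commit()
db.close()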
@@ -332,8 +332,7 @@ def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
-       # xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
-       xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
+       xyz = np.random.random((num_pts, 3)) * 0.5 - 0.25
        shs = np.random.random((num_pts, 3)) / 255.0
        pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
        # storePly(ply_path, xyz, SH2RGB(shs) * 255)
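The hunk above is the "dnerf point init" part of the commit: for the synthetic Blender/D-NeRF branch, the random initial points are drawn from a much smaller cube. A small self-contained bounds check (pure NumPy; num_pts is just an illustrative value):

import numpy as np

num_pts = 2000
old_xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3   # old init: uniform in [-1.3, 1.3)^3
new_xyz = np.random.random((num_pts, 3)) * 0.5 - 0.25  # new init: uniform in [-0.25, 0.25)^3
print(old_xyz.min(axis=0), old_xyz.max(axis=0))
print(new_xyz.min(axis=0), new_xyz.max(axis=0))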
@@ -382,20 +381,11 @@ def readHyperDataInfos(datadir,use_bg_points,eval):
    video_cam_infos = copy.deepcopy(test_cam_infos)
    video_cam_infos.split="video"

-   # ply_path = os.path.join(datadir, "points.npy")
-   # xyz = np.load(ply_path,allow_pickle=True)
-   # xyz -= train_cam_infos.scene_center
-   # xyz *= train_cam_infos.coord_scale
-   # xyz = xyz.astype(np.float32)
-   # shs = np.random.random((xyz.shape[0], 3)) / 255.0
-   # pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((xyz.shape[0], 3)))
    ply_path = os.path.join(datadir, "points3D_downsample.ply")
-   # ply_path = os.path.join(datadir, "points3D.ply")
    pcd = fetchPly(ply_path)
    xyz = np.array(pcd.points)
-   # xyz -= train_cam_infos.scene_center
-   # xyz *= train_cam_infos.coord_scale
    pcd = pcd._replace(points=xyz)
    nerf_normalization = getNerfppNorm(train_cam)
    plot_camera_orientations(train_cam_infos, pcd.points)
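The hunk above drops the .npy-based point loading (and its per-scene recentering and rescaling) in favour of reading a downsampled COLMAP point cloud from points3D_downsample.ply via fetchPly. A rough sketch of such a PLY read with the plyfile package, mirroring the commented-out code removed in the next hunk (the repo's actual fetchPly may differ in details, and the path will differ per dataset):

import numpy as np
from plyfile import PlyData

plydata = PlyData.read("points3D_downsample.ply")
v = plydata["vertex"]
positions = np.vstack([v["x"], v["y"], v["z"]]).T               # (N, 3) point coordinates
colors = np.vstack([v["red"], v["green"], v["blue"]]).T / 255.0  # (N, 3) colors in [0, 1]
print(positions.shape, colors.min(), colors.max())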
@@ -433,12 +423,7 @@ def format_render_poses(poses,data_infos):
                            image_path=image_path, image_name=image_name, width=image.shape[2], height=image.shape[1],
                            time = time, mask=None))
    return cameras
-   # plydata = PlyData.read(path)
-   # vertices = plydata['vertex']
-   # positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
-   # colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
-   # normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
-   # return BasicPointCloud(points=positions, colors=colors, normals=normals)

def add_points(pointsclouds, xyz_min, xyz_max):
    add_points = (np.random.random((100000, 3)))* (xyz_max-xyz_min) + xyz_min
    add_points = add_points.astype(np.float32)
@@ -478,26 +463,8 @@ def readdynerfInfo(datadir,use_bg_points,eval):
                            eval_index=0,
                            )
    train_cam_infos = format_infos(train_dataset,"train")

-   # test_cam_infos = format_infos(test_dataset,"test")
    val_cam_infos = format_render_poses(test_dataset.val_poses,test_dataset)
    nerf_normalization = getNerfppNorm(train_cam_infos)
-   # create pcd
-   # if not os.path.exists(ply_path):
-   # Since this data set has no colmap data, we start with random points
-   # num_pts = 2000
-   # print(f"Generating random point cloud ({num_pts})...")
-   # threshold = 3
-   # xyz_max = np.array([1.5*threshold, 1.5*threshold, 1.5*threshold])
-   # xyz_min = np.array([-1.5*threshold, -1.5*threshold, -3*threshold])
-   # xyz_max = np.array([1.5*threshold, 1.5*threshold, 1.5*threshold])
-   # xyz_min = np.array([-1.5*threshold, -1.5*threshold, -1.5*threshold])
-   # We create random points inside the bounds of the synthetic Blender scenes
-   # xyz = (np.random.random((num_pts, 3)))* (xyz_max-xyz_min) + xyz_min
-   # print("point cloud initialization:",xyz.max(axis=0),xyz.min(axis=0))
-   # shs = np.random.random((num_pts, 3)) / 255.0
-   # pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
-   # storePly(ply_path, xyz, SH2RGB(shs) * 255)

    # xyz = np.load
    pcd = fetchPly(ply_path)
@@ -557,12 +524,9 @@ def plot_camera_orientations(cam_list, xyz):
        # Extract R and T
        R = cam.R
        T = cam.T
-       # print(R,T)
-       # breakpoint()
-       # Compute the camera orientation (a unit vector)
        direction = R @ np.array([0, 0, 1])

-       # Plot the camera position and orientation
        ax.quiver(T[0], T[1], T[2], direction[0], direction[1], direction[2], length=1)

    ax.set_xlabel('X Axis')
@@ -88,8 +88,9 @@ class GaussianModel:
    def restore(self, model_args, training_args):
        (self.active_sh_degree,
        self._xyz,
+       deform_state,
        self._deformation_table,
-       self._deformation,
        # self.grid,
        self._features_dc,
        self._features_rest,
@@ -101,6 +102,7 @@ class GaussianModel:
        denom,
        opt_dict,
        self.spatial_lr_scale) = model_args
+       self._deformation.load_state_dict(deform_state)
        self.training_setup(training_args)
        self.xyz_gradient_accum = xyz_gradient_accum
        self.denom = denom
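The two hunks above change how the deformation network is checkpointed: restore() now unpacks a state dict (deform_state) from the saved tuple and loads it into self._deformation, rather than taking the module object itself. A minimal sketch of that save/restore symmetry with a stand-in module (the names here are placeholders, not the repo's actual fields):

import torch
import torch.nn as nn

# Stand-in for the deformation field; the real one is the repo's deformation network.
deform = nn.Linear(4, 3)

# Saving side (mirrors capture()): store the state dict, not the module object.
model_args = ("...other fields...", deform.state_dict())
torch.save(model_args, "chkpnt_sketch.pth")

# Restoring side (mirrors restore()): rebuild the module, then load the weights into it.
_, deform_state = torch.load("chkpnt_sketch.pth")
restored = nn.Linear(4, 3)
restored.load_state_dict(deform_state)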
train.py  (1 line changed)
@@ -45,6 +45,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i

    gaussians.training_setup(opt)
    if checkpoint:
+       breakpoint()
        (model_params, first_iter) = torch.load(checkpoint)
        gaussians.restore(model_params, opt)
