Merge pull request #103 from jsxzs/master
add preprocess_dynerf.py and update README
Commit 0b0b1a2d78

README.md (12 additions)
@@ -93,6 +93,18 @@ For training synthetic scenes such as `bouncingballs`, run
 python train.py -s data/dnerf/bouncingballs --port 6017 --expname "dnerf/bouncingballs" --configs arguments/dnerf/bouncingballs.py
 ```
 
+For training dynerf scenes such as `cut_roasted_beef`, run
+```python
+# First, extract the frames of each video.
+python scripts/preprocess_dynerf.py --datadir data/dynerf/cut_roasted_beef
+# Second, generate point clouds from input data.
+bash colmap.sh data/dynerf/cut_roasted_beef llff
+# Third, downsample the point clouds generated in the second step.
+python scripts/downsample_point.py data/dynerf/cut_roasted_beef/colmap/dense/workspace/fused.ply data/dynerf/cut_roasted_beef/points3D_downsample2.ply
+# Finally, train.
+python train.py -s data/dynerf/cut_roasted_beef --port 6017 --expname "dynerf/cut_roasted_beef" --configs arguments/dynerf/cut_roasted_beef.py
+```
+
 You can customize your training config through the config files.
 
 Checkpoint
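A note on the third step in the README block above: scripts/downsample_point.py is not part of this diff, so the snippet below is only a minimal sketch of that kind of step, assuming an open3d-style voxel downsample of the fused COLMAP cloud; the actual script's method and parameters may differ.

```python
# Minimal sketch of the point-cloud downsampling step (assumes open3d; the real
# scripts/downsample_point.py may use a different library or parameters).
import sys

import open3d as o3d

src, dst = sys.argv[1], sys.argv[2]           # e.g. fused.ply -> points3D_downsample2.ply
pcd = o3d.io.read_point_cloud(src)
pcd = pcd.voxel_down_sample(voxel_size=0.02)  # voxel size is an illustrative choice
o3d.io.write_point_cloud(dst, pcd)
print(f"wrote {len(pcd.points)} points to {dst}")
```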
@@ -262,7 +262,7 @@ class Neural3D_NDC_Dataset(Dataset):
         poses_arr = np.load(os.path.join(self.root_dir, "poses_bounds.npy"))
         poses = poses_arr[:, :-2].reshape([-1, 3, 5])  # (N_cams, 3, 5)
         self.near_fars = poses_arr[:, -2:]
-        videos = glob.glob(os.path.join(self.root_dir, "cam*"))
+        videos = glob.glob(os.path.join(self.root_dir, "cam*.mp4"))
         videos = sorted(videos)
         # breakpoint()
         assert len(videos) == poses_arr.shape[0]
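A brief note on this hunk: once the new preprocessing step has been run, the scene folder holds both the source videos (cam00.mp4, cam01.mp4, ...) and the extracted-frame directories, so the old `cam*` pattern would match each camera twice and trip the `assert len(videos) == poses_arr.shape[0]` check. The sketch below illustrates the difference; the folder layout is an assumption, not something this diff confirms.

```python
import glob
import os

root_dir = "data/dynerf/cut_roasted_beef"  # example scene from the README

# Old pattern: after preprocessing this matches both cam00.mp4 and an extracted
# frame folder such as cam00/, doubling the apparent camera count.
everything = sorted(glob.glob(os.path.join(root_dir, "cam*")))

# New pattern: only the source videos, which is what this dataset loader expects.
videos = sorted(glob.glob(os.path.join(root_dir, "cam*.mp4")))
```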
@@ -91,7 +91,7 @@ if not os.path.exists(colmap_dir):
 poses_arr = np.load(os.path.join(root_dir, "poses_bounds.npy"))
 poses = poses_arr[:, :-2].reshape([-1, 3, 5])  # (N_cams, 3, 5)
 near_fars = poses_arr[:, -2:]
-videos = glob.glob(os.path.join(root_dir, "cam*"))
+videos = glob.glob(os.path.join(root_dir, "cam[0-9][0-9]"))
 videos = sorted(videos)
 assert len(videos) == poses_arr.shape[0]
 H, W, focal = poses[0, :, -1]
@@ -109,7 +109,7 @@ poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
 # poses[..., 3] /= scale_factor
 # Sample N_views poses for validation - NeRF-like camera trajectory.
 # val_poses = directions
-videos = glob.glob(os.path.join(root_dir, "cam*"))
+videos = glob.glob(os.path.join(root_dir, "cam[0-9][0-9]"))
 videos = sorted(videos)
 image_paths = []
 for index, video_path in enumerate(videos):
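These two hunks are the complementary change in the COLMAP preparation script: it walks per-camera image folders rather than the videos, so its glob is narrowed to directory names of the form cam00, cam01, ... (which `cam[0-9][0-9]` matches, while `cam00.mp4` does not). A minimal sketch of that selection, with the per-camera image subfolder name as an assumption:

```python
import glob
import os

root_dir = "data/dynerf/cut_roasted_beef"

# Directories named cam00, cam01, ...; the pattern does not match cam00.mp4.
cam_dirs = sorted(glob.glob(os.path.join(root_dir, "cam[0-9][0-9]")))

image_paths = []
for cam_dir in cam_dirs:
    # "images" as the extracted-frame subfolder is assumed for illustration only.
    image_paths.append(sorted(glob.glob(os.path.join(cam_dir, "images", "*.png"))))
```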
scripts/preprocess_dynerf.py (new file, 15 additions)
@@ -0,0 +1,15 @@
+from argparse import ArgumentParser
+import sys
+sys.path.append('./scene')
+from neural_3D_dataset_NDC import Neural3D_NDC_Dataset
+# import scene
+# from scene.neural_3D_dataset_NDC import Neural3D_NDC_Dataset
+
+if __name__ == '__main__':
+    parser = ArgumentParser(description="Extract images from dynerf videos")
+    parser.add_argument("--datadir", default='data/dynerf/cut_roasted_beef', type=str)
+    args = parser.parse_args()
+    train_dataset = Neural3D_NDC_Dataset(args.datadir, "train", 1.0, time_scale=1,
+                                         scene_bbox_min=[-2.5, -2.0, -1.0], scene_bbox_max=[2.5, 2.0, 1.0], eval_index=0)
+    test_dataset = Neural3D_NDC_Dataset(args.datadir, "test", 1.0, time_scale=1,
+                                        scene_bbox_min=[-2.5, -2.0, -1.0], scene_bbox_max=[2.5, 2.0, 1.0], eval_index=0)
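A short note on the new script: per its argparse description and the README's first step, simply constructing the train and test Neural3D_NDC_Dataset objects is what extracts the frames from the per-camera videos; the script does no further output handling itself. Below is a hedged sanity check one could run afterwards, with the extracted folder layout assumed rather than confirmed by this diff.

```python
# Assumes frames are extracted into a folder named after each video (e.g. cam00/);
# adjust the pattern if the dataset loader writes them elsewhere.
import glob
import os

datadir = "data/dynerf/cut_roasted_beef"
for video in sorted(glob.glob(os.path.join(datadir, "cam*.mp4"))):
    frame_dir = os.path.splitext(video)[0]  # e.g. .../cam00
    n_frames = len(glob.glob(os.path.join(frame_dir, "**", "*.png"), recursive=True))
    print(f"{os.path.basename(video)}: {n_frames} extracted frames")
```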