diff --git a/README.md b/README.md
index 94dc821..fc344fe 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,7 @@ In our environment, we use pytorch=1.13.1+cu116.
 The dataset provided in [D-NeRF](https://github.com/albertpumarola/D-NeRF) is used. You can download the dataset from [dropbox](https://www.dropbox.com/s/0bf6fl0ye2vz3vr/data.zip?dl=0).
 
 **For real dynamic scenes:**
-The dataset provided in [HyperNeRF](https://github.com/google/hypernerf) is used. You can download scenes from [Hypernerf Dataset](https://github.com/google/hypernerf/releases/tag/v0.1) and organize them as [Nerfies](https://github.com/google/nerfies#datasets). Meanwhile, [Plenoptic Dataset](https://github.com/facebookresearch/Neural_3D_Video) could be downloaded from their official websites. To save the memory, you should extract the frames of each video and then organize your dataset as follows.
+The dataset provided in [HyperNeRF](https://github.com/google/hypernerf) is used. You can download scenes from the [HyperNeRF Dataset](https://github.com/google/hypernerf/releases/tag/v0.1) and organize them as in [Nerfies](https://github.com/google/nerfies#datasets). The [Plenoptic Dataset](https://github.com/facebookresearch/Neural_3D_Video) can be downloaded from its official website. To save memory, extract the frames of each video with `scripts/preprocess_dynerf.py` and then organize your dataset as follows.
 
 ```
 ├── data
@@ -166,6 +166,14 @@ export exp_name="dynerf"
 python merge_many_4dgs.py --model_path output/$exp_name/sear_steak
 ```
 
+`preprocess_dynerf.py`:
+extract the frames of each video.
+usage:
+
+```
+python scripts/preprocess_dynerf.py --datadir data/dynerf/sear_steak
+```
+
 `colmap.sh`:
 generate point clouds from input data
 
diff --git a/scene/neural_3D_dataset_NDC.py b/scene/neural_3D_dataset_NDC.py
index 7b49877..36a2ded 100644
--- a/scene/neural_3D_dataset_NDC.py
+++ b/scene/neural_3D_dataset_NDC.py
@@ -262,7 +262,7 @@ class Neural3D_NDC_Dataset(Dataset):
         poses_arr = np.load(os.path.join(self.root_dir, "poses_bounds.npy"))
         poses = poses_arr[:, :-2].reshape([-1, 3, 5])  # (N_cams, 3, 5)
         self.near_fars = poses_arr[:, -2:]
-        videos = glob.glob(os.path.join(self.root_dir, "cam*"))
+        videos = glob.glob(os.path.join(self.root_dir, "cam*.mp4"))
         videos = sorted(videos)
         # breakpoint()
         assert len(videos) == poses_arr.shape[0]
diff --git a/scripts/llff2colmap.py b/scripts/llff2colmap.py
index ce13f9b..e836cd5 100644
--- a/scripts/llff2colmap.py
+++ b/scripts/llff2colmap.py
@@ -91,7 +91,7 @@ if not os.path.exists(colmap_dir):
 poses_arr = np.load(os.path.join(root_dir, "poses_bounds.npy"))
 poses = poses_arr[:, :-2].reshape([-1, 3, 5])  # (N_cams, 3, 5)
 near_fars = poses_arr[:, -2:]
-videos = glob.glob(os.path.join(root_dir, "cam*"))
+videos = glob.glob(os.path.join(root_dir, "cam[0-9][0-9]"))
 videos = sorted(videos)
 assert len(videos) == poses_arr.shape[0]
 H, W, focal = poses[0, :, -1]
@@ -109,7 +109,7 @@ poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
 # poses[..., 3] /= scale_factor
 # Sample N_views poses for validation - NeRF-like camera trajectory.
 # val_poses = directions
-videos = glob.glob(os.path.join(root_dir, "cam*"))
+videos = glob.glob(os.path.join(root_dir, "cam[0-9][0-9]"))
 videos = sorted(videos)
 image_paths = []
 for index, video_path in enumerate(videos):
diff --git a/scripts/preprocess_dynerf.py b/scripts/preprocess_dynerf.py
new file mode 100644
index 0000000..dc0ed15
--- /dev/null
+++ b/scripts/preprocess_dynerf.py
@@ -0,0 +1,18 @@
+"""Extract per-frame images from the DyNeRF (Neural 3D Video) captures."""
+from argparse import ArgumentParser
+import sys
+
+sys.path.append('./scene')  # make the dataset module importable from the repo root
+from neural_3D_dataset_NDC import Neural3D_NDC_Dataset
+
+if __name__ == '__main__':
+    parser = ArgumentParser(description="Extract images from DyNeRF videos")
+    parser.add_argument("--datadir", default='data/dynerf/cut_roasted_beef', type=str)
+    args = parser.parse_args()
+    # Building the train and test splits triggers the dataset's video-to-frame extraction.
+    train_dataset = Neural3D_NDC_Dataset(args.datadir, "train", 1.0, time_scale=1,
+                                         scene_bbox_min=[-2.5, -2.0, -1.0],
+                                         scene_bbox_max=[2.5, 2.0, 1.0], eval_index=0)
+    test_dataset = Neural3D_NDC_Dataset(args.datadir, "test", 1.0, time_scale=1,
+                                        scene_bbox_min=[-2.5, -2.0, -1.0],
+                                        scene_bbox_max=[2.5, 2.0, 1.0], eval_index=0)
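
Note on the glob changes above: once the frames have been extracted, each scene folder contains both the `camXX.mp4` videos and the per-camera frame folders, so a bare `cam*` pattern would match both and trip the `assert len(videos) == poses_arr.shape[0]` check. The dataset loader therefore globs `cam*.mp4`, while `llff2colmap.py` globs the `cam[0-9][0-9]` frame folders. For reference, below is a minimal sketch of the kind of frame extraction that `preprocess_dynerf.py` delegates to `Neural3D_NDC_Dataset`. It is an illustration, not the repository's implementation: it assumes OpenCV (`cv2`) is installed, and the `camXX/images/NNNN.png` output layout is an assumption.

```python
# Hypothetical sketch of the per-video frame extraction; the actual logic
# lives inside Neural3D_NDC_Dataset. Output layout camXX/images/ is assumed.
import glob
import os

import cv2


def extract_frames(root_dir: str) -> None:
    """Dump every frame of each camXX.mp4 into a camXX/images/ folder."""
    for video_path in sorted(glob.glob(os.path.join(root_dir, "cam*.mp4"))):
        # e.g. data/dynerf/sear_steak/cam00.mp4 -> data/dynerf/sear_steak/cam00/images
        frame_dir = os.path.join(os.path.splitext(video_path)[0], "images")
        os.makedirs(frame_dir, exist_ok=True)
        capture = cv2.VideoCapture(video_path)
        index = 0
        while True:
            ok, frame = capture.read()
            if not ok:  # end of stream
                break
            cv2.imwrite(os.path.join(frame_dir, f"{index:04d}.png"), frame)
            index += 1
        capture.release()


if __name__ == "__main__":
    extract_frames("data/dynerf/sear_steak")
```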