first-commit

xiaoyuxi 2025-07-08 16:06:29 +08:00
parent 8ee7575f91
commit c41f211309
3 changed files with 1 addition and 10 deletions

View File

@@ -71,7 +71,7 @@ By following these steps, you should have a working environment ready to run the
We gave two examples to illustrate the usage of `SpaTrack2`.
### Type1: Monocular video as input *(Example0)*
```
python inference.py --data_type="RGB" --data_dir="examples" --video_name="protein" --fps=3
python inference.py --data_type="RGB" --data_dir="examples" --video_name="protein" --fps=5
```
### Type2: Customized Posed RGBD video as input *(Example1)*

View File

@@ -170,14 +170,6 @@ if __name__ == "__main__":
        depth_tensor = T.Resize((new_h, new_w))(torch.from_numpy(depth_tensor))
    if viz:
        for i in range(c2w_traj.shape[0]):
            img = (video)[i].permute(1,2,0).cpu().numpy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            cv2.imwrite(os.path.join(out_dir, f'frame_{i:04d}.png'), img)
            point_map_i = point_map[i].cpu().numpy()
            np.save(os.path.join(out_dir, f'point_{i:04d}.npy'), point_map_i)
            np.save(os.path.join(out_dir, f'frame_{i:04d}.npy'), point_map_i[0])
        viser.visualize(video=video[None],
                        tracks=track2d_pred[None][...,:2],
                        visibility=vis_pred[None],filename="test")

View File

@@ -24,7 +24,6 @@ from models.SpaTrackV2.models.tracker3D.spatrack_modules.utils import (
)
from models.SpaTrackV2.models.tracker3D.spatrack_modules.ba import extract_static_from_3DTracks, ba_pycolmap
from models.SpaTrackV2.models.tracker3D.spatrack_modules.pointmap_updator import PointMapUpdator
from models.SpaTrackV2.models.depth_refiner.depth_refiner import TrackStablizer
from models.SpaTrackV2.models.tracker3D.spatrack_modules.alignment import affine_invariant_global_loss
from models.SpaTrackV2.models.tracker3D.delta_utils.upsample_transformer import UpsampleTransformerAlibi