Candle committed on
Commit ·
0d35102
1
Parent(s): 04f0973
stuff
Browse files- docs/video_loop_segment_detection.md +43 -0
- loop_labeler_ui.py +148 -0
- requirements.txt +2 -1
docs/video_loop_segment_detection.md
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Video Loop Segment Detection
|
| 2 |
+
|
| 3 |
+
## Problem Definition
|
| 4 |
+
|
| 5 |
+
Given a video (or animation), the goal is to automatically detect loopable segments, defined by start and end frame indices, such that the segment can be played repeatedly with minimal visible discontinuity at the loop point.
|
| 6 |
+
|
| 7 |
+
### Inputs
|
| 8 |
+
- Video file or sequence of frames
|
| 9 |
+
- (Optional) Preprocessing: downscaling, grayscale conversion, etc.
|
| 10 |
+
|
| 11 |
+
### Outputs
|
| 12 |
+
- List of candidate loop segments: each with start index, end index, and a score indicating loop quality
|
| 13 |
+
|
| 14 |
+
## Approach
|
| 15 |
+
|
| 16 |
+
### 1. Similarity-Based Loop Detection
|
| 17 |
+
- For each possible segment (start, end):
|
| 18 |
+
- Compute similarity between the start and end frames (e.g., MSE, cosine similarity, SSIM)
|
| 19 |
+
- Optionally, use motion energy or perceptual features to adjust similarity
|
| 20 |
+
- Apply a length penalty to discourage very short or very long loops
|
| 21 |
+
- Rank candidates by a combined score
|
| 22 |
+
|
| 23 |
+
### 2. Human Feedback
|
| 24 |
+
- Present detected loop segments to a human annotator
|
| 25 |
+
- Allow manual review, correction, and annotation of loop quality
|
| 26 |
+
- Use feedback to refine detection or train supervised models
|
| 27 |
+
|
| 28 |
+
## Evaluation
|
| 29 |
+
- Quantitative: similarity score, motion energy, length penalty
|
| 30 |
+
- Qualitative: human annotation (good/bad, corrected indices)
|
| 31 |
+
|
| 32 |
+
## Related Work
|
| 33 |
+
- "Automated Video Looping with Progressive Dynamism" (Liao et al., SIGGRAPH 2013): per-pixel loop-period optimization for seamless loops
|
| 34 |
+
- Deep features (CNN, ViT, CLIP) can be used for perceptual similarity, but no SOTA end-to-end loop detector exists as of 2025
|
| 35 |
+
|
| 36 |
+
## Future Directions
|
| 37 |
+
- Use deep video features for similarity
|
| 38 |
+
- Train supervised models with human-annotated loop segments
|
| 39 |
+
- Explore generative models for loop synthesis
|
| 40 |
+
|
| 41 |
+
## References
|
| 42 |
+
- Liao, Z., Joshi, N., and Hoppe, H. "Automated Video Looping with Progressive Dynamism." SIGGRAPH 2013.
|
| 43 |
+
- Other recent works on video similarity, perceptual metrics, and generative video models
|
loop_labeler_ui.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json
import sys
from pathlib import Path

import numpy as np
from PIL import Image, ImageQt
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import (
    QApplication, QFileDialog, QHBoxLayout, QLabel, QListWidget, QPushButton,
    QSlider, QSpinBox, QVBoxLayout, QWidget,
)

| 11 |
+
class LoopLabeler(QWidget):
    """Qt widget for reviewing and correcting video loop-segment candidates.

    Candidate JSON files live in ``data/loops`` (each file is a list of dicts
    carrying ``start``/``end`` frame indices); the matching animated ``.webp``
    clip is looked up in ``data/shots``.  The UI previews the selected loop
    segment, lets the annotator adjust start/end with sliders, and saves the
    corrected list next to the source JSON with a ``.hf.json`` suffix.
    """

    # Preview playback interval in milliseconds (~25 fps).
    PREVIEW_INTERVAL_MS = 40

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Video Loop Segment Labeler")
        self.resize(900, 600)
        self.data_dir = Path("data/loops")   # candidate JSON files
        self.shots_dir = Path("data/shots")  # animated .webp clips
        # NOTE(review): glob("*.json") also matches previously saved
        # *.hf.json annotation files -- confirm that is intended.
        self.json_files = sorted(self.data_dir.glob("*.json"))
        self.current_json = None     # Path of the JSON currently loaded
        self.frames = []             # decoded RGB frames of the current clip
        self.loop_candidates = []    # list of {"start": int, "end": int, ...}
        self.current_candidate = 0   # index into loop_candidates
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_preview)
        self.preview_idx = 0         # frame offset within the looping preview
        self.init_ui()
        if self.json_files:
            self.load_json(self.json_files[0])

    def init_ui(self):
        """Build the widget layout and wire up signals."""
        layout = QVBoxLayout()
        # File selector
        self.file_list = QListWidget()
        for f in self.json_files:
            self.file_list.addItem(f.name)
        self.file_list.currentRowChanged.connect(self.on_file_selected)
        layout.addWidget(self.file_list)
        # Loop candidate selector
        self.candidate_spin = QSpinBox()
        self.candidate_spin.setMinimum(0)
        self.candidate_spin.valueChanged.connect(self.on_candidate_changed)
        layout.addWidget(QLabel("Loop candidate index:"))
        layout.addWidget(self.candidate_spin)
        # Preview
        self.preview_label = QLabel()
        self.preview_label.setFixedSize(320, 320)
        layout.addWidget(self.preview_label)
        # Sliders for start/end -- use the named constant instead of the
        # original magic-int Qt.Orientation(1) workaround.
        self.start_slider = QSlider(Qt.Horizontal)
        self.end_slider = QSlider(Qt.Horizontal)
        self.start_slider.valueChanged.connect(self.on_slider_changed)
        self.end_slider.valueChanged.connect(self.on_slider_changed)
        layout.addWidget(QLabel("Start index"))
        layout.addWidget(self.start_slider)
        layout.addWidget(QLabel("End index"))
        layout.addWidget(self.end_slider)
        # Save button
        self.save_btn = QPushButton("Save annotation")
        self.save_btn.clicked.connect(self.save_annotation)
        layout.addWidget(self.save_btn)
        self.setLayout(layout)

    def load_json(self, json_path):
        """Load loop candidates from *json_path* and decode the matching webp."""
        self.current_json = json_path
        with open(json_path) as f:
            self.loop_candidates = json.load(f)
        # Clamp to 0 so an empty candidate list does not yield maximum == -1.
        self.candidate_spin.setMaximum(max(len(self.loop_candidates) - 1, 0))
        self.current_candidate = 0
        self.candidate_spin.setValue(0)
        # Map back to the source clip.  split(".")[0] (rather than Path.stem)
        # strips compound suffixes, so "clip.hf.json" still finds "clip.webp".
        stem = json_path.name.split(".")[0]
        webp_path = self.shots_dir / f"{stem}.webp"
        self.frames = []
        if webp_path.exists():
            with Image.open(webp_path) as im:
                try:
                    while True:
                        # convert() copies pixel data, so the frames remain
                        # valid after the image file is closed.
                        self.frames.append(im.convert("RGB"))
                        im.seek(im.tell() + 1)
                except EOFError:
                    pass  # reached the last frame of the animation
        self.update_candidate()

    def on_file_selected(self, idx):
        """List-selection handler: load the JSON file at row *idx*."""
        if 0 <= idx < len(self.json_files):
            self.load_json(self.json_files[idx])

    def on_candidate_changed(self, idx):
        """Spin-box handler: switch to candidate *idx* and refresh the UI."""
        self.current_candidate = idx
        self.update_candidate()

    def update_candidate(self):
        """Point the sliders at the current candidate and (re)start playback."""
        # Guard: an empty candidate list or a clip with fewer than two frames
        # cannot be previewed (the original code raised IndexError / set
        # negative slider maxima here).
        if not self.loop_candidates or len(self.frames) < 2:
            self.timer.stop()
            return
        candidate = self.loop_candidates[self.current_candidate]
        start = candidate["start"]
        end = candidate["end"]
        # start ranges over [0, n-2] and end over [1, n-1], keeping start < end
        # representable for an n-frame clip.
        self.start_slider.setMinimum(0)
        self.start_slider.setMaximum(len(self.frames) - 2)
        self.end_slider.setMinimum(1)
        self.end_slider.setMaximum(len(self.frames) - 1)
        self.start_slider.setValue(start)
        self.end_slider.setValue(end)
        self.preview_idx = 0
        self.timer.start(self.PREVIEW_INTERVAL_MS)

    def on_slider_changed(self):
        """Keep start < end and restart the preview from the loop start."""
        start = self.start_slider.value()
        end = self.end_slider.value()
        if start >= end:
            # setValue() re-triggers this handler once; the corrected value
            # then satisfies start < end, so there is no infinite recursion.
            end = start + 1
            self.end_slider.setValue(end)
        self.preview_idx = 0

    def update_preview(self):
        """Timer callback: display the next frame of the looping segment."""
        start = self.start_slider.value()
        end = self.end_slider.value()
        if self.frames and end > start:
            frame = self.frames[start + (self.preview_idx % (end - start))]
            # Hand the RGB buffer straight to Qt.  The original converted
            # RGB -> BGR with OpenCV only to display it as Format_BGR888 --
            # a no-op round trip; Format_RGB888 needs no conversion.  This
            # also removes the per-tick numpy/cv2/QImage imports.
            arr = np.ascontiguousarray(frame.resize((320, 320)))
            h, w, ch = arr.shape
            qimg = QImage(arr.data, w, h, ch * w, QImage.Format_RGB888)
            # QPixmap.fromImage() copies the pixels, so arr may be freed after.
            self.preview_label.setPixmap(QPixmap.fromImage(qimg))
            self.preview_idx += 1

    def save_annotation(self):
        """Write the slider-corrected start/end for the current candidate.

        The full candidate list is saved alongside the source JSON with a
        ``.hf.json`` suffix so the automatic output is never overwritten.
        """
        if self.current_json is None:
            print("No JSON file loaded. Cannot save annotation.")
            return
        if not self.loop_candidates:
            # Guard: the original indexed into an empty list here.
            print("No loop candidates loaded. Cannot save annotation.")
            return
        candidate = self.loop_candidates[self.current_candidate]
        candidate["start"] = self.start_slider.value()
        candidate["end"] = self.end_slider.value()
        out_path = self.current_json.with_suffix(".hf.json")
        with open(out_path, "w") as f:
            json.dump(self.loop_candidates, f, indent=2)
| 143 |
+
|
| 144 |
+
if __name__ == "__main__":
    # Launch the labeler as a standalone Qt application; exec_() blocks
    # until the window closes, and its status code is passed to sys.exit.
    application = QApplication(sys.argv)
    window = LoopLabeler()
    window.show()
    sys.exit(application.exec_())
requirements.txt
CHANGED
|
@@ -4,4 +4,5 @@ numpy
|
|
| 4 |
matplotlib
|
| 5 |
streamlit
|
| 6 |
opencv-python
|
| 7 |
-
scikit-image
|
|
|
|
|
|
| 4 |
matplotlib
|
| 5 |
streamlit
|
| 6 |
opencv-python
|
| 7 |
+
scikit-image
|
| 8 |
+
pyqt5
|