Update README.md
README.md
TIP-I2V is the first dataset comprising over 1.70 million unique, user-provided text and image prompts. Besides the prompts, TIP-I2V also includes videos generated by five state-of-the-art image-to-video models (Pika, Stable Video Diffusion, Open-Sora, I2VGen-XL, and CogVideoX-5B). TIP-I2V contributes to the development of better and safer image-to-video models.

<p align="center">
<img src="https://huggingface.co/datasets/tipi2v/TIP-I2V/resolve/main/assets/teasor.png" width="1000">
</p>

# Datapoint
<p align="center">
<img src="https://huggingface.co/datasets/tipi2v/TIP-I2V/resolve/main/assets/datapoint.png" width="1000">
</p>

# Statistics
<p align="center">
<img src="https://huggingface.co/datasets/tipi2v/TIP-I2V/resolve/main/assets/stat.png" width="1000">
</p>

# Download
For users in mainland China, try setting `export HF_ENDPOINT=https://hf-mirror.com` before downloading.

```python
# Full (text and compressed image) prompts: ~13.4G
from datasets import load_dataset
ds = load_dataset("tipi2v/TIP-I2V", split='Full', streaming=True)

# Convert to Pandas format (it may be slow)
import pandas as pd
df = pd.DataFrame(ds)
```
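
Since `streaming=True` returns an iterable dataset, the prompts can also be inspected lazily instead of being converted to a DataFrame all at once. A minimal sketch; the field names inside each record are not listed in this README, so the loop only prints the keys:

```python
from datasets import load_dataset

# Stream the full prompt set and peek at the first few records without downloading everything.
ds = load_dataset("tipi2v/TIP-I2V", split='Full', streaming=True)

for record in ds.take(3):   # take() keeps only the first n examples of a streaming dataset
    print(record.keys())    # check which fields each prompt record provides
```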

```python
# 100k subset (text and compressed image) prompts: ~0.8G
from datasets import load_dataset
ds = load_dataset("tipi2v/TIP-I2V", split='Subset', streaming=True)

# Convert to Pandas format (it may be slow)
import pandas as pd
df = pd.DataFrame(ds)
```

```python
# 10k TIP-Eval (text and compressed image) prompts: ~0.08G
from datasets import load_dataset
ds = load_dataset("tipi2v/TIP-I2V", split='Eval', streaming=True)

# Convert to Pandas format (it may be slow)
import pandas as pd
df = pd.DataFrame(ds)
```

```python
# Embeddings for full text prompts (~21G) and image prompts (~3.5G)
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="Embedding/Full_Text_Embedding.parquet", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="Embedding/Full_Image_Embedding.parquet", repo_type="dataset")
```

```python
# Embeddings for 100k subset text prompts (~1.2G) and image prompts (~0.2G)
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="Embedding/Subset_Text_Embedding.parquet", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="Embedding/Subset_Image_Embedding.parquet", repo_type="dataset")
```

```python
# Embeddings for 10k TIP-Eval text prompts (~0.1G) and image prompts (~0.02G)
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="Embedding/Eval_Text_Embedding.parquet", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="Embedding/Eval_Image_Embedding.parquet", repo_type="dataset")
```
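
`hf_hub_download` returns the local path of the cached file, so a downloaded embedding table can be loaded directly with pandas. A minimal sketch, assuming the Parquet files are readable with `pandas.read_parquet` (which needs `pyarrow`); the column layout is an assumption, so check the schema after loading:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Download the smallest embedding table (TIP-Eval text prompts) and load it into a DataFrame.
path = hf_hub_download(
    repo_id="tipi2v/TIP-I2V",
    filename="Embedding/Eval_Text_Embedding.parquet",
    repo_type="dataset",
)
emb = pd.read_parquet(path)
print(emb.shape, list(emb.columns)[:5])  # inspect the actual schema before using it
```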
## Download uncompressed image prompts

```python
# Full uncompressed image prompts: ~1T
from huggingface_hub import hf_hub_download
for i in range(1,52):
    hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="image_prompt_tar/image_prompt_%d.tar"%i, repo_type="dataset")
```

```python
# 100k subset uncompressed image prompts: ~69.6G
from huggingface_hub import hf_hub_download
for i in range(1,3):
    hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="sub_image_prompt_tar/sub_image_prompt_%d.tar"%i, repo_type="dataset")
```

```python
# 10k TIP-Eval uncompressed image prompts: ~6.5G
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="eval_image_prompt_tar/eval_image_prompt.tar", repo_type="dataset")
```
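
The image prompts ship as plain tar archives, so after downloading they can be unpacked with Python's standard `tarfile` module. A minimal sketch using the TIP-Eval archive; the output directory name is arbitrary:

```python
import tarfile
from huggingface_hub import hf_hub_download

# Download the 10k TIP-Eval image-prompt archive and unpack it into a local folder.
path = hf_hub_download(
    repo_id="tipi2v/TIP-I2V",
    filename="eval_image_prompt_tar/eval_image_prompt.tar",
    repo_type="dataset",
)
with tarfile.open(path) as tar:
    tar.extractall("eval_image_prompts")
```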
## Download generated videos

```python
# Full videos generated by Pika: ~1T
from huggingface_hub import hf_hub_download
for i in range(1,52):
    hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="pika_videos_tar/pika_videos_%d.tar"%i, repo_type="dataset")
```
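
Instead of 51 separate `hf_hub_download` calls, the whole `pika_videos_tar/` folder can also be fetched in one call with `snapshot_download` and a file pattern. This is an alternative sketch, not part of the original instructions:

```python
from huggingface_hub import snapshot_download

# Download every archive under pika_videos_tar/ (same ~1T total size).
snapshot_download(
    repo_id="tipi2v/TIP-I2V",
    repo_type="dataset",
    allow_patterns="pika_videos_tar/*",
)
```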

```python
# 100k subset videos generated by Pika (~57.6G), Stable Video Diffusion (~38.9G), Open-Sora (~47.2G), I2VGen-XL (~54.4G), and CogVideoX-5B (~36.7G)
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/pika_videos_subset_1.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/pika_videos_subset_2.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/svd_videos_subset.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/opensora_videos_subset.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/i2vgenxl_videos_subset_1.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/i2vgenxl_videos_subset_2.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="subset_videos_tar/cog_videos_subset.tar", repo_type="dataset")
```

```python
# 10k TIP-Eval videos generated by Pika (~5.8G), Stable Video Diffusion (~3.9G), Open-Sora (~4.7G), I2VGen-XL (~5.4G), and CogVideoX-5B (~3.6G)
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="eval_videos_tar/pika_videos_eval.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="eval_videos_tar/svd_videos_eval.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="eval_videos_tar/opensora_videos_eval.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="eval_videos_tar/i2vgenxl_videos_eval.tar", repo_type="dataset")
hf_hub_download(repo_id="tipi2v/TIP-I2V", filename="eval_videos_tar/cog_videos_eval.tar", repo_type="dataset")
```
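
Since the evaluation archives follow a common naming pattern, the five calls above can equivalently be written as a loop; the model abbreviations are taken directly from the file names listed above:

```python
from huggingface_hub import hf_hub_download

# Fetch the 10k TIP-Eval videos for all five image-to-video models.
for model in ["pika", "svd", "opensora", "i2vgenxl", "cog"]:
    hf_hub_download(
        repo_id="tipi2v/TIP-I2V",
        filename=f"eval_videos_tar/{model}_videos_eval.tar",
        repo_type="dataset",
    )
```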

# Comparison with VidProM and DiffusionDB
<p align="center">
<img src="https://huggingface.co/datasets/tipi2v/TIP-I2V/resolve/main/assets/table.png" width="1000">
</p>
<p align="center">
<img src="https://huggingface.co/datasets/tipi2v/TIP-I2V/resolve/main/assets/comparison.png" width="1000">
</p>

Click the [WizMap (TIP-I2V VS VidProM)](https://poloclub.github.io/wizmap/?dataURL=https%3A%2F%2Fhuggingface.co%2Fdatasets%2Ftipi2v%2FTIP-I2V%2Fresolve%2Fmain%2Ftip-i2v-visualize%2Fdata_tip-i2v_vidprom.ndjson&gridURL=https%3A%2F%2Fhuggingface.co%2Fdatasets%2Ftipi2v%2FTIP-I2V%2Fresolve%2Fmain%2Ftip-i2v-visualize%2Fgrid_tip-i2v_vidprom.json) and [WizMap (TIP-I2V VS DiffusionDB)](https://poloclub.github.io/wizmap/?dataURL=https%3A%2F%2Fhuggingface.co%2Fdatasets%2Ftipi2v%2FTIP-I2V%2Fresolve%2Fmain%2Ftip-i2v-visualize%2Fdata_tip-i2v_diffusiondb.ndjson&gridURL=https%3A%2F%2Fhuggingface.co%2Fdatasets%2Ftipi2v%2FTIP-I2V%2Fresolve%2Fmain%2Ftip-i2v-visualize%2Fgrid_tip-i2v_diffusiondb.json) links (each takes about 5 seconds to load) for an interactive visualization of our 1.70 million prompts.