Update README.md (#5) — opened by bweisslt
README.md
CHANGED
|
@@ -198,7 +198,7 @@ export_to_video(video, "output.mp4", fps=24)
|
|
| 198 |
import torch
|
| 199 |
from diffusers import LTXConditionPipeline, LTXLatentUpsamplePipeline
|
| 200 |
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition
|
| 201 |
-
from diffusers.utils import export_to_video, load_image
|
| 202 |
|
| 203 |
pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.7-dev", torch_dtype=torch.bfloat16)
|
| 204 |
pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained("Lightricks/ltxv-spatial-upscaler-0.9.7", vae=pipe.vae, torch_dtype=torch.bfloat16)
|
|
@@ -212,12 +212,12 @@ def round_to_nearest_resolution_acceptable_by_vae(height, width):
|
|
| 212 |
return height, width
|
| 213 |
|
| 214 |
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png")
|
| 215 |
-
video = [image]
|
| 216 |
condition1 = LTXVideoCondition(video=video, frame_index=0)
|
| 217 |
|
| 218 |
-
prompt = "…"  (removed line truncated in this diff extraction — original string not recoverable here)
|
| 219 |
negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"
|
| 220 |
-
expected_height, expected_width = …  (removed line truncated in this diff extraction — original values not recoverable here)
|
| 221 |
downscale_factor = 2 / 3
|
| 222 |
num_frames = 96
|
| 223 |
|
|
|
|
| 198 |
import torch
|
| 199 |
from diffusers import LTXConditionPipeline, LTXLatentUpsamplePipeline
|
| 200 |
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition
|
| 201 |
+
from diffusers.utils import export_to_video, load_image, load_video
|
| 202 |
|
| 203 |
pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.7-dev", torch_dtype=torch.bfloat16)
|
| 204 |
pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained("Lightricks/ltxv-spatial-upscaler-0.9.7", vae=pipe.vae, torch_dtype=torch.bfloat16)
|
|
|
|
| 212 |
return height, width
|
| 213 |
|
| 214 |
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png")
|
| 215 |
+
video = load_video(export_to_video([image])) # compress the image using video compression as the model was trained on videos
|
| 216 |
condition1 = LTXVideoCondition(video=video, frame_index=0)
|
| 217 |
|
| 218 |
+
prompt = "A cute little penguin takes out a book and starts reading it"
|
| 219 |
negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"
|
| 220 |
+
expected_height, expected_width = 480, 832
|
| 221 |
downscale_factor = 2 / 3
|
| 222 |
num_frames = 96
|
| 223 |
|