add tags and inference example with diffusers (#2)
Commit: 9f83f192c4bf294179e85eac45ead1a5db40acef
Co-authored-by: Linoy Tsaban <[email protected]>
README.md CHANGED

@@ -9,6 +9,55 @@ pipeline_tag: text-to-image
 tags:
 - Qwen-Image;
 - distillation;
+- LoRA
+library_name: diffusers
 ---
 
-Please refer to [Qwen-Image-Lightning github](https://github.com/ModelTC/Qwen-Image-Lightning/) to learn how to use the models.
+Please refer to [Qwen-Image-Lightning github](https://github.com/ModelTC/Qwen-Image-Lightning/) to learn how to use the models.
+
+use with diffusers 🧨:
+
+make sure to install diffusers from `main` (`pip install git+https://github.com/huggingface/diffusers.git`)
+```
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
+import torch
+import math
+
+# From https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10
+scheduler_config = {
+    "base_image_seq_len": 256,
+    "base_shift": math.log(3),  # We use shift=3 in distillation
+    "invert_sigmas": False,
+    "max_image_seq_len": 8192,
+    "max_shift": math.log(3),  # We use shift=3 in distillation
+    "num_train_timesteps": 1000,
+    "shift": 1.0,
+    "shift_terminal": None,  # set shift_terminal to None
+    "stochastic_sampling": False,
+    "time_shift_type": "exponential",
+    "use_beta_sigmas": False,
+    "use_dynamic_shifting": True,
+    "use_exponential_sigmas": False,
+    "use_karras_sigmas": False,
+}
+scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
+pipe = DiffusionPipeline.from_pretrained(
+    "Qwen/Qwen-Image", scheduler=scheduler, torch_dtype=torch.bfloat16
+).to("cuda")
+pipe.load_lora_weights(
+    "lightx2v/Qwen-Image-Lightning", weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors"
+)
+
+prompt = "a tiny astronaut hatching from an egg on the moon, Ultra HD, 4K, cinematic composition."
+negative_prompt = " "
+image = pipe(
+    prompt=prompt,
+    negative_prompt=negative_prompt,
+    width=1024,
+    height=1024,
+    num_inference_steps=8,
+    true_cfg_scale=1.0,
+    generator=torch.manual_seed(0),
+).images[0]
+image.save("qwen_fewsteps.png")
+```
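Once the Lightning LoRA is loaded as in the added example, it can optionally be fused into the base transformer weights so the adapter adds no overhead at sampling time. The snippet below is a minimal sketch that continues from the `pipe` constructed in the example above; it assumes the Qwen-Image pipeline exposes the standard diffusers LoRA-loader methods (`fuse_lora` / `unfuse_lora`) and is not part of the diff itself.

```
# Sketch only -- assumes the standard diffusers LoRA-loader API
# (fuse_lora / unfuse_lora) is available on the Qwen-Image pipeline.
# Continues from the `pipe` constructed in the example above.
pipe.fuse_lora()  # merge the LoRA deltas into the transformer weights

image = pipe(
    prompt="a tiny astronaut hatching from an egg on the moon, Ultra HD, 4K, cinematic composition.",
    negative_prompt=" ",
    width=1024,
    height=1024,
    num_inference_steps=8,
    true_cfg_scale=1.0,
    generator=torch.manual_seed(0),
).images[0]
image.save("qwen_fewsteps_fused.png")

pipe.unfuse_lora()  # restore the original, non-fused weights if needed
```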