Update README.md
README.md

@@ -35,7 +35,7 @@ pipeline = QwenImageEditPipeline.from_pretrained(model_path, torch_dtype=torch.b
 print("pipeline loaded") # not true but whatever. do not move to cuda
 
 pipeline.set_progress_bar_config(disable=None)
-pipeline.enable_model_cpu_offload() #if you have enough VRAM
+pipeline.enable_model_cpu_offload() # if you have enough VRAM (about 20GB), replace this line with `pipeline.to("cuda")`
 image = Image.open("./example.png").convert("RGB")
 prompt = "Remove the lady head with white hair"
 inputs = {
@@ -44,7 +44,7 @@ inputs = {
 "generator": torch.manual_seed(0),
 "true_cfg_scale": 4.0,
 "negative_prompt": " ",
-"num_inference_steps": 20,
+"num_inference_steps": 20, # even 10 steps should be enough in many cases
 }
 
 with torch.inference_mode():
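For readers skimming the commit, here is roughly what the full README snippet looks like after this change. This is a sketch assembled from the diff context only: the import path, the `model_path` value, the `torch.bfloat16` completion of the truncated hunk header, the `inputs` keys hidden between the two hunks, and the body of the `with` block are all assumptions, flagged in the comments.

```python
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline  # assumed import path; the diff only shows the class name

# Assumption: the hunks never show how model_path is defined; a Hub id like this is typical.
model_path = "Qwen/Qwen-Image-Edit"
# The hunk header truncates at torch.b...; bfloat16 is an assumed completion.
pipeline = QwenImageEditPipeline.from_pretrained(model_path, torch_dtype=torch.bfloat16)
print("pipeline loaded")  # not true but whatever. do not move to cuda

pipeline.set_progress_bar_config(disable=None)
pipeline.enable_model_cpu_offload()  # if you have enough VRAM (about 20GB), replace this line with pipeline.to("cuda")
image = Image.open("./example.png").convert("RGB")
prompt = "Remove the lady head with white hair"
inputs = {
    # Assumption: the diff skips README lines 42-43, which presumably pass the image and prompt.
    "image": image,
    "prompt": prompt,
    "generator": torch.manual_seed(0),
    "true_cfg_scale": 4.0,
    "negative_prompt": " ",
    "num_inference_steps": 20,  # even 10 steps should be enough in many cases
}

with torch.inference_mode():
    # Assumption: the diff ends at the `with` line; this body follows the usual diffusers pattern.
    output = pipeline(**inputs)
    output.images[0].save("./output.png")
```

The tradeoff behind the first change: `enable_model_cpu_offload()` keeps most weights in system RAM and shuttles submodules to the GPU on demand, so it runs in far less VRAM at some speed cost, while `pipeline.to("cuda")` keeps everything resident on the GPU and, per the comment, needs around 20GB.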