Update README.md

README.md
@@ -43,6 +43,85 @@ Folder Structure

# Blockers

1) NotImplementedError: Cannot copy out of meta tensor; no data! Please use torch.nn.Module.to_empty() instead of torch.nn.Module.to() when moving module from meta to a different device.

Potential fix in `app.diffusion.pipeline.config.py`:

```python
@staticmethod
def _default_build(
    name: str,
    path: str,
    dtype: str | torch.dtype,
    device: str | torch.device,
    shift_activations: bool
) -> DiffusionPipeline:
    # Resolve a default Hugging Face repo id when no explicit path is given.
    if not path:
        if name == "sdxl":
            path = "stabilityai/stable-diffusion-xl-base-1.0"
        elif name == "sdxl-turbo":
            path = "stabilityai/sdxl-turbo"
        elif name == "pixart-sigma":
            path = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS"
        elif name == "flux.1-dev":
            path = "black-forest-labs/FLUX.1-dev"
        elif name == "flux.1-canny-dev":
            path = "black-forest-labs/FLUX.1-Canny-dev"
        elif name == "flux.1-depth-dev":
            path = "black-forest-labs/FLUX.1-Depth-dev"
        elif name == "flux.1-fill-dev":
            path = "black-forest-labs/FLUX.1-Fill-dev"
        elif name == "flux.1-schnell":
            path = "black-forest-labs/FLUX.1-schnell"
        else:
            raise ValueError(f"Path for {name} is not specified.")

    # Instantiate the pipeline
    if name in ["flux.1-canny-dev", "flux.1-depth-dev"]:
        pipeline = FluxControlPipeline.from_pretrained(path, torch_dtype=dtype)
    elif name == "flux.1-fill-dev":
        pipeline = FluxFillPipeline.from_pretrained(path, torch_dtype=dtype)
    elif name.startswith("sana-"):
        if dtype == torch.bfloat16:
            pipeline = SanaPipeline.from_pretrained(
                path, variant="bf16", torch_dtype=dtype, use_safetensors=True
            )
            pipeline.vae.to(dtype)
            pipeline.text_encoder.to(dtype)
        else:
            pipeline = SanaPipeline.from_pretrained(path, torch_dtype=dtype)
    else:
        pipeline = AutoPipelineForText2Image.from_pretrained(path, torch_dtype=dtype)

    # Debug output
    print(">>> DEVICE:", device)
    print(">>> PIPELINE TYPE:", type(pipeline))

    # Try to move each component with .to_empty(), falling back to .to().
    # The loop variable must not shadow the `name` argument, and .to_empty()
    # takes `device` as a keyword-only argument.
    for component in ["unet", "transformer", "vae", "text_encoder"]:
        module = getattr(pipeline, component, None)
        if isinstance(module, torch.nn.Module):
            try:
                print(f">>> Moving {component} to {device} using to_empty()")
                module.to_empty(device=device)
            except Exception as e:
                print(f">>> WARNING: {component}.to_empty(device={device}) failed: {e}")
                try:
                    print(f">>> Falling back to {component}.to({device})")
                    module.to(device)
                except Exception as ee:
                    print(f">>> ERROR: {component}.to({device}) also failed: {ee}")

    # Identify the main model (for patching)
    model = getattr(pipeline, "unet", None) or getattr(pipeline, "transformer", None)
    if model is not None:
        replace_fused_linear_with_concat_linear(model)
        replace_up_block_conv_with_concat_conv(model)
        if shift_activations:
            shift_input_activations(model)
    else:
        print(">>> WARNING: No model (unet/transformer) found for patching")

    return pipeline
```
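
One caveat with the workaround above: `to_empty()` only allocates uninitialized storage on the target device, so if a component really is on the meta device its weights still have to be loaded afterwards (e.g. from a checkpoint state dict), otherwise the pipeline runs on garbage values. A minimal, repo-independent sketch of that pattern; the `nn.Linear` and its state dict are stand-ins for a real submodule and checkpoint:

```python
import torch
import torch.nn as nn

# Build a module on the meta device: parameters have shapes but no storage.
with torch.device("meta"):
    layer = nn.Linear(4, 4)

# to_empty() materializes uninitialized tensors on the target device
# (note that `device` is keyword-only), which is why plain .to() fails here.
layer = layer.to_empty(device="cpu")

# The parameters now hold uninitialized values, so load real weights next.
checkpoint = nn.Linear(4, 4).state_dict()  # stand-in for a real checkpoint
layer.load_state_dict(checkpoint, assign=True)
```

In `_default_build` above this would correspond to re-loading the affected component's weights after `to_empty()` rather than relying on the `.to()` fallback.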

2) KeyError: `<class 'diffusers.models.transformers.transformer_flux.FluxAttention'>`
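
This KeyError usually points at an exact-type lookup in a dict keyed by attention module classes: newer diffusers releases route Flux attention through `FluxAttention` rather than the generic `Attention` class, so a registry built before that refactor has no entry for it. A hedged sketch of the idea; `MODULE_HANDLERS` and `patch_attention` are hypothetical stand-ins for whatever mapping in this repo raises the error:

```python
from diffusers.models.attention_processor import Attention
from diffusers.models.transformers.transformer_flux import FluxAttention  # path taken from the error message


def patch_attention(module):
    # Hypothetical handler; stands in for whatever this repo does per attention module.
    print(f"patching {type(module).__name__}")


# Hypothetical class-keyed registry of the kind that raises KeyError on FluxAttention.
MODULE_HANDLERS = {Attention: patch_attention}

# If the existing handler is compatible with FluxAttention, registering the new
# class avoids the exact-type KeyError:
MODULE_HANDLERS.setdefault(FluxAttention, MODULE_HANDLERS[Attention])
```

Whether the existing handler is actually valid for `FluxAttention` needs checking against this repo's patching code; the alternative is pinning diffusers to an older version where Flux still used the generic `Attention` class.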