Update README.md
Exported with

```bash
optimum-cli export neuron --model black-forest-labs/FLUX.1-schnell --tensor_parallel_size 8 --batch_size 1 --height 1024 --width 1024 --num_images_per_prompt 1 --sequence_length 256 --torch_dtype bfloat16 flux_schnell_neuron_1024_tp8/
```

Or with the Python API:

```python
# [Export]
import torch
from optimum.neuron import NeuronFluxInpaintPipeline

if __name__ == "__main__":
    compiler_args = {"auto_cast": "none"}
    input_shapes = {"batch_size": 1, "height": 1024, "width": 1024, "sequence_length": 256}

    pipe = NeuronFluxInpaintPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell",
        torch_dtype=torch.bfloat16,
        export=True,
        tensor_parallel_size=8,
        **compiler_args,
        **input_shapes,
    )

    # Save locally
    pipe.save_pretrained("flux_schnell_neuron_1024x1024_tp8/")

    # Upload to the HuggingFace Hub
    pipe.push_to_hub(
        "flux_schnell_neuron_1024x1024_tp8/",
        repository_id="Jingya/Flux.1-Schnell-1024x1024-neuronx-tp8",  # Replace with your HF Hub repo id
    )
```
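
Once compiled (via either path above), the artifacts can be reloaded for inference without re-exporting. Below is a minimal sketch, assuming the compiled pipeline follows the usual diffusers `FluxInpaintPipeline` call signature; the image/mask paths and the prompt are placeholders.

```python
# [Inference] -- minimal sketch; image/mask paths and prompt are placeholders.
from diffusers.utils import load_image
from optimum.neuron import NeuronFluxInpaintPipeline

if __name__ == "__main__":
    # Reload the pre-compiled pipeline from the local export directory
    # (or from the Hub repo pushed above).
    pipe = NeuronFluxInpaintPipeline.from_pretrained("flux_schnell_neuron_1024x1024_tp8/")

    init_image = load_image("path/to/image.png")  # image to inpaint (placeholder)
    mask_image = load_image("path/to/mask.png")   # white regions are repainted (placeholder)

    image = pipe(
        prompt="A cat sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        num_inference_steps=4,  # FLUX.1-schnell is a few-step model
    ).images[0]
    image.save("inpainting_result.png")
```

The compiled graphs are static, so generation runs at the shapes fixed at export time (batch size 1, 1024x1024, sequence length 256).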