Update README.md
README.md
@@ -18,10 +18,16 @@ _io32: model input is fp32, model will convert the input to fp16, perform ops in
 
 _io16: model input is fp16, perform ops in fp16 and write the final result in fp16
 
-
-
+## Running
+
+### 1. Using Amuse GUI Application
+Use Amuse GUI application to run it: https://www.amuse-ai.com/
+
+
+### 2. C# Inference Demo
 https://github.com/TensorStack-AI/OnnxStack
 
+```
 // csharp example
 // Create Pipeline
 var pipeline = FluxPipeline.CreatePipeline("D:\\Models\\Flux.1-dev_amdgpu");
@@ -44,5 +50,6 @@ var result = await pipeline.GenerateImageAsync(promptOptions, schedulerOptions);
 // Save Image Result
 await result.SaveAsync("Result.png");
 ```
+
 ## Inference Result
 
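Note that the README's C# example is split across the two hunks above, so the prompt and scheduler setup between them is not part of this diff. The sketch below is one plausible way the pieces fit together: only `FluxPipeline.CreatePipeline`, `GenerateImageAsync`, and `SaveAsync` appear in the commit itself, and the `PromptOptions`/`SchedulerOptions` construction is an assumption that should be checked against the OnnxStack samples.

```
// Sketch only: fills the gap the diff elides between the two hunks.
// The PromptOptions/SchedulerOptions shapes are assumptions, not confirmed
// by this commit; consult https://github.com/TensorStack-AI/OnnxStack.

// Create Pipeline (model path from the README example)
var pipeline = FluxPipeline.CreatePipeline("D:\\Models\\Flux.1-dev_amdgpu");

// Describe what to generate (assumed option object)
var promptOptions = new PromptOptions
{
    Prompt = "A photo of a cat" // hypothetical prompt
};

// Sampling settings (assumed option object; defaults left in place)
var schedulerOptions = new SchedulerOptions();

// Run inference and save the image, as in the README
var result = await pipeline.GenerateImageAsync(promptOptions, schedulerOptions);
await result.SaveAsync("Result.png");
```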