Commit 6072368
1 Parent(s): 78b3ae9

Update README.md
README.md CHANGED
@@ -69,12 +69,18 @@ import torch
 from torch import autocast
 from diffusers import StableDiffusionPipeline
 
-model_id = "CompVis/stable-diffusion-v1-
+model_id = "CompVis/stable-diffusion-v1-3"
 device = "cuda"
 
 
 pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True)
 pipe = pipe.to(device)
+
+prompt = "a photo of an astronaut riding a horse on mars"
+with autocast("cuda"):
+    image = pipe(prompt, guidance_scale=7.5)["sample"][0]
+
+image.save("astronaut_rides_horse.png")
 ```
 
 **Note**:
@@ -84,8 +90,14 @@ If you are limited by GPU memory and have less than 10GB of GPU RAM available, p
 ```py
 import torch
 
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", use_auth_token=True)
 pipe = pipe.to(device)
+
+prompt = "a photo of an astronaut riding a horse on mars"
+with autocast("cuda"):
+    image = pipe(prompt, guidance_scale=7.5)["sample"][0]
+
+image.save("astronaut_rides_horse.png")
 ```
 
 To swap out the noise scheduler, pass it to `from_pretrained`:
@@ -98,6 +110,12 @@ model_id = "CompVis/stable-diffusion-v1-3"
 scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
 pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, use_auth_token=True)
 pipe = pipe.to("cuda")
+
+prompt = "a photo of an astronaut riding a horse on mars"
+with autocast("cuda"):
+    image = pipe(prompt, guidance_scale=7.5)["sample"][0]
+
+image.save("astronaut_rides_horse.png")
 ```
 
 # Uses
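For reference, the three hunks above converge on the same end-to-end usage. The sketch below merges the post-commit README snippets into a single script; it assumes a CUDA GPU, Hub authentication, and the diffusers/torch API of this era (the `["sample"]` output key, `autocast` usage, and the top-level `LMSDiscreteScheduler` import are taken from the surrounding README text, not verified against a specific release).

```py
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

model_id = "CompVis/stable-diffusion-v1-3"
device = "cuda"

# K-LMS scheduler from the last hunk; drop `scheduler=` below to keep the default.
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

# fp16 weights (the revision="fp16" flag this commit adds) to fit under ~10GB of GPU RAM.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    revision="fp16",
    use_auth_token=True,
)
pipe = pipe.to(device)

prompt = "a photo of an astronaut riding a horse on mars"
with autocast("cuda"):
    image = pipe(prompt, guidance_scale=7.5)["sample"][0]

image.save("astronaut_rides_horse.png")
```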