Commit · 9b0dffe
Parent(s): 3cad578

Update README.md
README.md CHANGED
@@ -70,12 +70,18 @@ import torch
 from torch import autocast
 from diffusers import StableDiffusionPipeline
 
-model_id = "CompVis/stable-diffusion-v1-
+model_id = "CompVis/stable-diffusion-v1-2"
 device = "cuda"
 
 
 pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True)
 pipe = pipe.to(device)
+
+prompt = "a photo of an astronaut riding a horse on mars"
+with autocast("cuda"):
+    image = pipe(prompt, guidance_scale=7.5)["sample"][0]
+
+image.save("astronaut_rides_horse.png")
 ```
 
 **Note**:
@@ -85,8 +91,14 @@ If you are limited by GPU memory and have less than 10GB of GPU RAM available, p
 ```py
 import torch
 
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", use_auth_token=True)
 pipe = pipe.to(device)
+
+prompt = "a photo of an astronaut riding a horse on mars"
+with autocast("cuda"):
+    image = pipe(prompt, guidance_scale=7.5)["sample"][0]
+
+image.save("astronaut_rides_horse.png")
 ```
 
 To swap out the noise scheduler, pass it to `from_pretrained`:
@@ -99,6 +111,12 @@ model_id = "CompVis/stable-diffusion-v1-2"
 scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
 pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, use_auth_token=True)
 pipe = pipe.to("cuda")
+
+prompt = "a photo of an astronaut riding a horse on mars"
+with autocast("cuda"):
+    image = pipe(prompt, guidance_scale=7.5)["sample"][0]
+
+image.save("astronaut_rides_horse.png")
 ```
 
 # Uses
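For reference, the full text-to-image snippet as it reads after this commit, assembled from the added lines in the first hunk. This is a minimal sketch: the comments and the four-space indentation inside the `with autocast("cuda"):` block are assumptions, and the `["sample"][0]` access reflects the pipeline output format used by diffusers releases of that period.

```py
from torch import autocast
from diffusers import StableDiffusionPipeline

model_id = "CompVis/stable-diffusion-v1-2"
device = "cuda"

# Loading with use_auth_token=True assumes you are logged in via
# `huggingface-cli login` and have accepted the model license on the Hub.
pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True)
pipe = pipe.to(device)

prompt = "a photo of an astronaut riding a horse on mars"
# Run the pipeline under autocast; "sample" holds the generated PIL images.
with autocast("cuda"):
    image = pipe(prompt, guidance_scale=7.5)["sample"][0]

image.save("astronaut_rides_horse.png")
```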