Image-to-Image
Diffusers
Safetensors
StableDiffusionPipeline
stable-diffusion
jonathanpark committed on
Commit
5f7473f
·
1 Parent(s): ccd3af2

make it load data_uris

Browse files
Files changed (1) hide show
  1. handler.py +30 -25
handler.py CHANGED
@@ -2,7 +2,7 @@ from typing import Dict, List, Any
2
  import torch
3
  from PIL import Image
4
  from io import BytesIO
5
- import requests
6
  from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DDIMScheduler
7
 
8
  # set device
@@ -36,33 +36,38 @@ class EndpointHandler():
36
  prompt = data.pop("inputs", data)
37
  url = data.pop("url", data)
38
 
39
- response = requests.get(url)
40
- init_image = Image.open(BytesIO(response.content)).convert("RGB")
41
- init_image.thumbnail((512, 512))
 
 
42
 
43
 
44
- params = data.pop("parameters", data)
45
 
46
- # hyperparamters
47
- num_inference_steps = params.pop("num_inference_steps", 25)
48
- guidance_scale = params.pop("guidance_scale", 7.5)
49
- negative_prompt = params.pop("negative_prompt", None)
50
- height = params.pop("height", None)
51
- width = params.pop("width", None)
52
- manual_seed = params.pop("manual_seed", -1)
 
53
 
54
- out = None
55
 
56
- generator = torch.Generator(device='cuda')
57
- generator.manual_seed(manual_seed)
58
- # run img2img pipeline
59
- out = self.imgPipe(prompt,
60
- image=init_image,
61
- num_inference_steps=num_inference_steps,
62
- guidance_scale=guidance_scale,
63
- num_images_per_prompt=1,
64
- negative_prompt=negative_prompt
65
- )
 
 
66
 
67
- # return first generated PIL image
68
- return out.images[0]
 
2
  import torch
3
  from PIL import Image
4
  from io import BytesIO
5
+ from urllib import request
6
  from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DDIMScheduler
7
 
8
  # set device
 
36
  prompt = data.pop("inputs", data)
37
  url = data.pop("url", data)
38
 
39
+ with request.urlopen(data_uri) as response:
40
+ data = response.read()
41
+ init_image = Image.open(BytesIO(data)).convert("RGB")
42
+ init_image = Image.open(url)
43
+ init_image.thumbnail((512, 512))
44
 
45
 
46
+ params = data.pop("parameters", data)
47
 
48
+ # hyperparamters
49
+ num_inference_steps = params.pop("num_inference_steps", 25)
50
+ guidance_scale = params.pop("guidance_scale", 7.5)
51
+ negative_prompt = params.pop("negative_prompt", None)
52
+ prompt = params.pop("prompt", None)
53
+ height = params.pop("height", None)
54
+ width = params.pop("width", None)
55
+ manual_seed = params.pop("manual_seed", -1)
56
 
57
+ out = None
58
 
59
+ generator = torch.Generator(device='cuda')
60
+ generator.manual_seed(manual_seed)
61
+ # run img2img pipeline
62
+ out = self.imgPipe(prompt,
63
+ image=init_image,
64
+ num_inference_steps=num_inference_steps,
65
+ guidance_scale=guidance_scale,
66
+ num_images_per_prompt=1,
67
+ negative_prompt=negative_prompt,
68
+ height=height,
69
+ width=width
70
+ )
71
 
72
+ # return first generated PIL image
73
+ return out.images[0]