Texttra committed
Commit 4409dea · verified · 1 Parent(s): 1c6f25b

Create handler.py

Files changed (1)
handler.py  +50 -0
handler.py ADDED
@@ -0,0 +1,50 @@
+from typing import Dict
+import torch
+from diffusers import FluxKontextPipeline
+from io import BytesIO
+import base64
+from PIL import Image
+
+class EndpointHandler:
+    def __init__(self, path: str = ""):
+        print("🚀 Initializing Flux Kontext pipeline...")
+
+        # Load Flux Kontext model from Hugging Face Hub
+        self.pipe = FluxKontextPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-Kontext-dev",  # replace with your specific Kontext model if different
+            torch_dtype=torch.float16,
+        )
+        self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+        print("✅ Model ready.")
+
+    def __call__(self, data: Dict) -> Dict:
+        print("🔧 Received data:", data)
+
+        inputs = data.get("inputs", {})
+        prompt = inputs.get("prompt")
+        image_base64 = inputs.get("image")
+
+        if not prompt or not image_base64:
+            return {"error": "Both 'prompt' and 'image' inputs are required."}
+
+        # Decode input image from base64
+        image_bytes = base64.b64decode(image_base64)
+        image = Image.open(BytesIO(image_bytes)).convert("RGB")
+
+        # Generate edited image with Kontext
+        output = self.pipe(
+            prompt=prompt,
+            image=image,
+            num_inference_steps=28,  # Kontext standard setting
+            guidance_scale=3.5,
+        ).images[0]
+
+        print("🎨 Image generated.")
+
+        # Encode output image to base64
+        buffer = BytesIO()
+        output.save(buffer, format="PNG")
+        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
+
+        print("✅ Returning image.")
+        return {"image": base64_image}
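For reference, a minimal client-side sketch (not part of this commit) showing how a deployed Inference Endpoint running this handler could be called. The endpoint URL, token, and file names below are placeholders/assumptions, not values from the repository; the payload shape mirrors what __call__ reads from data["inputs"] and the {"image": <base64 PNG>} response it returns.

import base64
import requests

# Hypothetical values -- replace with your own endpoint URL and access token.
ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "hf_..."

# Encode the input image as base64, matching the "image" field the handler expects.
with open("input.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "inputs": {
        "prompt": "Make the sky look like a sunset",
        "image": image_b64,
    }
}

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
    json=payload,
)

# The handler returns {"image": <base64-encoded PNG>}; decode and save it.
result = response.json()
with open("edited.png", "wb") as f:
    f.write(base64.b64decode(result["image"]))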