Texttra committed
Commit 6f92f77 · verified · 1 Parent(s): f08dfbf

Update handler.py

Files changed (1)
  1. handler.py +23 -13
handler.py CHANGED
@@ -9,43 +9,53 @@ class EndpointHandler:
     def __init__(self, path: str = ""):
         print("🚀 Initializing Flux Kontext pipeline...")
 
-        # Load Flux Kontext model from Hugging Face Hub
+        # Load Flux Kontext model
         self.pipe = FluxKontextPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-Kontext-dev",  # replace if using your own model repo
+            "black-forest-labs/FLUX.1-Kontext-dev",
            torch_dtype=torch.float16,
         )
         self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
         print("✅ Model ready.")
 
     def __call__(self, data: Dict) -> Dict:
-        print("🔧 Received data:", data)
+        print("🔧 Received raw data type:", type(data))
+        print("🔧 Received raw data content:", data)
 
-        # Validate data structure
-        inputs = data.get("inputs")
-        if not inputs or not isinstance(inputs, dict):
-            return {"error": "'inputs' must be a JSON object containing 'prompt' and 'image'."}
+        # Defensive parsing
+        if isinstance(data, dict):
+            # Some endpoints send data directly as prompt/image dict
+            prompt = data.get("prompt")
+            image_input = data.get("image")
 
-        prompt = inputs.get("prompt")
-        image_input = inputs.get("image")
+            # If 'inputs' key is used (as per HF Inference default schema)
+            if prompt is None and image_input is None:
+                inputs = data.get("inputs")
+                if isinstance(inputs, dict):
+                    prompt = inputs.get("prompt")
+                    image_input = inputs.get("image")
+                else:
+                    return {"error": "Expected 'inputs' to be a JSON object containing 'prompt' and 'image'."}
+        else:
+            return {"error": "Input payload must be a JSON object."}
 
         if not prompt:
-            return {"error": "'prompt' is required in 'inputs'."}
+            return {"error": "Missing 'prompt' in input data."}
         if not image_input:
-            return {"error": "'image' (base64 encoded string) is required in 'inputs'."}
+            return {"error": "Missing 'image' (base64) in input data."}
 
         # Decode image from base64
         try:
             image_bytes = base64.b64decode(image_input)
             image = Image.open(BytesIO(image_bytes)).convert("RGB")
         except Exception as e:
-            return {"error": f"Failed to decode 'image' input as base64: {str(e)}"}
+            return {"error": f"Failed to decode 'image' as base64 PNG: {str(e)}"}
 
         # Generate edited image with Kontext
         try:
             output = self.pipe(
                 prompt=prompt,
                 image=image,
-                num_inference_steps=28,  # Kontext standard
+                num_inference_steps=28,
                 guidance_scale=3.5
             ).images[0]
             print("🎨 Image generated.")
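For context, the defensive parsing added in this commit means the endpoint now accepts either a flat {"prompt", "image"} payload or the default {"inputs": {...}} wrapper. Below is a minimal client-side sketch of both shapes, assuming a deployed Inference Endpoint; the endpoint URL, token, local file name, and prompt text are placeholders, and the response format depends on the part of handler.py not shown in this hunk.

import base64
import requests

# Placeholders: substitute your own endpoint URL and access token.
ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "hf_xxx"

# The handler expects the image as a base64-encoded string.
with open("input.png", "rb") as f:  # hypothetical local file
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

headers = {
    "Authorization": f"Bearer {HF_TOKEN}",
    "Content-Type": "application/json",
}

# Shape 1: flat payload, read directly via data.get("prompt") / data.get("image")
flat_payload = {"prompt": "Make the sky purple", "image": image_b64}

# Shape 2: wrapped in "inputs", per the default Inference Endpoints schema
wrapped_payload = {"inputs": {"prompt": "Make the sky purple", "image": image_b64}}

response = requests.post(ENDPOINT_URL, headers=headers, json=wrapped_payload)
print(response.status_code)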