add custom handler
- __pycache__/handler.cpython-38.pyc +0 -0
- handler.py +94 -0
- requirements.txt +3 -0
__pycache__/handler.cpython-38.pyc
ADDED
Binary file (3.42 kB)
handler.py
ADDED
@@ -0,0 +1,94 @@
from typing import Dict, List, Any

import base64
import math
import numpy as np
import tensorflow as tf
from tensorflow import keras

from keras_cv.models.generative.stable_diffusion.constants import _ALPHAS_CUMPROD
from keras_cv.models.generative.stable_diffusion.diffusion_model import DiffusionModel

class MyEndpointHandler():
    def __init__(self, path=""):
        self.seed = None

        img_height = 512
        img_width = 512
        self.img_height = round(img_height / 128) * 128
        self.img_width = round(img_width / 128) * 128

        self.MAX_PROMPT_LENGTH = 77
        self.diffusion_model = DiffusionModel(self.img_height, self.img_width, self.MAX_PROMPT_LENGTH)

    def _get_initial_diffusion_noise(self, batch_size, seed):
        if seed is not None:
            return tf.random.stateless_normal(
                (batch_size, self.img_height // 8, self.img_width // 8, 4),
                seed=[seed, seed],
            )
        else:
            return tf.random.normal(
                (batch_size, self.img_height // 8, self.img_width // 8, 4)
            )

    def _get_initial_alphas(self, timesteps):
        alphas = [_ALPHAS_CUMPROD[t] for t in timesteps]
        alphas_prev = [1.0] + alphas[:-1]

        return alphas, alphas_prev

    def _get_timestep_embedding(self, timestep, batch_size, dim=320, max_period=10000):
        half = dim // 2
        freqs = tf.math.exp(
            -math.log(max_period) * tf.range(0, half, dtype=tf.float32) / half
        )
        args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs
        embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0)
        embedding = tf.reshape(embedding, [1, -1])
        return tf.repeat(embedding, batch_size, axis=0)

    def __call__(self, data: Dict[str, Any]) -> str:
        # get inputs
        tmp_data = data.pop("inputs", data)

        context = base64.b64decode(tmp_data[0])
        context = np.frombuffer(context, dtype="float32")
        context = np.reshape(context, (1, 77, 768))

        unconditional_context = base64.b64decode(tmp_data[1])
        unconditional_context = np.frombuffer(unconditional_context, dtype="float32")
        unconditional_context = np.reshape(unconditional_context, (1, 77, 768))

        batch_size = data.pop("batch_size", 1)

        num_steps = data.pop("num_steps", 50)
        unconditional_guidance_scale = data.pop("unconditional_guidance_scale", 7.5)

        latent = self._get_initial_diffusion_noise(batch_size, self.seed)

        # Iterative reverse diffusion stage
        timesteps = tf.range(1, 1000, 1000 // num_steps)
        alphas, alphas_prev = self._get_initial_alphas(timesteps)
        progbar = keras.utils.Progbar(len(timesteps))
        iteration = 0
        for index, timestep in list(enumerate(timesteps))[::-1]:
            latent_prev = latent  # Set aside the previous latent vector
            t_emb = self._get_timestep_embedding(timestep, batch_size)
            unconditional_latent = self.diffusion_model.predict_on_batch(
                [latent, t_emb, unconditional_context]
            )
            latent = self.diffusion_model.predict_on_batch([latent, t_emb, context])
            latent = unconditional_latent + unconditional_guidance_scale * (
                latent - unconditional_latent
            )
            a_t, a_prev = alphas[index], alphas_prev[index]
            pred_x0 = (latent_prev - math.sqrt(1 - a_t) * latent) / math.sqrt(a_t)
            latent = latent * math.sqrt(1.0 - a_prev) + math.sqrt(a_prev) * pred_x0
            iteration += 1
            progbar.update(iteration)

        latent_b64 = base64.b64encode(latent.numpy().tobytes())
        latent_b64str = latent_b64.decode()

        return latent_b64str
requirements.txt
ADDED
@@ -0,0 +1,3 @@
keras-cv
tensorflow
tensorflow_datasets
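
For reference, a minimal sketch of how a client might exercise this handler locally, assuming handler.py is importable from the working directory. The encode_array helper and the random (1, 77, 768) arrays are illustrative stand-ins for real text-encoder embeddings; the handler itself only requires base64-encoded float32 buffers of that shape and returns the final latent as a base64 string. Note that instantiating the handler builds the keras_cv DiffusionModel, which loads its pretrained weights.

import base64
import numpy as np

from handler import MyEndpointHandler

def encode_array(arr: np.ndarray) -> str:
    # Serialize a float32 array the way the handler expects: raw bytes -> base64 -> str.
    return base64.b64encode(arr.astype("float32").tobytes()).decode()

# Stand-in embeddings; in practice these come from the Stable Diffusion text encoder.
context = np.random.randn(1, 77, 768).astype("float32")
unconditional_context = np.random.randn(1, 77, 768).astype("float32")

handler = MyEndpointHandler()
payload = {
    "inputs": [encode_array(context), encode_array(unconditional_context)],
    "num_steps": 25,                       # fewer steps than the default 50, for a quick test
    "unconditional_guidance_scale": 7.5,
}
latent_b64str = handler(payload)

# The handler returns the final latent as a base64 string; decode it back to an array.
latent = np.frombuffer(base64.b64decode(latent_b64str), dtype="float32")
latent = latent.reshape(1, 64, 64, 4)      # img_height // 8 == img_width // 8 == 64
print(latent.shape)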