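The custom training loop below relies on objects created earlier in the guide: the VariationalAutoEncoder model, an optimizer, a reconstruction loss, a running-mean metric, and a tf.data pipeline over flattened MNIST digits. Here is a minimal sketch of that setup using the same variable names (the MNIST preprocessing shown is an assumption based on the 784-dimensional inputs used later in this section):

import tensorflow as tf

# Model, optimizer, loss, and metric used by the training loop below.
vae = VariationalAutoEncoder(784, 64, 32)  # subclassed Model defined earlier in the guide
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()

# Flattened MNIST images, scaled to [0, 1], batched into a tf.data.Dataset.
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(buffer_size=1024).batch(64)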
epochs = 2

# Iterate over epochs.
for epoch in range(epochs):
    print("Start of epoch %d" % (epoch,))

    # Iterate over the batches of the dataset.
    for step, x_batch_train in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            reconstructed = vae(x_batch_train)
            # Compute reconstruction loss
            loss = mse_loss_fn(x_batch_train, reconstructed)
            loss += sum(vae.losses)  # Add KLD regularization loss

        grads = tape.gradient(loss, vae.trainable_weights)
        optimizer.apply_gradients(zip(grads, vae.trainable_weights))

        loss_metric(loss)

        if step % 100 == 0:
            print("step %d: mean loss = %.4f" % (step, loss_metric.result()))
Start of epoch 0
step 0: mean loss = 0.3577
step 100: mean loss = 0.1258
step 200: mean loss = 0.0994
step 300: mean loss = 0.0893
step 400: mean loss = 0.0843
step 500: mean loss = 0.0809
step 600: mean loss = 0.0788
step 700: mean loss = 0.0772
step 800: mean loss = 0.0760
step 900: mean loss = 0.0750
Start of epoch 1
step 0: mean loss = 0.0747
step 100: mean loss = 0.0740
step 200: mean loss = 0.0735
step 300: mean loss = 0.0730
step 400: mean loss = 0.0727
step 500: mean loss = 0.0723
step 600: mean loss = 0.0720
step 700: mean loss = 0.0717
step 800: mean loss = 0.0715
step 900: mean loss = 0.0712
Note that since the VAE subclasses Model, it comes with built-in training loops. So you could also have trained it like this:
vae = VariationalAutoEncoder(784, 64, 32)

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=2, batch_size=64)
Epoch 1/2
938/938 [==============================] - 1s 1ms/step - loss: 0.0745
Epoch 2/2
938/938 [==============================] - 1s 1ms/step - loss: 0.0676

<tensorflow.python.keras.callbacks.History at 0x15f10e150>
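The last line above is simply the notebook echoing fit()'s return value: fit() returns a tf.keras.callbacks.History object whose history attribute maps each metric name to its per-epoch values. For example, you could capture and inspect it like this (the history variable name is just illustrative):

history = vae.fit(x_train, x_train, epochs=2, batch_size=64)
print(history.history["loss"])  # one mean loss value per epoch (two values here)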
Beyond object-oriented development: the Functional API
Was this example too much object-oriented development for you? You can also build models using the Functional API. Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix and match.

For instance, the Functional API example below reuses the same Sampling layer we defined in the example above; a sketch of that layer follows as a reminder.
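The Sampling layer draws z from the Gaussian parameterized by z_mean and z_log_var via the reparameterization trick. The sketch below may differ in minor details from the original definition earlier in the guide (imports are included so it stands alone):

import tensorflow as tf
from tensorflow.keras import layers


class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        # Reparameterization trick: z = mean + std * epsilon, with epsilon ~ N(0, I).
        epsilon = tf.random.normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon

With the Sampling layer available, here is the Functional API version of the VAE: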
original_dim = 784
intermediate_dim = 64
latent_dim = 32

# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")

# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")

# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")

# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)

# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
Epoch 1/3
938/938 [==============================] - 1s 1ms/step - loss: 0.0747
Epoch 2/3
938/938 [==============================] - 1s 1ms/step - loss: 0.0676
Epoch 3/3
938/938 [==============================] - 1s 1ms/step - loss: 0.0676
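Because the encoder and decoder are standalone Functional models, you can also use them on their own after training. As a quick, hypothetical usage sketch (sampling latent vectors from a standard normal prior is an assumption for illustration, not part of the guide):

# Decode a few random latent vectors into 784-dimensional outputs.
random_latents = tf.random.normal(shape=(5, latent_dim))
generated = decoder.predict(random_latents)  # shape (5, 784), values in [0, 1]

# Or round-trip some inputs through the full VAE.
reconstructions = vae.predict(x_train[:5])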