class ConvAutoEncoder(HyperModel):
def __init__(self, input_shape, is_deeper=False):
--snip--
""" encoder """
def encoder(self, input_img, hp):
    """Build the convolutional encoder stack on top of ``input_img``.

    Args:
        input_img: 4-D image tensor; the trailing comment on the first conv
            suggests 224 x 224 spatial input -- TODO confirm upstream.
        hp: hyperparameter object (unused in the visible lines; presumably
            consumed in the elided middle section -- verify).

    Returns:
        Not visible in this excerpt (the method is cut off after the final
        Dense layer).
    """
    conv1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)  # 224 x 224 x 32
    conv1 = tf.keras.layers.BatchNormalization()(conv1)
    conv1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    --snip--
    conv6 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    conv6 = tf.keras.layers.BatchNormalization()(conv6)
    conv6 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv6)
    conv6 = tf.keras.layers.BatchNormalization()(conv6)
    # NOTE(review): there is no Flatten before this Dense. Keras Dense applied
    # to a rank-4 tensor operates on the LAST axis only and preserves the
    # spatial dimensions, so the output here is (batch, H, W, 16) -- reportedly
    # (14, 14, 16) given the (14, 14, 512) conv6 activation claimed in the
    # original review comment (the pooling layers that would establish H and W
    # are elided above, so the exact spatial size cannot be confirmed from this
    # excerpt). If a flat latent vector was intended as the bottleneck, insert
    # tf.keras.layers.Flatten() (or a global pooling layer) before this Dense
    # -- TODO confirm intent with the author.
    dense = tf.keras.layers.Dense(16, activation='relu', name='encoder')(conv6)
class ConvAutoEncoder(HyperModel):
def __init__(self, input_shape, is_deeper=False):
--snip--
""" decoder """
def decoder(self, dense, hp):
    """Build the convolutional decoder stack from the encoder output.

    Args:
        dense: output of the encoder's final ``Dense(16, name='encoder')``
            layer. NOTE(review): because the encoder applies that Dense
            WITHOUT flattening, this is a rank-4 tensor of shape
            (batch, H, W, 16), which is why the Conv2D below can consume it
            directly. If the encoder were changed to emit a flat (batch, 16)
            vector, a tf.keras.layers.Reshape would be required here first
            -- confirm against the encoder definition.
        hp: hyperparameter object (unused in the visible lines -- verify).

    Returns:
        Not visible in this excerpt (the method continues past the shown
        lines).
    """
    # NOTE(review): the original trailing comment claimed "56 x 56 x 128",
    # which does not match the 256 filters used here; the spatial size also
    # cannot be confirmed from this excerpt -- verify both against the full
    # model before trusting either number.
    conv5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(dense)
    conv5 = tf.keras.layers.BatchNormalization()(conv5)
    conv5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv5)
    conv5 = tf.keras.layers.BatchNormalization()(conv5)