
I use TensorFlow's tf.data Dataset such that y is a dictionary of 6 tensors, all of which I use in a single loss function that looks like this:

def CustomLoss():
    def custom_loss(y_true, y_pred):
        # Each ground truth pairs a target tensor ('*_0') with a weight tensor ('*_1').
        a = tf.keras.losses.binary_crossentropy(y_true['a_0'], y_pred[0]) * y_true['a_1']
        b = tf.square(y_true['b_0'] - y_pred[1]) * y_true['b_1']
        c = tf.abs(y_true['c_0'] - y_pred[2]) * y_true['c_1']
        return a + b + c
    return custom_loss

And I have a model with 3 outputs of different shapes. When I compile the model and call the fit method, I get a ValueError:

model.compile(optimizer=optimizer, loss=CustomLoss())
model.fit(dataset, epochs=10)
ValueError: Found unexpected keys that do not correspond to any 
Model output: dict_keys(['a_0', 'a_1', 'b_0', 'b_1', 'c_0', 'c_1']). 
Expected: ['output_0', 'output_1', 'output_2']

where 'output_0', 'output_1', 'output_2' are the names of the output layers.

I figured that naming the output layers by the keys in the dataset should solve the issue, but the problem is that I have 6 tensors in the dataset and only 3 outputs. I'm aware I can assign a separate loss function to every output with a single ground-truth tensor from the dataset, but again, each output needs at least two tensors as ground truth.
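
For reference, the per-output mapping I mean looks like this (a sketch using the output-layer names from the example model below; the loss choices are placeholders). Keras matches each named output to exactly one ground-truth tensor, which is exactly the limitation:

# Sketch of the standard per-output loss mapping: one ground-truth
# tensor per named output, so the weight tensors 'a_1', 'b_1', 'c_1'
# have no place to go.
model.compile(
    optimizer=optimizer,
    loss={
        'output_1': tf.keras.losses.BinaryCrossentropy(),
        'output_2': 'mse',
        'output_3': 'mae',
    },
)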

So far I've used a custom training loop, but I'd rather use the fit method. I'm using TensorFlow 2.3.1.

EDIT:

Example model:

inputs = x = tf.keras.layers.Input((256, 256, 3))
x = tf.keras.applications.ResNet50(include_top=False, weights=None)(x)
x1 = tf.keras.layers.Flatten()(x)
x1 = tf.keras.layers.Dense(2, name='output_1')(x1)
x2 = tf.keras.layers.Conv2D(256, 1, name='output_2')(x)
x3 = tf.keras.layers.Flatten()(x)
x3 = tf.keras.layers.Dense(64, name='output_3')(x3)
model = tf.keras.Model(inputs=inputs, outputs=[x1, x2, x3])

Custom training loop:

avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
for epoch in range(1, epochs+1):
    for batch, (images, labels) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            outputs = model(images, training=True)  # forward pass in training mode
            reg_loss = tf.reduce_sum(model.losses)
            pred_loss = loss(labels, outputs)
            total_loss = tf.reduce_sum(pred_loss) + reg_loss
        grads = tape.gradient(total_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        avg_loss.update_state(total_loss)
    print(f'Epoch {epoch}/{epochs} - Loss: {avg_loss.result().numpy()}')
    avg_loss.reset_states()

Minimal reproducible code:

import tensorflow as tf

def CustomLoss():
    def custom_loss(y_true, y_pred):
        a = tf.keras.losses.binary_crossentropy(y_true['a_0'], y_pred[0]) * y_true['a_1']
        b = tf.square(y_true['b_0'] - y_pred[1]) * y_true['b_1']
        b = tf.reduce_sum(b, axis=(1, 2, 3))
        c = tf.abs(y_true['c_0'] - y_pred[2]) * y_true['c_1']
        c = tf.reduce_sum(c, axis=1)
        return a + b + c
    return custom_loss

dataset = tf.data.Dataset.from_tensors((
    tf.random.uniform((256, 256, 3)),
    {'a_0': [0., 1.], 'a_1': [1.], 'b_0': tf.random.uniform((8, 8, 256)), 'b_1': [1.], 'c_0': tf.random.uniform((64,)), 'c_1': [1.]}
))
dataset = dataset.batch(1)

inputs = x = tf.keras.layers.Input((256, 256, 3))
x = tf.keras.applications.ResNet50(include_top=False, weights=None)(x)
x1 = tf.keras.layers.Flatten()(x)
x1 = tf.keras.layers.Dense(2, name='output_1')(x1)
x2 = tf.keras.layers.Conv2D(256, 1, name='output_2')(x)
x3 = tf.keras.layers.Flatten()(x)
x3 = tf.keras.layers.Dense(64, name='output_3')(x3)
model = tf.keras.Model(inputs=inputs, outputs=[x1, x2, x3])

optimizer = tf.keras.optimizers.Adam(1e-3)
model.compile(optimizer=optimizer, loss=CustomLoss())
model.fit(dataset, epochs=1)

1 Answer

Here is one approach for your case. We will still use a custom training loop, but we will also leverage the convenient .fit method by customizing it. Please check the documentation for more details: Customizing what happens in fit()

Here is a simple demonstration, extending your reproducible code.

import tensorflow as tf

# data set 
dataset = tf.data.Dataset.from_tensors((
    tf.random.uniform((256, 256, 3)),
    {'a_0': [0., 1.], 'a_1': [1.], 'b_0': tf.random.uniform((8, 8, 256)),
     'b_1': [1.], 'c_0': tf.random.uniform((64,)), 'c_1': [1.]}
))
dataset = dataset.batch(1)

# custom loss 
def loss(y_true, y_pred):
    a = tf.keras.losses.binary_crossentropy(y_true['a_0'], y_pred[0]) * y_true['a_1']
    b = tf.square(y_true['b_0'] - y_pred[1]) * y_true['b_1']
    b = tf.reduce_sum(b, axis=(1, 2, 3))
    c = tf.abs(y_true['c_0'] - y_pred[2]) * y_true['c_1']
    c = tf.reduce_sum(c, axis=1)
    return a + b + c

Custom Model

This basically overrides the train_step method, which runs repeatedly over each batch of data.

avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)

class custom_fit(tf.keras.Model):
    def train_step(self, data):
        images, labels = data
        with tf.GradientTape() as tape:
            outputs = self(images, training=True) # forward pass 
            reg_loss = tf.reduce_sum(self.losses)
            pred_loss = loss(labels, outputs)
            total_loss = tf.reduce_sum(pred_loss) + reg_loss
        gradients = tape.gradient(total_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        avg_loss.update_state(total_loss)
        return {"loss": avg_loss.result()}
    
    @property
    def metrics(self):
        # We list our `Metric` objects here so that `reset_states()` can be
        # called automatically at the start of each epoch
        # or at the start of `evaluate()`.
        # If you don't implement this property, you have to call
        # `reset_states()` yourself at the time of your choosing.
        return [avg_loss]
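
If you also need evaluate() (or validation inside fit) to work with the dict-style labels, you can override test_step on the same class in the same spirit. This method is not part of the original answer, just a minimal sketch:

    def test_step(self, data):
        # Evaluation counterpart of train_step: same dict-label loss,
        # but no GradientTape and training=False.
        images, labels = data
        outputs = self(images, training=False)
        reg_loss = tf.reduce_sum(self.losses)
        pred_loss = loss(labels, outputs)
        total_loss = tf.reduce_sum(pred_loss) + reg_loss
        avg_loss.update_state(total_loss)
        return {"loss": avg_loss.result()}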

Build Model

# model 
inputs = x = tf.keras.layers.Input((256, 256, 3))
x = tf.keras.applications.ResNet50(include_top=False, weights=None)(x)
x1 = tf.keras.layers.Flatten()(x)
x1 = tf.keras.layers.Dense(2, name='output_1')(x1)
x2 = tf.keras.layers.Conv2D(256, 1, name='output_2')(x)
x3 = tf.keras.layers.Flatten()(x)
x3 = tf.keras.layers.Dense(64, name='output_3')(x3)

# simply pass inputs and outputs to the custom model
custom_model = custom_fit(inputs=[inputs], 
                          outputs=[x1, x2, x3])

Compile and Fit

custom_model.compile(optimizer='adam')
custom_model.fit(dataset, epochs=5, verbose=2)

Epoch 1/5
1/1 - 6s - loss: 73784.0078
Epoch 2/5
1/1 - 1s - loss: 64882.8984
Epoch 3/5
1/1 - 1s - loss: 54760.2500
Epoch 4/5
1/1 - 1s - loss: 47696.7031
Epoch 5/5
1/1 - 1s - loss: 40574.6328
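
Note that no loss argument is passed to compile() here: train_step computes the loss itself, so Keras's built-in matching of targets to named outputs (the source of the original ValueError) is bypassed entirely.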
Comment: will this method work if I still have `x1 and x2` as model outputs, yet I only want to compute a single loss for `x3`? I essentially want to ignore x1 and x2 model outputs – HuckleberryFinn Mar 02 '23 at 14:41
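
Yes in principle: since train_step owns the loss computation, the unused outputs can simply be left out of it. A minimal sketch of the relevant change inside train_step (assuming the 'c_0'/'c_1' labels belong to x3):

        with tf.GradientTape() as tape:
            outputs = self(images, training=True)
            # Only the third output contributes to the loss; outputs[0]
            # and outputs[1] are still produced but get no gradient signal.
            pred_loss = tf.reduce_sum(
                tf.abs(labels['c_0'] - outputs[2]) * labels['c_1'], axis=1)
            total_loss = tf.reduce_sum(pred_loss) + tf.reduce_sum(self.losses)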