Gradient tape error

I am running into this error while executing the gradient penalty for a WGAN model:

File c:\Users\Mega PC\Desktop\Internship\GANs\GANs_for_Credit_Card_Data-master\GAN_171103.py:405, in adversarial_training_WGAN(arguments, train, data_cols, label_cols, seed, starting_step)
403 # update f by taking an SGD step on mini-batch loss LD(f)
404 disc_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.9)
→ 405 disc_optimizer.minimize(_disc_loss, var_list=discriminator_model.trainable_weights)
407 sess = K.get_session()
409 # Compile models

File c:\Users\Mega PC\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\optimizers\optimizer.py:542, in _BaseOptimizer.minimize(self, loss, var_list, tape)
521 def minimize(self, loss, var_list, tape=None):
522 """Minimize loss by updating var_list.
523
524 This method simply computes gradient using tf.GradientTape and calls
(…)
540 None
541 """
→ 542 grads_and_vars = self.compute_gradients(loss, var_list, tape)
543 self.apply_gradients(grads_and_vars)

File c:\Users\Mega PC\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\optimizers\optimizer.py:261, in _BaseOptimizer.compute_gradients(self, loss, var_list, tape)
243 """Compute gradients of loss on trainable variables.
244
245 Args:
(…)
258 gradient can be None.
259 """
260 if not callable(loss) and tape is None:
→ 261 raise ValueError(
262 "tape is required when a Tensor loss is passed. "
263 f"Received: loss={loss}, tape={tape}."
264 )
265 if tape is None:
266 tape = tf.GradientTape()

ValueError: tape is required when a Tensor loss is passed. Received: loss=Tensor("add_31:0", shape=(), dtype=float32), tape=None.
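
From the Keras source shown in the traceback, minimize() apparently accepts the loss either as a zero-argument callable, or as a Tensor together with an explicit tf.GradientTape. A minimal toy example of the two call forms as I understand them (a single variable, not my actual model):

import tensorflow as tf

w = tf.Variable(2.0)
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)

# Form 1: pass a zero-argument callable; minimize() records it on its own tape.
opt.minimize(lambda: tf.square(w - 1.0), var_list=[w])

# Form 2: compute a Tensor loss under an explicit tape and pass the tape along.
with tf.GradientTape() as tape:
    loss = tf.square(w - 1.0)
opt.minimize(loss, var_list=[w], tape=tape)

My code builds the loss as a symbolic graph Tensor (TF1-style), so I'm not sure either form maps onto it directly. The function where line 405 lives is below: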

def adversarial_training_WGAN(arguments, train, data_cols, label_cols=[], seed=0, starting_step=0):
    [rand_dim, nb_steps, batch_size, k_d, k_g, critic_pre_train_steps, log_interval, learning_rate, base_n_count,
     data_dir, generator_model_path, discriminator_model_path, loss_pickle_path, show] = arguments

    np.random.seed(seed)  # set random seed

    data_dim = len(data_cols)
    print('data_dim: ', data_dim)
    print('data_cols: ', data_cols)

    label_dim = 0
    with_class = False
    if len(label_cols) > 0:
        with_class = True
        label_dim = len(label_cols)
        print('label_dim: ', label_dim)
        print('label_cols: ', label_cols)

    # Define network models
    tf.random.set_seed(seed)  # set random seed for TensorFlow 2.x
    K.set_learning_phase(1)  # 1 = train

    if with_class:
        cache_prefix = 'WCGAN'
        generator_model, discriminator_model, combined_model = define_models_CGAN(rand_dim, data_dim, label_dim,
                                                                                 base_n_count, type='Wasserstein')
    else:
        cache_prefix = 'WGAN'
        generator_model, discriminator_model, combined_model = define_models_GAN(rand_dim, data_dim, base_n_count,
                                                                                type='Wasserstein')

    # Construct computation graph for calculating the gradient penalty (improved WGAN) and training the discriminator
    _z = tf.keras.Input(shape=(rand_dim,))

    _labels = None
    if with_class:
        _x = tf.keras.Input(shape=(data_dim + label_dim,))
        _labels = tf.keras.Input(shape=(label_dim,))
        _g_z = generator_model(inputs=[_z, _labels])
    else:
        _x = tf.keras.Input(shape=(data_dim,))
        _g_z = generator_model(_z)

    epsilon = tf.keras.Input(shape=(batch_size, 1))

    x_hat = epsilon * _x + (1.0 - epsilon) * _g_z
    x_hat_reshaped = tf.reshape(x_hat, shape=(-1, 4))
    gradients = tf.gradients(discriminator_model(x_hat_reshaped), [x_hat_reshaped])

    # gradients = tf.gradients(discriminator_model(x_hat), [x_hat])
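    # WGAN-GP term: 10 * (||grad_{x_hat} D(x_hat)||_2 - 1)^2, pushing the critic towards 1-Lipschitz.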
    _gradient_penalty = 10.0 * tf.square(tf.norm(gradients[0], ord=2) - 1.0)

    # Calculate discriminator's loss
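    # (negative critic estimate of the Wasserstein distance, E[D(G(z))] - E[D(x)], plus the penalty above)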
    _disc_loss_generated = em_loss(tf.ones(batch_size), discriminator_model(_g_z))
    _disc_loss_real = em_loss(tf.ones(batch_size), discriminator_model(_x))
    _disc_loss = _disc_loss_generated - _disc_loss_real + _gradient_penalty

    # update f by taking an SGD step on mini-batch loss LD(f)
    disc_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.9)
    disc_optimizer.minimize(_disc_loss, var_list=discriminator_model.trainable_weights)
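    # ^ this is line 405 from the traceback: _disc_loss is a plain Tensor and no tape is passed,
    # which is what raises the ValueError above.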

    sess = K.get_session()

    # Compile models
    adam = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5, beta_2=0.9)
    discriminator_model.trainable = False
    combined_model.compile(optimizer=adam, loss=[em_loss])

    combined_loss, disc_loss_generated, disc_loss_real, xgb_losses = [], [], [], []

    model_components = [cache_prefix, with_class, starting_step,
                        train, data_cols, data_dim,
                        label_cols, label_dim,
                        generator_model, discriminator_model, combined_model,
                        rand_dim, nb_steps, batch_size,
                        k_d, k_g, critic_pre_train_steps, log_interval, learning_rate, base_n_count,
                        data_dir, generator_model_path, discriminator_model_path,
                        _z, _x, _labels, _g_z, epsilon, x_hat_reshaped, gradients, _gradient_penalty,
                        _disc_loss_generated, _disc_loss_real, _disc_loss, disc_optimizer,
                        show,
                        combined_loss, disc_loss_generated, disc_loss_real, xgb_losses]

    if show:
        print(generator_model.summary())
        print(discriminator_model.summary())
        print(combined_model.summary())

    if loss_pickle_path:
        print('Loading loss pickles')
        [combined_loss, disc_loss_generated, disc_loss_real, xgb_losses] = pickle.load(open(loss_pickle_path, 'rb'))
    if generator_model_path:
        print('Loading generator model')
        generator_model.load_weights(generator_model_path)
    if discriminator_model_path:
        print('Loading discriminator model')
        discriminator_model.load_weights(discriminator_model_path)
    else:
        print('Pre-training the critic...')
        K.set_learning_phase(1)  # 1 = train
        for i in range(critic_pre_train_steps):
            if i % 20 == 0:
                print('Step: {} of {} critic pre-training.'.format(i, critic_pre_train_steps))
            loss = train_discriminator_step(model_components, seed=i)
        print('Last batch of critic pre-training disc_loss: {}.'.format(loss))

    model_components = [cache_prefix, with_class, starting_step,
                        train, data_cols, data_dim,
                        label_cols, label_dim,
                        generator_model, discriminator_model, combined_model,
                        rand_dim, nb_steps, batch_size,
                        k_d, k_g, critic_pre_train_steps, log_interval, learning_rate, base_n_count,
                        data_dir, generator_model_path, discriminator_model_path,
                        _z, _x, _labels, _g_z, epsilon, x_hat_reshaped, gradients, _gradient_penalty,
                        _disc_loss_generated, _disc_loss_real, _disc_loss, disc_optimizer,
                        show,
                        combined_loss, disc_loss_generated, disc_loss_real, xgb_losses]

    [combined_loss, disc_loss_generated, disc_loss_real, xgb_losses] = training_steps_WGAN(model_components)
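
Do I need to rewrite the critic update as an eager-mode training step instead of calling minimize() on a graph Tensor? A rough, untested sketch of what I have in mind (critic_train_step, gp_weight, x_batch and z_batch are names I made up, not from the original code):

import tensorflow as tf

def critic_train_step(x_batch, z_batch, generator_model, discriminator_model,
                      disc_optimizer, gp_weight=10.0):
    with tf.GradientTape() as tape:
        g_z = generator_model(z_batch, training=True)

        # Random interpolation between real and generated samples.
        eps = tf.random.uniform([tf.shape(x_batch)[0], 1], 0.0, 1.0)
        x_hat = eps * x_batch + (1.0 - eps) * g_z

        # Gradient penalty from a nested tape watching x_hat.
        with tf.GradientTape() as gp_tape:
            gp_tape.watch(x_hat)
            d_hat = discriminator_model(x_hat, training=True)
        gp_grads = gp_tape.gradient(d_hat, x_hat)
        grad_norm = tf.sqrt(tf.reduce_sum(tf.square(gp_grads), axis=1) + 1e-12)
        gradient_penalty = gp_weight * tf.reduce_mean(tf.square(grad_norm - 1.0))

        # Wasserstein critic loss: E[D(G(z))] - E[D(x)] + penalty.
        disc_loss = (tf.reduce_mean(discriminator_model(g_z, training=True))
                     - tf.reduce_mean(discriminator_model(x_batch, training=True))
                     + gradient_penalty)

    grads = tape.gradient(disc_loss, discriminator_model.trainable_weights)
    disc_optimizer.apply_gradients(zip(grads, discriminator_model.trainable_weights))
    return disc_loss

Is this the right direction, or is there a way to keep the existing graph-style setup working with the new optimizer?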

   

Hello! I noticed your function is making TensorFlow and Keras calls. I would suggest posting your question on the TensorFlow forum to get help from more developers who are familiar with those libraries: https://discuss.tensorflow.org/