
TensorFlow 2.* Network Training (Part 1): compile(optimizer, loss, metrics, loss_weights)

Below is the modified code, with the ConvLSTM2D layer replaced by a ConvRNN2D layer:

```python
import tensorflow as tf
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, ConvRNN2D, Input,
                                     BatchNormalization, LeakyReLU, Flatten, Dense, Reshape)
# Import the functional Model under an alias so it is not shadowed by the
# user-defined `Model` class below.
from tensorflow.keras.models import Model as KerasModel


class Model():
    def __init__(self):
        self.img_seq_shape = (10, 128, 128, 3)
        self.img_shape = (128, 128, 3)
        self.train_img = dataset  # assumes a `dataset` variable defined elsewhere
        patch = int(128 / 2 ** 4)
        self.disc_patch = (patch, patch, 1)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

        self.build_generator = self.build_generator()
        self.build_discriminator = self.build_discriminator()
        self.build_discriminator.compile(loss='binary_crossentropy',
                                         optimizer=self.optimizer,
                                         metrics=['accuracy'])
        self.build_generator.compile(loss='binary_crossentropy', optimizer=self.optimizer)

        img_seq_A = Input(shape=(10, 128, 128, 3))  # input image sequence
        img_B = Input(shape=self.img_shape)         # target image
        fake_B = self.build_generator(img_seq_A)    # generated (fake) target image
        self.build_discriminator.trainable = False
        valid = self.build_discriminator([img_seq_A, fake_B])

        self.combined = KerasModel([img_seq_A, img_B], [valid, fake_B])
        self.combined.compile(loss=['binary_crossentropy', 'mse'],
                              loss_weights=[1, 100],
                              optimizer=self.optimizer,
                              metrics=['accuracy'])

    def build_generator(self):
        def res_net(inputs, filters):  # defined but not used below
            x = inputs
            net = conv2d(x, filters // 2, (1, 1), 1)
            net = conv2d(net, filters, (3, 3), 1)
            net = net + x
            # net = tf.keras.layers.LeakyReLU(0.2)(net)
            return net

        def conv2d(inputs, filters, kernel_size, strides):
            x = Conv2D(filters, kernel_size, strides, 'same')(inputs)
            x = BatchNormalization()(x)
            x = LeakyReLU(alpha=0.2)(x)
            return x

        d0 = Input(shape=(10, 128, 128, 3))
        # ConvRNN2D replaces the original ConvLSTM2D layer here. Note that in
        # some TF/Keras versions ConvRNN2D is a wrapper that expects a
        # convolutional recurrent cell as its first argument rather than
        # filters/kernel_size keywords, so this call may need adjusting.
        out = ConvRNN2D(filters=32, kernel_size=3, padding='same')(d0)
        out = conv2d(out, 3, 1, 1)
        return KerasModel(inputs=d0, outputs=out)

    def build_discriminator(self):
        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            if normalization:
                d = BatchNormalization()(d)
            d = LeakyReLU(alpha=0.2)(d)
            return d

        img_A = Input(shape=self.img_seq_shape)
        img_B = Input(shape=self.img_shape)
        # Note: img_A is a 5-D sequence tensor and img_B is a 4-D image tensor;
        # concatenating them directly requires matching ranks (e.g. selecting or
        # collapsing a frame from the sequence first).
        combined_imgs = tf.keras.layers.concatenate([img_A, img_B])

        d1 = d_layer(combined_imgs, 64, normalization=False)
        d2 = d_layer(d1, 128)
        d3 = d_layer(d2, 256)
        d4 = d_layer(d3, 512)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return KerasModel([img_A, img_B], validity)
```

In the code above, we replaced the ConvLSTM2D layer with a ConvRNN2D layer and added the new `convrnn` module.
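Since the topic of this part is compile(optimizer, loss, metrics, loss_weights), here is a minimal sketch of how the same multi-output compile call used for `self.combined` above weights its two losses. The tiny two-output model (`toy`, with outputs named `valid` and `image`) is hypothetical and only serves to make the call runnable; it is not part of the original code.

```python
import tensorflow as tf
from tensorflow.keras import layers, Model

# Hypothetical toy model with two outputs: a patch validity map and an image.
inp = layers.Input(shape=(128, 128, 3))
x = layers.Conv2D(8, 3, padding='same', activation='relu')(inp)
valid = layers.Conv2D(1, 4, padding='same', activation='sigmoid', name='valid')(x)
image = layers.Conv2D(3, 3, padding='same', name='image')(x)
toy = Model(inp, [valid, image])

toy.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=['binary_crossentropy', 'mse'],  # one loss per output, in output order
    loss_weights=[1, 100],                # total loss = 1 * bce + 100 * mse
    metrics=['accuracy'],                 # applied to each output
)
```

With a list-valued `loss` and `loss_weights`, Keras pairs each loss with the output at the same position and minimizes their weighted sum, which is exactly how the combined GAN model above trades off the adversarial term against the image reconstruction term.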
