具有周期性噪声的 GAN 发生器输出

人工智能 神经网络 卷积神经网络 生成对抗网络 半监督学习
2021-11-01 10:02:03

我正在训练一个半监督 GAN,使用具有形状窗口 (180*80) 的多元时间序列以及下面的生成器和鉴别器架构。我的数据是使用 Robust Scaler 缩放的,所以我为生成器输出保持线性激活。

在训练期间,生成器输出的信号中出现了周期性的噪声,而真实的原始数据是平滑的,我不明白噪声从何而来。产生这种噪声的原因是什么?

生成的信号。另一个生成的信号。

def make_generator_model(noise):
    """Build the SGAN generator: latent vector (100,) -> (180, 65) window.

    NOTE(review): the `noise` argument is never used inside this function —
    the latent input is created with `Input(shape=(100,))` below. It is kept
    only so existing callers keep working; confirm before removing it.

    Returns:
        A Keras Model mapping a (batch, 100) latent vector to a
        (batch, 180, 65) multivariate time-series window. The output
        activation is linear because the training data is Robust-scaled
        and therefore not bounded to [-1, 1].
    """
    # Local import: the file's import header is outside this block, so the
    # layer added by the fix below is brought into scope here.
    from tensorflow.keras.layers import UpSampling1D

    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    def residual_layer(layer_input):
        """Two 3-tap same-padding Conv1D/BN/LeakyReLU blocks plus a skip add."""
        res_block = Conv1D(128, 3, strides=1, padding='same')(layer_input)
        res_block = BatchNormalization(gamma_initializer=gamma_init)(res_block)
        res_block = LeakyReLU()(res_block)
        res_block = Conv1D(128, 3, strides=1, padding='same')(res_block)
        res_block = BatchNormalization(gamma_initializer=gamma_init)(res_block)
        res_block = LeakyReLU()(res_block)
        return Add()([res_block, layer_input])

    in_noise = Input(shape=(100,))

    # `use_bias` expects a boolean; the original `use_bias=None` only behaved
    # like False because None is falsy.
    gen = Dense(180 * 65, kernel_initializer=w_init, use_bias=False)(in_noise)
    gen = BatchNormalization(gamma_initializer=gamma_init)(gen)
    gen = LeakyReLU()(gen)

    gen = Reshape((180, 65))(gen)

    gen = Conv1D(64, 7, strides=1, padding='same', kernel_initializer=w_init, use_bias=False)(gen)
    gen = BatchNormalization(gamma_initializer=gamma_init)(gen)
    gen = LeakyReLU()(gen)

    # Two stride-2 convolutions downsample the temporal axis 180 -> 90 -> 45.
    gen = Conv1D(64, 4, strides=2, padding='same', kernel_initializer=w_init, use_bias=False)(gen)
    gen = BatchNormalization(gamma_initializer=gamma_init)(gen)
    gen = LeakyReLU()(gen)

    gen = Conv1D(128, 4, strides=2, padding='same', kernel_initializer=w_init, use_bias=False)(gen)
    gen = BatchNormalization(gamma_initializer=gamma_init)(gen)
    gen = LeakyReLU()(gen)

    for _ in range(6):
        gen = residual_layer(gen)

    # Upsample back 45 -> 90 -> 180. UpSampling1D + stride-1 Conv1D replaces
    # the original stride-2 Conv1DTranspose layers: strided transposed
    # convolutions overlap unevenly and imprint periodic "checkerboard"
    # artifacts — the periodic noise observed in the generated signals.
    gen = UpSampling1D(size=2)(gen)
    gen = Conv1D(128, 4, strides=1, padding='same', kernel_initializer=w_init, use_bias=False)(gen)
    gen = BatchNormalization(gamma_initializer=gamma_init)(gen)
    gen = LeakyReLU()(gen)

    gen = UpSampling1D(size=2)(gen)
    gen = Conv1D(128, 4, strides=1, padding='same', kernel_initializer=w_init, use_bias=False)(gen)
    gen = BatchNormalization(gamma_initializer=gamma_init)(gen)
    gen = LeakyReLU()(gen)

    # Linear output head (no activation): see docstring on scaling.
    out_layer = Conv1D(65, 7, strides=1, padding='same', kernel_initializer=w_init, use_bias=False)(gen)

    model = Model(in_noise, out_layer)

    return model

def make_discriminator_model(n_classes=8):
    """Build the semi-supervised GAN discriminator pair on a shared trunk.

    Args:
        n_classes: Number of classes for the supervised classifier head
            (default 8, matching the original hard-coded value).

    Returns:
        (model, c_model):
            model   — unsupervised real/fake head ending in a single
                      linear logit (`Dense(1)`).
            c_model — supervised softmax classifier over `n_classes`.
        Both models share every layer up to and including the two
        Dense(128) layers, so training one updates the other's weights.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    # NOTE(review): gamma_init is never used here (no BatchNormalization in
    # this model); kept for parity with the generator — confirm intent.
    gamma_init = tf.random_normal_initializer(1., 0.02)

    in_window = Input(shape=(180, 65))

    # Five Conv1D blocks widen the channels 64 -> 1024. All strides are 1,
    # so the temporal length stays 180 throughout; Flatten/Dense below do
    # the only dimensionality reduction.
    disc = Conv1D(64, 4, strides=1, padding='same', kernel_initializer=w_init)(in_window)
    disc = LeakyReLU()(disc)
    disc = Dropout(0.3)(disc)

    disc = Conv1D(64*2, 4, strides=1, padding='same', kernel_initializer=w_init)(disc)
    disc = LeakyReLU()(disc)
    disc = Dropout(0.3)(disc)

    disc = Conv1D(64*4, 4, strides=1, padding='same', kernel_initializer=w_init)(disc)
    disc = LeakyReLU()(disc)
    disc = Dropout(0.3)(disc)

    disc = Conv1D(64*8, 4, strides=1, padding='same', kernel_initializer=w_init)(disc)
    disc = LeakyReLU()(disc)
    disc = Dropout(0.3)(disc)

    disc = Conv1D(64*16, 4, strides=1, padding='same', kernel_initializer=w_init)(disc)
    disc = LeakyReLU()(disc)
    disc = Dropout(0.3)(disc)

    disc = Flatten()(disc)

    # NOTE(review): two consecutive Dense(128) layers with no activation
    # between them collapse to a single linear map; if extra capacity was
    # intended, an activation belongs between them — confirm before changing.
    disc = Dense(128)(disc)
    disc = Dense(128)(disc)

    out_layer = Dense(1)(disc)

    # Bug fix: the classifier head previously hard-coded Dense(8) and
    # silently ignored the n_classes parameter.
    c_out_layer = Dense(n_classes, activation='softmax')(disc)

    model = Model(in_window, out_layer)
    c_model = Model(in_window, c_out_layer)

    return model, c_model
1个回答

抱歉无法直接回复您的评论,因为我没有帐户,您是对的!我用 UpSampling1D+Conv1D 替换了转置层(Keras 中该层的正确名称是 UpSampling1D),这解决了这个问题。

gen = Conv1DTranspose(128, 4, strides=2, padding='same', kernel_initializer=w_init, use_bias=None)(gen)

应该变成(注意 strides=2 变成 strides=1):

gen = UpSampling1D(size=2)(gen)
gen = Conv1D(128, 4, strides=1, padding='same', kernel_initializer=w_init, use_bias=None)(gen)