如果您已安装 keras 和 tensorflow,运行以下代码段应当能够复现该错误:
import tensorflow as tf
import keras
from keras.layers import Input, Conv2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers import Concatenate, Dense, LSTM, Flatten, RepeatVector, TimeDistributed, Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Bidirectional, GaussianNoise, BatchNormalization
from keras.layers import CuDNNLSTM as LSTM
# Dimensions / hyper-parameters of the sequence autoencoder.
input_shape = (105, 29)      # encoder input: (timesteps, one-hot width)
dec_input_shape = (104, 29)  # decoder (teacher-forcing) input: one step shorter
output_len = 104             # decoder output sequence length
output_dims = 29             # per-timestep output width (matches one-hot size)
lstm_dim = 256               # total LSTM units (halved per Bi-LSTM direction)
bottleneck_dim = 128         # width of the dense latent bottleneck
h_activation = "relu"        # activation used by the bottleneck layer
bn_momentum = 0.9            # momentum shared by all BatchNormalization layers
# Architecture
# Bidirectional-LSTM sequence autoencoder: two stacked Bi-LSTM encoder
# layers, a dense bottleneck over the concatenated final states, and two
# stacked LSTM decoder layers whose initial states are decoded from the
# bottleneck.
# NOTE(review): `LSTM` here is actually CuDNNLSTM (rebound by the import
# above), so building this graph presumably requires a GPU-enabled
# TensorFlow — confirm.
encoder_inputs = Input(shape=input_shape)
x = encoder_inputs
# First encoder layer: return_sequences + return_state, so calling it
# yields 5 tensors (sequence output, then h/c for forward and reverse).
encoder = Bidirectional(LSTM(lstm_dim//2,
return_sequences=True,
return_state=True,
name="Encoder_LSTM"))
# Second encoder layer: no return_sequences, so it yields only the last
# output plus the four final states.
encoder2 = Bidirectional(LSTM(lstm_dim//2,
return_state=True,
name="Encoder_LSTM2"))
x ,state_h, state_c, state_h_reverse, state_c_reverse = encoder(x)
x = BatchNormalization(momentum=bn_momentum)(x)
encoder_outputs, state_h2, state_c2 , state_h2_reverse, state_c2_reverse = encoder2(x)
# Concatenate all eight final states (h and c, both layers, both
# directions) into one vector that feeds the bottleneck.
states = Concatenate(axis=-1)([state_h, state_c, state_h2, state_c2,
state_h_reverse, state_c_reverse, state_h2_reverse, state_c2_reverse])
states = BatchNormalization(momentum=bn_momentum)(states)
# Bottleneck: compress the concatenated states to bottleneck_dim.
neck_relu = Dense(bottleneck_dim, activation=h_activation, name='bottleneck_relu')
neck_outputs = neck_relu(states)
neck_outputs = BatchNormalization(momentum=bn_momentum, name="BN_bottleneck")(neck_outputs)
# Re-expand the bottleneck into four separate vectors: initial h and c
# states for each of the two decoder LSTM layers.
decode_h = Dense(lstm_dim, activation="relu")
decode_c = Dense(lstm_dim, activation="relu")
decode_h2 = Dense(lstm_dim, activation="relu")
decode_c2 = Dense(lstm_dim, activation="relu")
state_h_decoded = decode_h(neck_outputs)
state_c_decoded = decode_c(neck_outputs)
state_h_decoded2 = decode_h2(neck_outputs)
state_c_decoded2 = decode_c2(neck_outputs)
# One BatchNormalization instance per decoded state (layers are stateful
# objects in Keras, so they cannot be shared here without tying weights).
state_h_decoded_BN = BatchNormalization(momentum=bn_momentum)
state_c_decoded_BN = BatchNormalization(momentum=bn_momentum)
state_h_decoded2_BN = BatchNormalization(momentum=bn_momentum)
state_c_decoded2_BN = BatchNormalization(momentum=bn_momentum)
state_h_decoded = state_h_decoded_BN(state_h_decoded)
state_c_decoded = state_c_decoded_BN(state_c_decoded)
state_h_decoded2 = state_h_decoded2_BN(state_h_decoded2)
state_c_decoded2 = state_c_decoded2_BN(state_c_decoded2)
encoder_states = [state_h_decoded, state_c_decoded]
encoder_states2 = [state_h_decoded2, state_c_decoded2]
# Decoder: teacher-forced input sequence (one timestep shorter than the
# encoder input); each LSTM layer is seeded with its decoded states.
decoder_inputs = Input(shape=dec_input_shape)
decoder_lstm = LSTM(lstm_dim,
return_sequences=True,
name='LSTM1_decoder'
)
decoder_lstm2 = LSTM(lstm_dim,
return_sequences=True,
name='LSTM2_decoder'
)
xo = decoder_lstm(decoder_inputs, initial_state=encoder_states)
xo = BatchNormalization(momentum=bn_momentum, name="BN_decoder")(xo)
decoder_outputs = decoder_lstm2(xo, initial_state=encoder_states2)
# Per-timestep softmax over the output_dims-wide vocabulary.
outputs = Dense(output_dims, activation='softmax', name="Dense_decoder")(decoder_outputs)
# Define model: two inputs (encoder sequence, decoder teacher-forcing
# sequence), one softmax sequence output.
model = Model([encoder_inputs, decoder_inputs], outputs)
我想从完整模型中创建一个子模型,它将层的输出作为输入input_11
,将最后一层的输出作为输出,即Dense_decoder
. 因此,我将新模型定义为:
model_new = Model(model.get_layer("input_11").output, model.get_layer("Dense_decoder").output)
这给了我以下错误,即图表已断开连接:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_10:0", shape=(?, 105, 29), dtype=float32) at layer "input_10". The following previous layers were accessed without issue: []
知道为什么会这样吗?或者,一般来说,应如何规避这个问题,将 model_new 定义为现有完整模型的子模型?
谢谢!