Hyperas LSTM configuration assignment error

data-mining Python Keras hyperparameter-tuning
2022-02-21 02:37:29

I have been working on my trivial Keras LSTM model, trying to add Hyperas to it with the code below, and it gives me an error I cannot resolve. I have only just started experimenting with Hyperas, and it would be great to get it working. My code, all in one file, looks like this:

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, Dropout, Activation
from keras.layers import LSTM
from keras.datasets import imdb
from pandas import DataFrame
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot
import h5py
from keras.callbacks import TensorBoard
from numpy import *

from hyperas import optim
from hyperas.distributions import choice, uniform
from hyperopt import Trials, STATUS_OK, tpe
from configuration.data_loader import *

def data():

    # normalise features
    scaler = MinMaxScaler(feature_range=(0, 1))

    X_train_df , y_train_df , X_val_df , y_val_df , X_test_df , y_test_df  = load_saved_datasets()

    X_train_df =  scaler.fit_transform(X_train_df.get_values())
    X_val_df =  scaler.fit_transform(X_val_df.get_values())

    y_train_df = y_train_df.get_values()
    y_val_df = y_val_df.get_values()

    X_train = X_train_df
    y_train = y_train_df
    X_val = X_val_df
    y_val = y_val_df

    return (X_train, y_train, X_val, y_val)


def model(X_train, y_train, X_val, y_val):
    """

    :param X_train: SCALED
    :param y_train:
    :param X_val: SCALED
    :param y_val:
    :return:
    """

    X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
    X_val = X_val.reshape((X_val.shape[0], 1, X_val.shape[1]))


    model = Sequential()

    # Layer 1
    model.add(LSTM({{uniform(4,70)}},
                    input_shape=(X_train.shape[1],X_train.shape[2])))
    model.add(Activation({{choice(['tanh', 'relu'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if {{choice(['two', 'three'])}} == 'two':
        # Layer 2
        model.add(LSTM({{uniform(4,100)}},
                       input_shape=(X_train.shape[1],X_train.shape[2])))
        model.add(Activation({{choice(['tanh', 'relu'])}}))
        model.add(Dropout({{uniform(0, 1)}}))


    model.add(Dense(1))
    model.add(Activation({{choice(['softmax', 'relu', 'tanh'])}}))

    model.compile(loss='rmse', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    result = model.fit(X_train, y_train,
                       batch_size={{choice([64, 128])}},
                       epochs=2,
                       verbose=2,
                       validation_data=(X_val, y_val))

    #get the highest validation accuracy of the training epochs
    validation_acc = amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}

def hyperas_main():

    trials = Trials()
    best_run, best_model = optim.minimize(data=data,
                                          model=model,
                                          algo=tpe.suggest,
                                          max_evals=20,
                                          trials=trials)

    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)

    # print("Evalutation of best performing model:")
    # # print(best_model.evaluate(X_val, y_val))
    # print("Best performing model chosen hyper-parameters:")
    # print(best_run)

The load_saved_datasets() function just loads my datasets with pandas.
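For context, it does roughly the following (the file names and paths here are just placeholders, not my actual project layout):

import pandas as pd

def load_saved_datasets():
    # illustrative sketch only: read previously saved train/val/test splits
    # back into pandas DataFrames
    X_train_df = pd.read_csv('splits/X_train.csv')
    y_train_df = pd.read_csv('splits/y_train.csv')
    X_val_df = pd.read_csv('splits/X_val.csv')
    y_val_df = pd.read_csv('splits/y_val.csv')
    X_test_df = pd.read_csv('splits/X_test.csv')
    y_test_df = pd.read_csv('splits/y_test.csv')
    return X_train_df, y_train_df, X_val_df, y_val_df, X_test_df, y_test_df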

However, the error output looks like this:

   >>> Hyperas search space:

def get_space():
    return {
        'LSTM': hp.uniform('LSTM', 4,70),
        'Activation': hp.choice('Activation', ['tanh', 'relu']),
        'Dropout': hp.uniform('Dropout', 0, 1),
        'Dropout_1': hp.choice('Dropout_1', ['two', 'three']),
        'LSTM_1': hp.uniform('LSTM_1', 4,100),
        'Activation_1': hp.choice('Activation_1', ['tanh', 'relu']),
        'Dropout_2': hp.uniform('Dropout_2', 0, 1),
        'Activation_2': hp.choice('Activation_2', ['softmax', 'relu', 'tanh']),
        'optimizer': hp.choice('optimizer', ['rmsprop', 'adam', 'sgd']),
        'batch_size': hp.choice('batch_size', [64, 128]),
    }

>>> Data
  1: 
  2: 
  3: # normalise features
  4: scaler = MinMaxScaler(feature_range=(0, 1))
  5: 
  6: X_train_df , y_train_df , X_val_df , y_val_df , X_test_df , y_test_df  = load_saved_datasets()
  7: 
  8: X_train_df =  scaler.fit_transform(X_train_df.get_values())
  9: X_val_df =  scaler.fit_transform(X_val_df.get_values())
 10: 
 11: y_train_df = y_train_df.get_values()
 12: y_val_df = y_val_df.get_values()
 13: 
 14: X_train = X_train_df
 15: y_train = y_train_df
 16: X_val = X_val_df
 17: y_val = y_val_df
 18: 
 19: 
 20: 
 21: 
>>> Resulting replaced keras model:

   1: def keras_fmin_fnct(space):
   2: 
   3:     """
   4: 
   5:     :param X_train: SCALED
   6:     :param y_train:
   7:     :param X_val: SCALED
   8:     :param y_val:
   9:     :return:
  10:     """
  11: 
  12:     X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
  13:     X_val = X_val.reshape((X_val.shape[0], 1, X_val.shape[1]))
  14: 
  15: 
  16:     model = Sequential()
  17: 
  18:     # Layer 1
  19:     model.add(LSTM(space['LSTM'],
  20:                     input_shape=(X_train.shape[1],X_train.shape[2])))
  21:     model.add(Activation(space['Activation']))
  22:     model.add(Dropout(space['Dropout']))
  23: 
  24:     # If we choose 'four', add an additional fourth layer
  25:     if space['Dropout_1'] == 'two':
  26:         # Layer 2
  27:         model.add(LSTM(space['LSTM_1'],
  28:                        input_shape=(X_train.shape[1],X_train.shape[2])))
  29:         model.add(Activation(space['Activation_1']))
  30:         model.add(Dropout(space['Dropout_2']))
  31: 
  32: 
  33:     model.add(Dense(1))
  34:     model.add(Activation(space['Activation_2']))
  35: 
  36:     model.compile(loss='rmse', metrics=['accuracy'],
  37:                   optimizer=space['optimizer'])
  38: 
  39:     result = model.fit(X_train, y_train,
  40:                        batch_size=space['batch_size'],
  41:                        epochs=2,
  42:                        verbose=2,
  43:                        validation_data=(X_val, y_val))
  44: 
  45:     #get the highest validation accuracy of the training epochs
  46:     validation_acc = amax(result.history['val_acc'])
  47:     print('Best validation acc of epoch:', validation_acc)
  48:     return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
  49: 
Traceback (most recent call last):
  File "C:/Users/user/Desktop/AI/Backend/src/main.py", line 40, in <module>
    lstm_training.hyperas_main()
  File "C:\Users\user\Desktop\AI\Backend\src\training\lstm_training.py", line 94, in hyperas_main
    trials=trials)
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperas\optim.py", line 67, in minimize
    verbose=verbose)
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperas\optim.py", line 133, in base_minimizer
    return_argmin=True),
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\fmin.py", line 307, in fmin
    return_argmin=return_argmin,
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\base.py", line 635, in fmin
    return_argmin=return_argmin)
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\fmin.py", line 320, in fmin
    rval.exhaust()
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\fmin.py", line 199, in exhaust
    self.run(self.max_evals - n_done, block_until_done=self.async)
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\fmin.py", line 173, in run
    self.serial_evaluate()
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\fmin.py", line 92, in serial_evaluate
    result = self.domain.evaluate(spec, ctrl)
  File "C:\Users\user\Anaconda3\envs\AI\lib\site-packages\hyperopt\base.py", line 840, in evaluate
    rval = self.fn(pyll_rval)
  File "C:\Users\user\Desktop\AI\Backend\src\temp_model.py", line 110, in keras_fmin_fnct
UnboundLocalError: local variable 'X_train' referenced before assignment

Where is X_train being referenced before assignment? Is this due to the naming convention? Could the problem be in the computation graph?

Any help is appreciated.

1 Answer

Try moving the two reshape() statements to the end of the data() function instead of keeping them inside model(), and make sure data() is defined in hyperas_main() before optim.minimize() is run. Inside the generated keras_fmin_fnct(space), X_train and X_val are no longer function arguments; the first thing the function does is assign to them via reshape(), so Python treats them as local variables and the read on the right-hand side raises the UnboundLocalError.
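A minimal sketch of what that restructured data() could look like, reusing the question's own load_saved_datasets() helper (model() and hyperas_main() stay as they are, minus the two reshape lines at the top of model()):

def data():
    # normalise features
    scaler = MinMaxScaler(feature_range=(0, 1))

    X_train_df, y_train_df, X_val_df, y_val_df, X_test_df, y_test_df = load_saved_datasets()

    X_train = scaler.fit_transform(X_train_df.get_values())
    X_val = scaler.fit_transform(X_val_df.get_values())

    y_train = y_train_df.get_values()
    y_val = y_val_df.get_values()

    # reshape to (samples, timesteps, features) here, so the arrays that end up
    # in the generated temp_model.py are already 3-D and model() never has to
    # rebind X_train / X_val
    X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
    X_val = X_val.reshape((X_val.shape[0], 1, X_val.shape[1]))

    return X_train, y_train, X_val, y_val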