I am trying to run the model given in the hyperparameter optimization example from the Simple Transformers documentation, but after a certain number of iterations the hyperparameter search crashes with a CUDA out-of-memory error. The allocated GPU memory also keeps increasing over the course of the search.
Below is the memory allocation graph:
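(For reference, the growth in the graph matches what PyTorch itself reports between runs; this quick check is my own addition using torch.cuda.memory_allocated, not part of the sweep script itself:)

import torch

# Bytes currently held by tensors on the default GPU; printed after each
# run finishes, this number keeps climbing instead of dropping back down.
print(torch.cuda.memory_allocated() / 1024 ** 2, "MiB allocated")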
Here is my code, which I am running on Google Colab. How can I fix this error?
import logging
import pandas as pd
import sklearn
import wandb
from simpletransformers.classification import (
    ClassificationArgs,
    ClassificationModel,
)
sweep_config = {
    "method": "bayes",  # grid, random
    "metric": {"name": "train_loss", "goal": "minimize"},
    "parameters": {
        "num_train_epochs": {"values": [2, 3, 5]},
        "learning_rate": {"min": 5e-5, "max": 4e-4},
    },
}
sweep_id = wandb.sweep(sweep_config, project="Simple Sweep")
logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
# Preparing train data
train_data = [
    ["Aragorn was the heir of Isildur", "true"],
    ["Frodo was the heir of Isildur", "false"],
]
train_df = pd.DataFrame(train_data)
train_df.columns = ["text", "labels"]
# Preparing eval data
eval_data = [
    ["Theoden was the king of Rohan", "true"],
    ["Merry was the king of Rohan", "false"],
]
eval_df = pd.DataFrame(eval_data)
eval_df.columns = ["text", "labels"]
model_args = ClassificationArgs()
model_args.reprocess_input_data = True
model_args.overwrite_output_dir = True
model_args.evaluate_during_training = True
model_args.manual_seed = 4
model_args.use_multiprocessing = True
model_args.train_batch_size = 16
model_args.eval_batch_size = 8
model_args.labels_list = ["true", "false"]
model_args.wandb_project = "Simple Sweep"
def train():
    # Initialize a new wandb run
    wandb.init()

    # Create a TransformerModel
    model = ClassificationModel(
        "roberta",
        "roberta-base",
        use_cuda=True,
        args=model_args,
        sweep_config=wandb.config,
    )

    # Train the model
    model.train_model(train_df, eval_df=eval_df)

    # Evaluate the model
    model.eval_model(eval_df)

    # Sync wandb
    wandb.join()
wandb.agent(sweep_id, train)
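My guess is that the model from each finished run is never freed, so every sweep iteration leaks a full RoBERTa. If that is the cause, I would expect explicit cleanup at the end of train() to help; below is a minimal sketch of what I mean, using plain gc / torch calls rather than any simpletransformers API (I have not verified this is the intended fix):

import gc
import torch

def train():
    wandb.init()
    model = ClassificationModel(
        "roberta",
        "roberta-base",
        use_cuda=True,
        args=model_args,
        sweep_config=wandb.config,
    )
    model.train_model(train_df, eval_df=eval_df)
    model.eval_model(eval_df)
    wandb.join()
    # Hypothetical cleanup, not in my original script:
    del model                 # drop the last Python reference to the model
    gc.collect()              # collect anything still holding CUDA tensors
    torch.cuda.empty_cache()  # return cached blocks to the driver

Is something like this the right approach for sweeps with simpletransformers, or is the memory being held somewhere else?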