I am training a very simple inception block followed by a maxpool and a fully connected layer on an NVIDIA GeForce RTX 2070 GPU, and each iteration takes a very long time: it just took more than 24 hours to finish 10 iterations.
Here is the code for the inception model definition:
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        input_channels = 3
        # Three parallel convolution branches plus a pooling branch,
        # each producing 96 output channels.
        self.branch3x3stack = nn.Sequential(
            BasicConv2d(input_channels, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
            BasicConv2d(96, 96, kernel_size=3, padding=1),
        )
        self.branch3x3 = nn.Sequential(
            BasicConv2d(input_channels, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3, padding=1),
        )
        self.branch1x1 = BasicConv2d(input_channels, 96, kernel_size=1)
        self.branchpool = nn.Sequential(
            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(input_channels, 96, kernel_size=1),
        )
        self.maxpool = nn.MaxPool2d(kernel_size=8, stride=8)
        # 4 branches x 96 channels = 384; 384 * 28 * 28 = 301056,
        # which matches 224x224 inputs downsampled by the 8x8 maxpool.
        self.fc_seqn = nn.Sequential(nn.Linear(301056, 1))

    def forward(self, x):
        # Run the four branches in parallel, then concatenate along the channel dim.
        x = [
            self.branch3x3stack(x),
            self.branch3x3(x),
            self.branch1x1(x),
            self.branchpool(x),
        ]
        x = torch.cat(x, 1)
        x = self.maxpool(x)
        x = x.view(x.size(0), -1)  # flatten for the fully connected layer
        x = self.fc_seqn(x)
        return x
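BasicConv2d is not shown above; it is the usual convolution + batch norm + ReLU helper, roughly like the one in torchvision's Inception implementation (a minimal sketch, in case it matters for the timing):

class BasicConv2d(nn.Module):
    # Minimal conv + batch norm + ReLU block, as in torchvision's Inception.
    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return torch.relu(x)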
and here is the training code I use:
import copy
import time
import torch.optim as optim

def training_code(self, model):
    # Work on a copy so the caller's model is left untouched.
    model = copy.deepcopy(model)
    model = model.to(self.device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=self.learning_rate)
    for epoch in range(self.epochs):
        print("\n epoch :", epoch)
        running_loss = 0.0
        start_epoch = time.time()
        for i, (inputs, labels) in enumerate(self.train_data_loader):
            inputs = inputs.to(self.device)
            labels = labels.to(self.device)
            optimizer.zero_grad()
            outputs = model(inputs).squeeze()
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # log_step is a logging-interval constant defined elsewhere
            if i % log_step == log_step - 1:
                avg_loss = running_loss / float(log_step)
                print("[epoch:%d, batch:%5d] loss: %.3f" % (epoch + 1, i + 1, avg_loss))
                running_loss = 0.0
        # save_model_step and path_saved_model are config constants defined elsewhere
        if epoch % save_model_step == 0:
            self.path_saved_model = path_saved_model + "_" + str(epoch)
            self.save_model(model)
    print("\nFinished Training\n")
    self.save_model(model)
Here is the summary of the convolutional neural network. Can anyone help me speed up the training?
Here is the summary of the convolutional neural network after reducing the number of inputs to the fully connected (FC) layer. But the training time is still very high, similar to before. Any suggestions on how to speed up the training would be helpful.
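In case it helps with diagnosing this, a rough way to separate data-loading time from GPU compute time per batch (a minimal sketch, assuming model, criterion, optimizer, self.device, and self.train_data_loader are set up as in training_code above):

# Rough per-batch timing: DataLoader wait time vs. forward/backward time.
end = time.time()
for i, (inputs, labels) in enumerate(self.train_data_loader):
    data_time = time.time() - end  # time spent waiting on the DataLoader
    inputs = inputs.to(self.device)
    labels = labels.to(self.device)
    optimizer.zero_grad()
    loss = criterion(model(inputs).squeeze(), labels)
    loss.backward()
    optimizer.step()
    torch.cuda.synchronize()  # CUDA ops are async; sync to get accurate timings
    compute_time = time.time() - end - data_time
    print("batch %d: data %.3fs, compute %.3fs" % (i, data_time, compute_time))
    end = time.time()
    if i == 20:  # a handful of batches is enough for a rough picture
        break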