Hardware: RTX 2070 GPU, 64 GB of RAM.
This is the training code I am running while trying to retrain EfficientNetB7 (transfer learning):
import tensorflow as tf
import efficientnet.keras as efn  # assumed: the qubvel "efficientnet" package
from keras.layers import Dense
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

# cap TensorFlow at 70% of the GPU memory
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
tf.keras.backend.set_session(tf.Session(config=config))

model = efn.EfficientNetB7()
model.summary()

# create new output layer
output_layer = Dense(5, activation='sigmoid', name="retrain_output")(model.get_layer('top_dropout').output)
new_model = Model(model.input, output=output_layer)
new_model.summary()

# lock previous weights
for i, l in enumerate(new_model.layers):
    if i < 228:
        l.trainable = False
# lock probs weights

new_model.compile(loss='mean_squared_error', optimizer='adam')

batch_size = 5
samples_per_epoch = 30
epochs = 20

# generate train data
# (train_data_input_folder, validation_data_input_folder, input_dim and
#  model_output_path are defined elsewhere in the script)
train_datagen = ImageDataGenerator(
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0)

train_generator = train_datagen.flow_from_directory(
    train_data_input_folder,
    target_size=(input_dim, input_dim),
    batch_size=batch_size,
    class_mode='categorical',
    seed=2019,
    subset='training')

validation_generator = train_datagen.flow_from_directory(
    validation_data_input_folder,
    target_size=(input_dim, input_dim),
    batch_size=batch_size,
    class_mode='categorical',
    seed=2019,
    subset='validation')

new_model.fit_generator(
    train_generator,
    samples_per_epoch=samples_per_epoch,
    epochs=epochs,
    validation_steps=20,
    validation_data=validation_generator,
    nb_worker=24)

new_model.save(model_output_path)
Exception:
2019-11-17 08:52:52.903583: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally
...
2019-11-17 08:53:24.713020: I tensorflow/core/common_runtime/bfc_allocator.cc:641] 110 Chunks of size 27724800 totalling 2.84GiB
2019-11-17 08:53:24.713024: I tensorflow/core/common_runtime/bfc_allocator.cc:641] 6 Chunks of size 38814720 totalling 222.10MiB
2019-11-17 08:53:24.713027: I tensorflow/core/common_runtime/bfc_allocator.cc:641] 23 Chunks of size 54000128 totalling 1.16GiB
2019-11-17 08:53:24.713031: I tensorflow/core/common_runtime/bfc_allocator.cc:641] 1 Chunks of size 73760000 totalling 70.34MiB
2019-11-17 08:53:24.713034: I tensorflow/core/common_runtime/bfc_allocator.cc:645] Sum Total of in-use chunks: 5.45GiB
2019-11-17 08:53:24.713040: I tensorflow/core/common_runtime/bfc_allocator.cc:647] Stats:
Limit:        5856749158
InUse:        5848048896
MaxInUse:     5848061440
NumAllocs:    6140
MaxAllocSize: 3259170816
2019-11-17 08:53:24.713214: W tensorflow/core/common_runtime/bfc_allocator.cc:271] ****************************************************************************************************
2019-11-17 08:53:24.713232: W tensorflow/core/framework/op_kernel.cc:1401] OP_REQUIRES failed at cwise_ops_common.cc:70 : Resource exhausted: OOM when allocating tensor with shape[5,1344,38,38] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
Traceback (most recent call last):
  File "/home/naort/Desktop/deep-learning-data-preparation-tools/EfficientNet-Transfer-Learning-Boiler-Plate/model_retrain.py", line 76, in <module>
    nb_worker=24)
  File "/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 1732, in fit_generator
    initial_epoch=initial_epoch)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training_generator.py", line 220, in fit_generator
    reset_metrics=False)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 1514, in train_on_batch
    outputs = self.train_function(ins)
  File "/home/naort/.local/lib/python3.6/site-packages/tensorflow/python/keras/backend.py", line 3076, in __call__
    run_metadata=self.run_metadata)
  File "/home/naort/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1439, in __call__
    run_metadata_ptr)
  File "/home/naort/.local/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 528, in __exit__
    c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[5,1344,38,38] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
  [[{{node training/Adam/gradients/AddN_387-0-TransposeNHWCToNCHW-LayoutOptimizer}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
  [[{{node Mean}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
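For reference, here is a quick check of the numbers in the log (my own arithmetic, assuming float32 activations at 4 bytes per element):

shape = (5, 1344, 38, 38)        # the tensor the allocator failed to place
tensor_bytes = 4                 # bytes per float32 element
for d in shape:
    tensor_bytes *= d
print(tensor_bytes)              # 38814720 (~37 MiB) -- same size as the
                                 # "6 Chunks of size 38814720" already resident

limit = 5856749158               # "Limit" reported by the BFC allocator (~5.45 GiB,
                                 # the 0.7 per_process_gpu_memory_fraction cap set above)
in_use = 5848048896              # "InUse" reported by the BFC allocator
print((limit - in_use) / 2**20)  # ~8.3 MiB of headroom, so not even one more
                                 # ~37 MiB activation tensor fits

The failing node (training/Adam/gradients/...) is in the backward pass, and the allocator is already at its limit when it asks for that one extra ~37 MiB tensor, which matches the numbers above.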