I'm trying to figure out whether there is a way to speed up Detectron2 inference. I'm currently running on a Data Science GPU cluster in Databricks, with the device set to CUDA. The model architecture is GeneralizedRCNN. Using Detectron2's DefaultPredictor, each image takes roughly 0.37 seconds, and as I scale up I need it to be faster. I have read that inference can be sped up by lowering the image resolution. I considered doing that, but it seemed I would have to save the image to disk in order to change its quality. Is there a way to do this while opening the image directly from a URL without saving it, e.g. with Image.open(requests.get(url, stream=True).raw) as image: ?
from detectron2.engine import DefaultPredictor

predictor = DefaultPredictor(cfg)
outputs = predictor(np_image)
np_image above is just an image represented as a NumPy array, and cfg is shown below.
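To make the question concrete, this is a minimal sketch of what I have in mind, assuming Pillow, requests and NumPy are available, that cfg.INPUT.FORMAT stays 'BGR' as in the config below, and with the URL and the downscale factor as placeholders:

import numpy as np
import requests
from PIL import Image

url = "https://example.com/some_image.jpg"  # placeholder URL

# Open the image straight from the HTTP response; nothing is written to disk.
with Image.open(requests.get(url, stream=True).raw) as image:
    # Downscale in memory; thumbnail() keeps the aspect ratio and only shrinks.
    image.thumbnail((image.width // 2, image.height // 2))
    # DefaultPredictor with INPUT.FORMAT 'BGR' expects a BGR uint8 array,
    # so reverse the channel order of the RGB array Pillow gives us.
    np_image = np.array(image.convert("RGB"))[:, :, ::-1]

outputs = predictor(np_image)

If downscaling like this is a sensible approach, it would let me skip the save-and-reload step entirely.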
CfgNode({'VERSION': 2, 'MODEL': CfgNode({'LOAD_PROPOSALS': False, 'MASK_ON': False, 'KEYPOINT_ON': False, 'DEVICE': 'cuda', 'META_ARCHITECTURE': 'GeneralizedRCNN', 'WEIGHTS': '/dbfs/mnt/datalake/rs/model_output/2021-08-24/model_final.pth', 'PIXEL_MEAN': [103.53, 116.28, 123.675], 'PIXEL_STD': [57.375, 57.12, 58.395], 'BACKBONE': CfgNode({'NAME': 'build_resnet_fpn_backbone', 'FREEZE_AT': 2}), 'FPN': CfgNode({'IN_FEATURES': ['res2', 'res3', 'res4', 'res5'], 'OUT_CHANNELS': 256, 'NORM': '', 'FUSE_TYPE': 'sum'}), 'PROPOSAL_GENERATOR': CfgNode({'NAME': 'RPN', 'MIN_SIZE': 0}), 'ANCHOR_GENERATOR': CfgNode({'NAME': 'DefaultAnchorGenerator', 'SIZES': [[32], [64], [128], [256], [512]], 'ASPECT_RATIOS': [[0.5, 1.0, 2.0]], 'ANGLES': [[-90, 0, 90]], 'OFFSET': 0.0}), 'RPN': CfgNode({'HEAD_NAME': 'StandardRPNHead', 'IN_FEATURES': ['p2', 'p3', 'p4', 'p5', 'p6'], 'BOUNDARY_THRESH': -1, 'IOU_THRESHOLDS': [0.3, 0.7], 'IOU_LABELS': [0, -1, 1], 'BATCH_SIZE_PER_IMAGE': 256, 'POSITIVE_FRACTION': 0.5, 'BBOX_REG_LOSS_TYPE': 'smooth_l1', 'BBOX_REG_LOSS_WEIGHT': 1.0, 'BBOX_REG_WEIGHTS': (1.0, 1.0, 1.0, 1.0), 'SMOOTH_L1_BETA': 0.0, 'LOSS_WEIGHT': 1.0, 'PRE_NMS_TOPK_TRAIN': 2000, 'PRE_NMS_TOPK_TEST': 1000, 'POST_NMS_TOPK_TRAIN': 1000, 'POST_NMS_TOPK_TEST': 1000, 'NMS_THRESH': 0.7, 'CONV_DIMS': [-1]}), 'ROI_HEADS': CfgNode({'NAME': 'StandardROIHeads', 'NUM_CLASSES': 1, 'IN_FEATURES': ['p2', 'p3', 'p4', 'p5'], 'IOU_THRESHOLDS': [0.5], 'IOU_LABELS': [0, 1], 'BATCH_SIZE_PER_IMAGE': 64, 'POSITIVE_FRACTION': 0.25, 'SCORE_THRESH_TEST': 0.7, 'NMS_THRESH_TEST': 0.5, 'PROPOSAL_APPEND_GT': True}), 'ROI_BOX_HEAD': CfgNode({'NAME': 'FastRCNNConvFCHead', 'BBOX_REG_LOSS_TYPE': 'smooth_l1', 'BBOX_REG_LOSS_WEIGHT': 1.0, 'BBOX_REG_WEIGHTS': (10.0, 10.0, 5.0, 5.0), 'SMOOTH_L1_BETA': 0.0, 'POOLER_RESOLUTION': 7, 'POOLER_SAMPLING_RATIO': 0, 'POOLER_TYPE': 'ROIAlignV2', 'NUM_FC': 2, 'FC_DIM': 1024, 'NUM_CONV': 0, 'CONV_DIM': 256, 'NORM': '', 'CLS_AGNOSTIC_BBOX_REG': False, 'TRAIN_ON_PRED_BOXES': False}), 'ROI_BOX_CASCADE_HEAD': CfgNode({'BBOX_REG_WEIGHTS': ([10.0, 10.0, 5.0, 5.0], [20.0, 20.0, 10.0, 10.0], [30.0, 30.0, 15.0, 15.0]), 'IOUS': (0.5, 0.6, 0.7)}), 'ROI_MASK_HEAD': CfgNode({'NAME': 'MaskRCNNConvUpsampleHead', 'POOLER_RESOLUTION': 14, 'POOLER_SAMPLING_RATIO': 0, 'NUM_CONV': 4, 'CONV_DIM': 256, 'NORM': '', 'CLS_AGNOSTIC_MASK': False, 'POOLER_TYPE': 'ROIAlignV2'}), 'ROI_KEYPOINT_HEAD': CfgNode({'NAME': 'KRCNNConvDeconvUpsampleHead', 'POOLER_RESOLUTION': 14, 'POOLER_SAMPLING_RATIO': 0, 'CONV_DIMS': (512, 512, 512, 512, 512, 512, 512, 512), 'NUM_KEYPOINTS': 17, 'MIN_KEYPOINTS_PER_IMAGE': 1, 'NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS': True, 'LOSS_WEIGHT': 1.0, 'POOLER_TYPE': 'ROIAlignV2'}), 'SEM_SEG_HEAD': CfgNode({'NAME': 'SemSegFPNHead', 'IN_FEATURES': ['p2', 'p3', 'p4', 'p5'], 'IGNORE_VALUE': 255, 'NUM_CLASSES': 54, 'CONVS_DIM': 128, 'COMMON_STRIDE': 4, 'NORM': 'GN', 'LOSS_WEIGHT': 1.0}), 'PANOPTIC_FPN': CfgNode({'INSTANCE_LOSS_WEIGHT': 1.0, 'COMBINE': CfgNode({'ENABLED': True, 'OVERLAP_THRESH': 0.5, 'STUFF_AREA_LIMIT': 4096, 'INSTANCES_CONFIDENCE_THRESH': 0.5})}), 'RETINANET': CfgNode({'NUM_CLASSES': 80, 'IN_FEATURES': ['p3', 'p4', 'p5', 'p6', 'p7'], 'NUM_CONVS': 4, 'IOU_THRESHOLDS': [0.4, 0.5], 'IOU_LABELS': [0, -1, 1], 'PRIOR_PROB': 0.01, 'SCORE_THRESH_TEST': 0.05, 'TOPK_CANDIDATES_TEST': 1000, 'NMS_THRESH_TEST': 0.5, 'BBOX_REG_WEIGHTS': (1.0, 1.0, 1.0, 1.0), 'FOCAL_LOSS_GAMMA': 2.0, 'FOCAL_LOSS_ALPHA': 0.25, 'SMOOTH_L1_LOSS_BETA': 0.1, 'BBOX_REG_LOSS_TYPE': 'smooth_l1', 'NORM': ''}), 'RESNETS': CfgNode({'DEPTH': 101, 'OUT_FEATURES': 
['res2', 'res3', 'res4', 'res5'], 'NUM_GROUPS': 32, 'NORM': 'FrozenBN', 'WIDTH_PER_GROUP': 8, 'STRIDE_IN_1X1': False, 'RES5_DILATION': 1, 'RES2_OUT_CHANNELS': 256, 'STEM_OUT_CHANNELS': 64, 'DEFORM_ON_PER_STAGE': [False, False, False, False], 'DEFORM_MODULATED': False, 'DEFORM_NUM_GROUPS': 1})}), 'INPUT': CfgNode({'MIN_SIZE_TRAIN': (640, 672, 704, 736, 768, 800), 'MIN_SIZE_TRAIN_SAMPLING': 'choice', 'MAX_SIZE_TRAIN': 1333, 'MIN_SIZE_TEST': 800, 'MAX_SIZE_TEST': 1333, 'RANDOM_FLIP': 'horizontal', 'CROP': CfgNode({'ENABLED': False, 'TYPE': 'relative_range', 'SIZE': [0.9, 0.9]}), 'FORMAT': 'BGR', 'MASK_FORMAT': 'polygon'}), 'DATASETS': CfgNode({'TRAIN': ('house_train',), 'PROPOSAL_FILES_TRAIN': (), 'PRECOMPUTED_PROPOSAL_TOPK_TRAIN': 2000, 'TEST': ('house_val',), 'PROPOSAL_FILES_TEST': (), 'PRECOMPUTED_PROPOSAL_TOPK_TEST': 1000}), 'DATALOADER': CfgNode({'NUM_WORKERS': 4, 'ASPECT_RATIO_GROUPING': True, 'SAMPLER_TRAIN': 'TrainingSampler', 'REPEAT_THRESHOLD': 0.0, 'FILTER_EMPTY_ANNOTATIONS': True}), 'SOLVER': CfgNode({'LR_SCHEDULER_NAME': 'WarmupMultiStepLR', 'MAX_ITER': 1500, 'BASE_LR': 0.001, 'MOMENTUM': 0.9, 'NESTEROV': False, 'WEIGHT_DECAY': 0.0001, 'WEIGHT_DECAY_NORM': 0.0, 'GAMMA': 0.05, 'STEPS': (1000, 1500), 'WARMUP_FACTOR': 0.001, 'WARMUP_ITERS': 1000, 'WARMUP_METHOD': 'linear', 'CHECKPOINT_PERIOD': 5000, 'IMS_PER_BATCH': 4, 'REFERENCE_WORLD_SIZE': 0, 'BIAS_LR_FACTOR': 1.0, 'WEIGHT_DECAY_BIAS': 0.0001, 'CLIP_GRADIENTS': CfgNode({'ENABLED': False, 'CLIP_TYPE': 'value', 'CLIP_VALUE': 1.0, 'NORM_TYPE': 2.0}), 'AMP': CfgNode({'ENABLED': False})}), 'TEST': CfgNode({'EXPECTED_RESULTS': [], 'EVAL_PERIOD': 500, 'KEYPOINT_OKS_SIGMAS': [], 'DETECTIONS_PER_IMAGE': 100, 'AUG': CfgNode({'ENABLED': False, 'MIN_SIZES': (400, 500, 600, 700, 800, 900, 1000, 1100, 1200), 'MAX_SIZE': 4000, 'FLIP': True}), 'PRECISE_BN': CfgNode({'ENABLED': False, 'NUM_ITER': 200})}), 'OUTPUT_DIR': '/dbfs/mnt/datalake/rs/model_output/2021-08-24', 'SEED': -1, 'CUDNN_BENCHMARK': False, 'VIS_PERIOD': 0, 'GLOBAL': CfgNode({'HACK': 1.0})})
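I also noticed that INPUT.MIN_SIZE_TEST is 800 and MAX_SIZE_TEST is 1333 in the config above, which I understand DefaultPredictor uses to resize the input before inference. Would lowering those values before building the predictor be another way to reduce the effective resolution? A sketch of what I mean (the specific values are just examples, not settings I have validated):

from detectron2.engine import DefaultPredictor

cfg.INPUT.MIN_SIZE_TEST = 512   # shorter edge the predictor resizes to (currently 800)
cfg.INPUT.MAX_SIZE_TEST = 853   # cap on the longer edge (currently 1333)

predictor = DefaultPredictor(cfg)
outputs = predictor(np_image)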