0

我正在使用 FastAPI 对 ML 模型进行预测。因为单次预测需要超过 2 分钟,所以我用 FastAPI 的后台任务(BackgroundTasks)来运行预测。现在部署到 DigitalOcean 之后,后台 worker 无法正常运行,我收到 504: Gateway Timeout 错误。

启动命令

gunicorn --worker-tmp-dir /dev/shm --config gunicorn.config.py main:app

gunicorn.config.py

# Listen on all interfaces, port 8080 (the port the platform's proxy forwards to).
bind = "0.0.0.0:8080"
# NOTE(review): each worker is a separate OS process that loads its own copy
# of the ML model and its own module-level `response` dicts — a result written
# by one worker is invisible to requests served by another. Confirm 4 model
# copies fit in memory on the droplet/app instance.
workers = 4
# Run the FastAPI (ASGI) app under gunicorn via uvicorn's worker class.
worker_class = "uvicorn.workers.UvicornWorker"

main.py

# NOTE(review): module-level mutable dict shared by every request handled by
# this worker process — concurrent predictions overwrite each other's results,
# and each gunicorn worker has an independent copy. Consider a store keyed by
# a per-job task id instead.
response = {}

def predictions(solute, solvent):
    """Run the solubility model for one solute/solvent pair and store the
    result in the module-level ``response`` dict.

    Deliberately a plain ``def`` (not ``async def``): Starlette's
    BackgroundTasks runs sync callables in a thread pool, while an ``async``
    callable is awaited on the event loop itself. This model inference is
    CPU-bound (>2 minutes), so the async version blocked the event loop and
    every other request behind the proxy timed out — the likely cause of the
    504 Gateway Timeout errors.

    Args:
        solute: SMILES string of the solute.
        solvent: SMILES string of the solvent.
    """
    # The original also called Chem.MolFromSmiles for each input and discarded
    # the result (the `mol` variable was never used) — removed as dead code.
    solute_graph = get_graph_from_smile(solute)
    solvent_graph = get_graph_from_smile(solvent)
    delta_g, interaction_map = model([solute_graph.to(device), solvent_graph.to(device)])
    # Truncate toward zero before exporting the map as plain Python lists.
    interaction_map_one = torch.trunc(interaction_map)
    response["interaction_map"] = interaction_map_one.detach().numpy().tolist()
    response["predictions"] = delta_g.item()

@app.post('/predict_solubility')
async def get_prediction_result():
    """Return the most recently stored solute/solvent prediction.

    Renamed from ``post``: this file defined two module-level handlers both
    named ``post`` (this one and the /predict_solubility_json one), so the
    second definition silently rebound the first name. The routes still
    worked because the decorator registers the function object at definition
    time, but the collision is fragile and confusing. Route path unchanged.

    NOTE(review): reads a shared module-level dict, so this returns whatever
    pair was computed last — and only if the computing background task ran in
    THIS gunicorn worker process; other workers see an empty dict. Confirm
    this polling design is intended.
    """
    return {'result': response}

@app.post('/predict')
async def predict(background_tasks: BackgroundTasks,solute: str,solvent: str):
    """Schedule a solute/solvent prediction to run as a background task.

    ``solute`` and ``solvent`` are SMILES strings, taken as query parameters.
    The response returns immediately; results are fetched later from
    /predict_solubility.
    """
    background_tasks.add_task(predictions,solute,solvent)
    # NOTE(review): {'success'} is a SET literal — FastAPI serializes it as
    # the JSON array ["success"]. Presumably a dict such as
    # {"status": "success"} was intended; confirm with API consumers before
    # changing the response shape.
    return {'success'}

# NOTE(review): module-level dict shared by all requests in this worker
# process; concurrent /predict_two calls overwrite each other's entries, and
# each gunicorn worker holds an independent copy.
response_two = {}

def predictions_two(solute):
    """Predict delta-G of ``solute`` against every solvent in the global
    ``data`` iterable, recording ``{solvent_smiles: delta_g}`` in
    ``response_two``.

    Deliberately a plain ``def``: BackgroundTasks executes sync callables in
    a thread pool, whereas an ``async def`` body runs on the event loop, and
    this CPU-bound model loop (minutes per pair) would block every other
    request — a likely cause of the 504 gateway timeouts.

    NOTE(review): ``data`` is not defined in this excerpt — presumably a
    module-level list of solvent SMILES strings; confirm.
    """
    def _graph(smiles):
        # Canonical SMILES with explicit hydrogens -> model input graph on `device`.
        return get_graph_from_smile(
            Chem.MolToSmiles(Chem.AddHs(Chem.MolFromSmiles(smiles)))
        ).to(device)

    for solvent in data:
        delta_g, _interaction_map = model([_graph(solute), _graph(solvent)])
        response_two[solvent] = delta_g.item()
    
@app.post('/predict_solubility_json')
async def get_solubility_json_result():
    """Return the accumulated per-solvent delta-G results.

    Renamed from ``post``: this definition was the second module-level
    function named ``post`` and silently rebound the name used by the
    /predict_solubility handler. Routes were unaffected (the decorator
    registers the function object), but the shadowing is a latent trap.
    Route path unchanged.

    NOTE(review): reads a shared module-level dict, so results are only
    visible in the gunicorn worker process that ran the background task.
    """
    return {'result': response_two}

@app.get('/predict_two')
async def predict_two(background_tasks: BackgroundTasks,solute: str):
    """Kick off batch predictions of ``solute`` against the solvent list as
    a background task; results are fetched later from
    /predict_solubility_json.

    NOTE(review): a GET endpoint that triggers server-side work is
    unconventional — consider POST, matching /predict.
    """
    background_tasks.add_task(predictions_two,solute)
    # NOTE(review): {'success'} is a SET literal, serialized by FastAPI as
    # the JSON array ["success"]; a dict like {"status": "success"} was
    # probably intended — confirm before changing the response shape.
    return {'success'}

if __name__ == "__main__":
    # Local dev entry point only; production runs under gunicorn with the
    # UvicornWorker class (see gunicorn.config.py).
    # Pass the app as an import string: uvicorn ignores reload=True when
    # given an app OBJECT, because auto-reload must re-import the module.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)

我已经在 DigitalOcean 的组件中添加了 worker,但仍然遇到服务器超时。我不确定是我没有正确地用 FastAPI 实现后台任务,还是我的启动命令有问题。

4

0 回答 0