我已经使用 asyncpg 实现了 pgbouncer 以及 asyncpg 本机池实现。虽然我可以通过 pgbouncer 发送许多并发请求,但是对于 1000 个并发请求,我的 p95 接近 8 秒,而与 asyncpg 本机池实现一样,p95 仅高于 1.5 秒。
为什么会有这么大的性能差异?这是从 asyncpg 连接到 pgbouncer 的正确方法吗?
代码示例:
- 使用 asyncpg 和 pgbouncer:
# Credentials for connecting through the local PgBouncer proxy.
pgbouncer_creds = {
    "user": "myuser",
    "password": "password",
    "host": "localhost",
    "port": "16432",
    "database": "db1",
}

# Per-request latencies in seconds (was referenced but never defined — NameError).
times = []


async def test():
    """Fire 1000 concurrent requests through PgBouncer, one fresh connection each.

    NOTE(review): this is the main source of the ~8s p95. Every request pays
    the full asyncpg connection handshake (including type introspection), and
    with ``pool_mode = session`` PgBouncer pins one server connection to each
    client connection, so 1000 clients queue behind ``default_pool_size = 100``.
    To compare fairly with the native-pool version, point an asyncpg pool at
    PgBouncer (reusing connections) and/or switch to transaction pooling.
    """

    async def db():
        start = time.time()
        connection = await asyncpg.connect(**pgbouncer_creds)
        try:
            await connection.fetchrow(
                "SELECT * FROM test WHERE id=$1;",
                "5db03111822547b59c3edaa324e0f829",
            )
        finally:
            # Always release the connection, even when the query raises.
            await connection.close()
        times.append(time.time() - start)

    await asyncio.gather(*(db() for _ in range(1000)))


if __name__ == '__main__':
    # asyncio.run() replaces the deprecated get_event_loop()/run_until_complete()
    # boilerplate and guarantees loop cleanup.
    asyncio.run(test())
- 本机 asyncpg 池实现
# Credentials for connecting directly to the backend Postgres server.
creds = {
    "user": "myuser",
    "password": "password",
    "host": "*.*.*.*",
    "port": "5432",
    "database": "db1",
}

# Per-request latencies in seconds (was referenced but never defined — NameError).
times = []


async def test():
    """Fire 1000 concurrent requests against a pre-warmed asyncpg pool.

    Connections are created once up front (min_size == max_size == 100), so each
    request only pays acquire + query — which is why this variant is much faster
    than opening a fresh connection per request.
    """
    pool = await asyncpg.create_pool(**creds, max_size=100, min_size=100)
    try:
        async def db():
            start = time.time()
            async with pool.acquire() as connection:
                await connection.fetchrow(
                    "SELECT * FROM test WHERE id=$1;",
                    "5db03111822547b59c3edaa324e0f829",
                )
            times.append(time.time() - start)

        await asyncio.gather(*(db() for _ in range(1000)))
    finally:
        # Close pooled connections gracefully instead of leaking them.
        await pool.close()


if __name__ == '__main__':
    # asyncio.run() replaces the deprecated get_event_loop()/run_until_complete()
    # boilerplate and guarantees loop cleanup.
    asyncio.run(test())
pgbouncer.ini 文件:
[databases]
; Logical database "db1", proxied to the backend Postgres at *.*.*.*.
db1 = host=*.*.*.* dbname=db1

[pgbouncer]
listen_addr = *
listen_port = 16432
; NOTE(review): "plain" sends the password in cleartext between client and
; PgBouncer — fine for local benchmarking, not for production.
auth_type = plain
auth_file = userlist.txt
; Up to 100 server connections per (database, user) pool.
default_pool_size = 100
max_db_connections = 200
max_client_conn = 10000
max_user_connections = 200
; NOTE(review): session pooling pins one server connection to each client
; connection for that client's whole lifetime. With 1000 concurrent clients
; and a pool of 100, clients queue waiting for a free server connection —
; the likely cause of the high p95. For short request/response workloads,
; "pool_mode = transaction" releases the server connection after each
; transaction (asyncpg then needs statement_cache_size=0).
pool_mode = session