I'm trying to insert 150,000 generated rows into Cassandra using BATCH in the Python driver. It takes about 30 seconds. What should I do to optimize this and insert the data faster? Here is my code:
from cassandra.cluster import Cluster
from faker import Faker
import time
fake = Faker()
cluster = Cluster(['127.0.0.1'], port=9042)
session = cluster.connect()
session.default_timeout = 150
num = 0
def create_data():
    global num
    BATCH_SIZE = 1500
    BATCH_STMT = 'BEGIN BATCH'
    for i in range(BATCH_SIZE):
        BATCH_STMT += f" INSERT INTO tt(id, title) VALUES ('{num}', '{fake.name()}')"
        num += 1
    BATCH_STMT += ' APPLY BATCH;'
    prep_batch = session.prepare(BATCH_STMT)
    return prep_batch
tt = []
session.execute('USE ttest_2')
prep_batch = []
print("Start create data function!")
start = time.time()
for i in range(100):
    prep_batch.append(create_data())
end = time.time()
print("Time for create fake data: ", end - start)
start = time.time()
for i in range(100):
    session.execute(prep_batch[i])
    time.sleep(0.00000001)
end = time.time()
print("Time for execution insert into table: ", end - start)