我有以下 Django 模型。我不确定在蜘蛛中使用scrapy管道将这些相互关联的对象扫描到Django中的数据库时,保存这些相互关联对象的最佳方法是什么。似乎scrapy管道仅用于处理一种“类型”的项目
模型.py
class Parent(models.Model):
field1 = CharField()
class ParentX(models.Model):
field2 = CharField()
parent = models.OneToOneField(Parent, related_name = 'extra_properties')
class Child(models.Model):
field3 = CharField()
parent = models.ForeignKey(Parent, related_name='childs')
项目.py
# uses DjangoItem https://github.com/scrapy-plugins/scrapy-djangoitem
class ParentItem(DjangoItem):
django_model = Parent
class ParentXItem(DjangoItem):
django_model = ParentX
class ChildItem(DjangoItem):
django_model = Child
蜘蛛.py
class MySpider(scrapy.Spider):
name = "myspider"
allowed_domains = ["abc.com"]
start_urls = [
"http://www.example.com", # this page has ids of several Parent objects whose full details are in their individual pages
]
def parse(self, response):
parent_object_ids = [] #list from scraping the ids of the parent objects
for parent_id in parent_object_ids:
url = "http://www.example.com/%s" % parent_id
yield scrapy.Request(url, callback=self.parse_detail)
def parse_detail(self, response):
p = ParentItem()
px = ParentXItem()
c = ChildItem()
# populate p, px and c1, c2 with various data from the response.body
yield p
yield px
yield c1
yield c2 ... etc c3, c4
pipelines.py - 不知道在这里做什么
class ScrapytestPipeline(object):
def process_item(self, item, spider):
# This is where typically storage to database happens
# Now, I dont know whether the item is a ParentItem or ParentXItem or ChildItem
# Ideally, I want to first create the Parent obj and then ParentX obj (and point p.extra_properties = px), and then child objects
# c1.parent = p, c2.parent = p
# But I am not sure how to have pipeline do this in a sequential way from any order of items received