Total newbie here, trying to read a list of urls from a csv and return the scraped items in a csv. I need some help figuring out where I'm going wrong. Spider code:
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
import random

class incyspider(BaseSpider):
    name = "incyspider"

    def __init__(self):
        super(incyspider, self).__init__()
        domain_name = "incyspider.co.uk"
        f = open("urls.csv")
        start_urls = [url.strip() for url in f.readlines()]
        f.close

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="Product"]')
        items = []
        for site in sites:
            item['title'] = hxs.select('//div[@class="Name"]/node()').extract()
            item['hlink'] = hxs.select('//div[@class="Price"]/node()').extract()
            item['price'] = hxs.select('//div[@class="Codes"]/node()').extract()
            items.append(item)
        return items

SPIDER = incyspider()
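In case it makes the intent clearer, here's roughly what I think a fixed version should look like, though I'm not at all sure about it. The IncyspiderSpider / IncyspiderItem names and the incyspider.items import path are just my guesses (keeping two classes both called incyspider seemed like asking for trouble), and I don't know whether the SPIDER = incyspider() line is still needed for my Scrapy version, so I've left it out here:

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector

from incyspider.items import IncyspiderItem  # guessed import path / renamed item class


class IncyspiderSpider(BaseSpider):
    name = "incyspider"
    domain_name = "incyspider.co.uk"

    def __init__(self, *args, **kwargs):
        super(IncyspiderSpider, self).__init__(*args, **kwargs)
        # Read one url per line from the csv and use them as the start urls.
        with open("urls.csv") as f:
            self.start_urls = [url.strip() for url in f if url.strip()]

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//div[@class="Product"]')
        items = []
        for site in sites:
            # New item per product, and selects relative to `site`
            # rather than re-querying the whole page with hxs.
            item = IncyspiderItem()
            item['title'] = site.select('div[@class="Name"]/node()').extract()
            item['hlink'] = site.select('div[@class="Price"]/node()').extract()
            item['price'] = site.select('div[@class="Codes"]/node()').extract()
            items.append(item)
        return items

The main differences from my code above are that start_urls is set on self inside __init__, the file is read with a with block so it actually gets closed, each product gets its own item instance, and the selects inside the loop are relative to site instead of hxs.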
Here's the items.py code:
from scrapy.item import Item, Field

class incyspider(Item):
    # define the fields for your item here like:
    # name = Field()
    title = Field()
    hlink = Field()
    price = Field()
    pass
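And for what it's worth, this is how I understand an Item is supposed to be filled in (IncyspiderItem is just what I'd rename the class to so it doesn't clash with the spider class):

from scrapy.item import Item, Field

class IncyspiderItem(Item):
    title = Field()
    hlink = Field()
    price = Field()

# An Item behaves like a dict restricted to its declared fields:
item = IncyspiderItem()
item['title'] = [u'Some product name']
print item['title']   # [u'Some product name']
print item.keys()     # only the fields that have been set, i.e. ['title']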
To run it, I'm using
scrapy crawl incyspider -o items.csv -t csv
I'd seriously appreciate any pointers.