
I have been using scrapinghub for a while now. I have a few spiders that run every day, and every weekend I log in to collect the scraped data. So I end up opening more than seven jobs per spider, one at a time, downloading the data and moving on to the next, then the next spider, and so on. Is there a way to fetch all the extracted data from the spiders' finished jobs at once?


2 Answers


What I did was use the scrapinghub Python API client interface, so if you are familiar with Python I suggest using it; otherwise you can curl it... https://doc.scrapinghub.com/api/items.html#item-object
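For the curl route, here is a minimal sketch of what fetching a job's items over the Items API could look like from Python with the requests library; the job key 179923/1/1 is only a placeholder, and the format=json parameter and basic-auth scheme (API key as user name, empty password) are taken from those docs:

import requests

API_KEY = 'YOURAPIKEYGOESHERE'  # your Scrapinghub API key
JOB_KEY = '179923/1/1'          # placeholder: project_id/spider_id/job_id

# The Items API authenticates with HTTP Basic auth: API key as user, empty password
resp = requests.get(
    'https://storage.scrapinghub.com/items/' + JOB_KEY,
    params={'format': 'json'},
    auth=(API_KEY, ''),
)
resp.raise_for_status()
for item in resp.json():
    print(item)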

I have a pet project that scrapes various video hosting sites, grabbing the video title, stream URL + category (depending on whether the scraper needs it)... It is deployed to scrapinghub, and then, using the shub API (Python based), I iterate over the dict-like items to build an .m3u playlist.

The purpose is to aggregate all the wanted videos in a single playlist (using the vlc player in my case).

Here is a scrap code snippet (not my actual project app):

from __future__ import print_function
from scrapinghub import Connection
import os

conn = Connection('YOURAPIKEYGOESHERE')
#179923/1/1
project_ids = conn.project_ids()  # every project id visible to this API key
print("PROJECTS")
print("-#-" * 30)
for index, item in enumerate(project_ids[1:]):
    index = str(index)
    item = str(item)
    project = conn[item]
    pspi = project.spiders()
    jobs = project.jobs()
    for x in pspi:
        print("[" + index + "] | PROJECT ID " + item, x['id'], x['tags'])
print("-#-" * 30)
print(project_ids[0:4])
print(project_ids[4:8])
print(project_ids[8:12])
print(project_ids[12:16])
print(project_ids[16:20])
print(project_ids[20:24])
print("-#-" * 30)

project = conn['180064'] #Manually Inserted
print("CONNECTING 2 |" + project.id)
print(project)
print("-#-" * 30)
pspi = project.spiders()
for x in pspi:
    print(x)
print("-#-" * 30)

jobs = project.jobs()
print(jobs)
for job in jobs:
    print(job)

job = project.job(u'180064/3/1') #Manually Inserted
print(job)
print("ITEMS")
print("-#-" * 30)
itemCount = job.info['items_scraped']
print("Items Scraped: {}".format(itemCount))
print(job.info['items_scraped'])
print("-#-" * 30)



def printF():
    ipr = input("Do you wish to print? [y/n] \n")
    if ipr == "y":
        name = input("what is the name of project?\n")
        print("-#-" * 30)
        print("Printing items to m3u")
        print("-#-" * 30)
        # Append one #EXTINF entry per scraped item
        with open(name + '.m3u', 'a') as f:
            for item in job.items():
                f.write('#EXTINF:0, ' + str(item['title']) + '\n' + str(item['vidsrc']) + '\n')
        infile = name + ".m3u"
        outfile = name + "_clean.m3u"

        # Strip the stray list brackets that str() leaves around field values
        delete_list = ["['", "']"]
        fin = open(infile)
        fout = open(outfile, "w+")
        for line in fin:
            for word in delete_list:
                line = line.replace(word, "")
            fout.write(line)
        fin.close()
        fout.close()
    else:
        print("Not printing")


printF()
Answered 2017-04-17T21:33:35.490

Here is my final code:

#!/usr/bin/python
# coding=utf-8
from scrapinghub import ScrapinghubClient
import unicodecsv as csv
import os
import logging
import pandas as pd
import datetime
import pickle

# Create and configure logger
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
logging.basicConfig(level = logging.INFO,
                    format = LOG_FORMAT,
                    filemode = 'w')
logger = logging.getLogger()

logger.info("Starting downloading")

# Enter ScrapingHub
apikey = '........'  # your API key as a string
client = ScrapinghubClient(apikey)
projectID = .......
project = client.get_project(projectID)
# Get a list of dictionaries with info (one for every spider in the project)
spider_dicts_list = project.spiders.list()
for spider_dict in spider_dicts_list:
    # Extract the id of my spider from the dict
    spiderID = spider_dict["id"]
    logger.info("Working with spider: " + spiderID)
    # Get that spider and assign it to the object "spider"
    spider = project.spiders.get(spiderID)
    # Get a generator object for the jobs of that spider
    jobs_summary = spider.jobs.iter()
    # Generate all job keys using the generator object
    job_keys = [j['key'] for j in jobs_summary]
    for job_key in job_keys:
        # Get the corresponding job from the key, as "job"
        job = project.jobs.get(job_key)
        # Keep the last (numeric) part of the key for logging
        job_key_name = job_key.split("/")[2]
        # Check to see if the job was completed
        if job.metadata.get(u'close_reason') == u'finished':
            # Create an empty DataFrame that will store all items (dictionaries)
            itemsDataFrame = pd.DataFrame()
            for item_aggelia in job.items.iter():
                # Append each item (a dictionary) to the DataFrame
                itemsDataFrame = itemsDataFrame.append(item_aggelia, ignore_index=True)
            # Export a pickle, but only if the DataFrame is not empty
            if not itemsDataFrame.empty:
                for meta in job.metadata.iter():
                    if meta[0] == u"scrapystats":
                        timestamp = meta[1][u'finish_time']/1000.0
                dt = datetime.datetime.fromtimestamp(timestamp)
                filename = spiderID+" "+str(dt.year)+"-"+str(dt.month)+"-"+str(dt.day)+" "+str(dt.hour)+"_"+str(dt.minute)+"_"+str(dt.second)+" "+'Items.pickle'
                directory = u"E:/Documents/OneDrive/4_Προγραμματισμός/Scrapy/Αγορά Ακινήτων/"+spiderID+u"/Αρχεία_pd.DataFrame"
                os.chdir(directory)
                with open(filename, 'wb') as f:
                    pickle.dump(itemsDataFrame, f)
            # Check for empty fields
            colList = itemsDataFrame.columns.tolist()
            for col in colList:
                if itemsDataFrame[col].isnull().all():
                    logger.warning("Found Null Field, in job " + job_key_name +": " + col)
            # Delete the job from ScrapingHub
            logger.debug("Deleting job " + job_key_name)
            job.delete()
        else:
            logger.info("Found a job that didn't finish properly. Job key: " + job_key+". close_reason:" + job.metadata.get(u'close_reason'))
Answered 2017-08-22T13:12:24.473