
I am new to asks and trio in Python, and I have some sample code. Let me explain: I have a list of URLs, each of which is a news URL, and each of those pages contains sub-URLs. The first URL is requested, all the other hrefs on the page are collected into a list, and then the articles behind every href in that list are fetched. The problem is that at some point the articles come back empty.

I tried the example code with a single URL, and it works:

import asks
import trio
from goose3 import Goose
import logging as log
from goose3.configuration import ArticleContextPattern
from pprint import pprint
import json
import time

asks.init('trio') 


async def extractor(path, htmls, paths, session):

    try:
        r = await session.get(path, timeout=2)
        out = r.content
        htmls.append(out)
        paths.append(path)
    except Exception as e:
        out = str(e)
        htmls.append(out)
        paths.append(path)


async def main(path_list, session):    
    htmls = []
    paths = []
    async with trio.open_nursery() as n:
        for path in path_list:
            n.start_soon(extractor, path, htmls, paths, session)

    return htmls, paths


async def run(urls, conns=50):
    s = asks.Session(connections=conns)
    g = Goose()

    htmls, paths = await main(urls, s)
    print(htmls,"       ",paths)
    cleaned = []
    for html, path in zip(htmls, paths):
        dic = {}
        dic['url'] = path
        if html is not None:                            
            try:
                #g.config.known_context_pattern = ArticleContextPattern(attr='class', value='the-post')
                article = g.extract(raw_html=html)
                author=article.authors
                dic['goose_text'] = article.cleaned_text
                #print(article.cleaned_text)
                #dic['goose_date'] = article.publish_datetime
                dic['goose_title'] = article.title
                if author:
                    dic['authors']=author[0]
                else:
                    dic['authors'] =''
            except Exception as e:
                # fall back to the raw html when Goose cannot extract anything
                print(e)
                log.info('goose found no text using html')
                dic['goose_html'] = html
                dic['goose_text'] = ''
                dic['goose_date'] = None
                dic['goose_title'] = None
                dic['authors'] = ''
            cleaned.append(dic)
    return cleaned




async def real_main():
    sss= '[{"crawl_delay_sec": 0, "name": "mining","goose_text":"","article_date":"","title":"", "story_url": "http://www.mining.com/canalaska-start-drilling-west-mcarthur-uranium-project","url": "http://www.mining.com/tag/latin-america/page/1/"},{"crawl_delay_sec": 0, "name": "mining", "story_url": "http://www.mining.com/web/tesla-fires-sound-alarms-safety-electric-car-batteries", "url": "http://www.mining.com/tag/latin-america/page/1/"}]'

    obj = json.loads(sss)
    pprint(obj)

    articles=[]
    for l in obj:
      articles.append(await run([l['story_url']]))
      #await trio.sleep(3)

    pprint(articles)

if __name__ == "__main__":
    trio.run(real_main)

How do I fetch the article data without missing anything?


2 Answers


I lack some further information to answer your question in depth, but it is most likely related to how Goose searches for text in the HTML. See this answer for more details: https://stackoverflow.com/a/30408761/8867146
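
If Goose cannot find the article container on its own, it can be given a hint. Below is a minimal sketch along the lines of the commented-out ArticleContextPattern line in your code; the attr/value pair 'class'/'the-post' is hypothetical, so inspect the target page and substitute the real container attribute:

from goose3 import Goose
from goose3.configuration import Configuration, ArticleContextPattern

def extract_with_hint(html):
    # Hypothetical: 'the-post' stands in for whatever class the target
    # site actually wraps its article body in; inspect the HTML first.
    config = Configuration()
    config.known_context_patterns = ArticleContextPattern(attr='class', value='the-post')
    article = Goose(config).extract(raw_html=html)
    return article.cleaned_text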

Answered 2019-06-07T08:08:35.730

asks does not always raise an exception when the status code is != 200. You need to check the response's status code before using its content. You may also want to increase the timeout; 2 seconds is not enough, particularly when you are firing off up to 50 connections concurrently.

In any case, here is a simplified program: all of the Goose machinery is entirely unnecessary for demonstrating the actual error, two separate result arrays are not a good idea, and appending error messages to the result array looks wrong.

Also, you should investigate running the URL fetching and the processing in parallel; trio.open_memory_channel is your friend (see the sketch after the program below).


import asks
asks.init('trio')

import trio
from pprint import pprint

async def extractor(path, session, results):
    try:
        r = await session.get(path, timeout=2)
        if r.status_code != 200:
            raise asks.errors.BadStatus("Not OK", r.status_code)
        out = r.content
    except Exception as e:
        # do some reasonable error handling
        print(path, repr(e))
    else:
        results.append((out, path))

async def main(path_list, session):
    results = []
    async with trio.open_nursery() as n:
        for path in path_list:
            n.start_soon(extractor, path, session, results)
    return results


async def run(conns=50):
    s = asks.Session(connections=conns)

    urls = [
            "http://www.mining.com/web/tesla-fires-sound-alarms-safety-electric-car-batteries",
            "http://www.mining.com/canalaska-start-drilling-west-mcarthur-uranium-project",
            "https://www.google.com",  # just for testing more parallel connections
            "https://www.debian.org",
            ]

    results = await main(urls, s)
    for content, path in results:
        pass  # analyze this result
    print("OK")

if __name__ == "__main__":
    trio.run(run)
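
As a follow-up on the trio.open_memory_channel hint: here is a minimal producer/consumer sketch, not a definitive implementation. Fetchers push (url, content) pairs into the channel as each request completes, and a single consumer processes them while the remaining fetches are still in flight. The URLs and the 10-second timeout are placeholders:

import asks
asks.init('trio')

import trio

async def fetcher(url, session, send_channel):
    # Producer: fetch one URL and push the result into the channel.
    # Closing our clone of the channel when done lets the consumer
    # stop once every fetcher has finished.
    async with send_channel:
        try:
            r = await session.get(url, timeout=10)
            if r.status_code == 200:
                await send_channel.send((url, r.content))
        except Exception as exc:
            print(url, repr(exc))

async def processor(receive_channel):
    # Consumer: handle each page as soon as it arrives, while the
    # other fetches are still running.
    async with receive_channel:
        async for url, content in receive_channel:
            print(url, len(content))  # e.g. run the Goose extraction here

async def pipeline(urls, conns=10):
    session = asks.Session(connections=conns)
    send_channel, receive_channel = trio.open_memory_channel(conns)
    async with trio.open_nursery() as nursery:
        nursery.start_soon(processor, receive_channel)
        # Close the original send channel once all fetchers are started;
        # each fetcher holds (and closes) its own clone.
        async with send_channel:
            for url in urls:
                nursery.start_soon(fetcher, url, session, send_channel.clone())

if __name__ == "__main__":
    trio.run(pipeline, ["https://www.debian.org", "https://www.google.com"])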
Answered 2019-06-10T19:35:49.540