I am trying to loop through a text file that contains a list of urls and have my python script parse each url in the file.
The code only processes the last line of the file; it should process every line and append the results to a file.
I am not sure how to do this, so any help is appreciated. Thanks!
import feedparser # pip install feedparser
from BeautifulSoup import BeautifulStoneSoup
from BeautifulSoup import BeautifulSoup
import re
urls = open("c:/a2.txt", "r") # file with rss urls
for lines in urls:
    d = feedparser.parse(lines)  # feedparser is supposed to process every url in the file (urls)

statusupdate = d.entries[0].description
soup = BeautifulStoneSoup(statusupdate)

for e in d.entries:
    print(e.title)
    print(e.link)
    print(soup.find("img")["src"])
    print("\n")  # 2 newlines
# writes title,link,image to a file and adds some characters
f = open(r'c:\a.txt', 'a')
f.writelines('"')
f.writelines(e.title)
f.writelines('"')
f.writelines(",")
f.writelines('"')
f.writelines(e.link)
f.writelines('"')
f.writelines(",")
f.writelines('"')
f.writelines(soup.find("img")["src"])
f.writelines('"')
f.writelines(",")
f.writelines("\n")
f.close()
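
For reference, here is a minimal sketch of the kind of structure I think is needed (keeping the same file paths c:/a2.txt and c:\a.txt and the BeautifulSoup 3 import from above, and assuming every non-blank line in a2.txt is a valid feed url): all of the per-feed work stays inside the loop, and each entry is written out as it is parsed instead of only after the loop has finished.

# A minimal sketch, assuming the same paths and BeautifulSoup 3 as above.
import feedparser  # pip install feedparser
from BeautifulSoup import BeautifulStoneSoup

with open("c:/a2.txt", "r") as urls, open(r"c:\a.txt", "a") as out:
    for line in urls:
        line = line.strip()
        if not line:
            continue                      # skip blank lines in the url file
        d = feedparser.parse(line)        # parse this feed url
        if not d.entries:
            continue                      # nothing to write for this feed
        # same behaviour as the original: take the image from the first entry
        soup = BeautifulStoneSoup(d.entries[0].description)
        img = soup.find("img")
        src = img["src"] if img else ""   # guard against feeds with no image
        for e in d.entries:
            # same output format as the original: "title","link","src",
            out.write('"%s","%s","%s",\n' % (e.title, e.link, src))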