55

I'm trying to crawl websites using a crawler written in Python. I want to integrate Tor with Python meaning I want to crawl the site anonymously using Tor.

I tried doing this, but it doesn't seem to work: when I checked my IP (via Python), it was still the same as before I used Tor.

# NOTE(review): this is the broken attempt from the question — it does not work.
# "tcp" is not a scheme key urllib2's ProxyHandler dispatches on (it expects
# "http"/"https"), and port 9050 is Tor's SOCKS port: Tor rejects plain HTTP
# proxy traffic ("Tor is not an HTTP Proxy"), so requests bypass the proxy
# and the real IP is still visible.
import urllib2
proxy_handler = urllib2.ProxyHandler({"tcp":"http://127.0.0.1:9050"})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)
4

12 回答 12

23

您正在尝试连接到 SOCKS 端口 - Tor 拒绝任何非 SOCKS 流量。您可以通过中间人 - Privoxy - 使用端口 8118 进行连接。

例子:

proxy_support = urllib2.ProxyHandler({"http" : "127.0.0.1:8118"})
opener = urllib2.build_opener(proxy_support) 
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
print opener.open('http://www.google.com').read()

另请注意传递给 ProxyHandler 的代理地址格式:只写 ip:port,不带 http:// 前缀。

于 2010-01-06T19:37:37.497 回答
9
pip install PySocks

然后:

import socket
import socks
import urllib2

# Endpoint that echoes the caller's public IP.
IP_ECHO = 'http://checkip.amazonaws.com/'

# First request goes out directly: prints the real public IP.
print(urllib2.urlopen(IP_ECHO).read())

# Monkey-patch socket creation so every new connection is tunnelled
# through Tor's SOCKS5 listener on localhost:9050, then ask again.
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 9050)
socket.socket = socks.socksocket
print(urllib2.urlopen(IP_ECHO).read())

像 https://stackoverflow.com/a/2015649/895245 中那样使用 urllib2.ProxyHandler 会失败,报错:

Tor is not an HTTP Proxy

提到:如何将 SOCKS 4/5 代理与 urllib2 一起使用?

在 Ubuntu 15.10、Tor 0.2.6.10、Python 2.7.10 上测试。

于 2015-12-28T12:40:16.057 回答
3

以下代码 100% 使用 Python 3.4

(您需要使用此代码保持 TOR 浏览器打开)

此脚本通过 socks5 连接到 TOR 从 checkip.dyn.com 获取 IP,更改身份并重新发送请求以获取新 IP(循环 10 次)

您需要安装适当的库才能使其正常工作。(享受而不滥用)

# Rotate Tor circuits: fetch the apparent IP ten times, requesting a new
# identity (NEWNYM) between fetches via the Tor Browser control port (9151)
# while routing application traffic through its SOCKS listener (9150).
import socks
import socket
import time
from stem.control import Controller
from stem import Signal
import requests
from bs4 import BeautifulSoup
err = 0      # count of failed fetches
counter = 0  # count of fetches attempted (and thus IPs used)
url = "checkip.dyn.com"  # NOTE(review): unused — the literal URL below is fetched instead
with Controller.from_port(port = 9151) as controller:
    try:
        controller.authenticate()
        # Tunnel all new sockets through Tor's SOCKS5 listener.
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150)
        socket.socket = socks.socksocket
        while counter < 10:
            r = requests.get("http://checkip.dyn.com")
            soup = BeautifulSoup(r.content)
            print(soup.find("body").text)
            counter = counter + 1
            #wait till next identity will be available
            controller.signal(Signal.NEWNYM)
            time.sleep(controller.get_newnym_wait())
    except requests.HTTPError:
        print("Could not reach URL")
        err = err + 1
print("Used " + str(counter) + " IPs and got " + str(err) + " errors")
于 2015-05-22T19:01:22.370 回答
2

这是在python中使用tor代理下载文件的代码:(更新网址)

import urllib2

url = "http://www.disneypicture.net/data/media/17/Donald_Duck2.gif"

proxy = urllib2.ProxyHandler({'http': '127.0.0.1:8118'})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)

file_name = url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)

file_size_dl = 0
block_sz = 8192
while True:
    buffer = u.read(block_sz)
    if not buffer:
        break

    file_size_dl += len(buffer)
    f.write(buffer)
    status = r"%10d  [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
    status = status + chr(8)*(len(status)+1)
    print status,

f.close()
于 2012-01-27T17:02:29.273 回答
2

在 tor 前面使用 privoxy 作为 http-proxy 对我有用 - 这是一个爬虫模板:


import urllib2
import httplib

from BeautifulSoup import BeautifulSoup
from time import sleep

class Scraper(object):
    """Base crawler that fetches pages through an HTTP proxy (Privoxy in
    front of Tor by default) and parses them with BeautifulSoup."""

    def __init__(self, options, args):
        # Fall back to the local Privoxy listener when no proxy was given.
        if options.proxy is None:
            options.proxy = "http://localhost:8118/"
        self._open = self._get_opener(options.proxy)

    def _get_opener(self, proxy):
        # Return a bound `open` callable that routes HTTP through `proxy`.
        proxy_handler = urllib2.ProxyHandler({'http': proxy})
        opener = urllib2.build_opener(proxy_handler)
        return opener.open

    def get_soup(self, url):
        """Fetch `url` and return its parsed soup, retrying forever (with a
        one-second pause) on transient network/parse errors."""
        soup = None
        while soup is None:
            try:
                request = urllib2.Request(url)
                request.add_header('User-Agent', 'foo bar useragent')
                soup = BeautifulSoup(self._open(request))
            except (httplib.IncompleteRead, httplib.BadStatusLine,
                    urllib2.HTTPError, ValueError, urllib2.URLError), err:
                sleep(1)
        return soup

class PageType(Scraper):
    """Scraper for one paginated listing; follows next-page links until
    no further page is found."""

    _URL_TEMPL = "http://foobar.com/baz/%s"

    def items_from_page(self, url):
        """Return (nextpage, items) scraped from a single page."""
        nextpage = None
        soup = self.get_soup(url)

        items = []
        for item in soup.findAll("foo"):
            items.append(item["bar"])
            # FIX: was misspelled `nexpage`, so pagination never advanced.
            nextpage = item["href"]

        return nextpage, items

    def get_items(self):
        """Walk every page starting at start.html and return all items."""
        # FIX: the original called the undefined `_categories_from_page` and
        # `_START_URL`; use the method and template defined on this class.
        nextpage, items = self.items_from_page(self._URL_TEMPL % "start.html")
        while nextpage is not None:
            nextpage, newitems = self.items_from_page(self._URL_TEMPL % nextpage)
            items.extend(newitems)
        # FIX: `items` is a list — `return items()` raised TypeError.
        return items

# NOTE(review): as written this raises TypeError — Scraper.__init__ requires
# (options, args); supply them (e.g. from optparse) when instantiating.
pt = PageType()
print pt.get_items()
于 2009-07-08T12:13:35.120 回答
2

以下解决方案适用于我在Python 3中。改编自 CiroSantilli 的回答

使用urllib(Python 3 中 urllib2 的名称):

import socks
import socket
from urllib.request import urlopen

# IP-echo service used to confirm traffic leaves via Tor.
target = 'http://icanhazip.com/'

# Patch socket creation so every connection goes through the Tor
# Browser's SOCKS5 listener on localhost:9150.
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 9150)
socket.socket = socks.socksocket

print(urlopen(target).read())

requests

import socks
import socket
import requests

# IP-echo endpoint; the printed address should be a Tor exit node.
target = 'http://icanhazip.com/'

# Route all new sockets — including those opened by requests —
# through Tor's SOCKS5 listener (Tor Browser default port 9150).
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', 9150)
socket.socket = socks.socksocket

print(requests.get(target).text)

使用Selenium+ PhantomJS:

from selenium import webdriver

# Page that simply echoes the visitor's IP address.
url = 'http://icanhazip.com/'

# Tell PhantomJS to tunnel all of its traffic through Tor's SOCKS5 port.
tor_proxy_args = ['--proxy=localhost:9150', '--proxy-type=socks5']
phantomjs_path = '/your/path/to/phantomjs'

driver = webdriver.PhantomJS(executable_path=phantomjs_path,
                             service_args=tor_proxy_args)

driver.get(url)
print(driver.page_source)
driver.close()

注意:如果您打算经常使用 Tor,请考虑捐款以支持他们出色的工作!

于 2016-10-20T12:46:22.373 回答
2

更新- 最新(v2.10.0 以上)requests库支持 socks 代理,附加要求为requests[socks].

安装-

pip install requests requests[socks]

基本用法-

# Use requests' native SOCKS support (requires requests[socks], v2.10.0+)
# so the session tunnels both schemes through Tor's default SOCKS port.
import requests
session = requests.session()
# Tor uses the 9050 port as the default socks port
session.proxies = {'http':  'socks5://127.0.0.1:9050',
                   'https': 'socks5://127.0.0.1:9050'}

# Make a request through the Tor connection
# IP visible through Tor
print session.get("http://httpbin.org/ip").text
# Above should print an IP different than your public IP

# Following prints your normal public IP
print requests.get("http://httpbin.org/ip").text

旧答案- 尽管这是一篇旧帖子,但之所以回答,是因为似乎没有人提到 requesocks 库。

它基本上是 requests 库的一个移植(port)。请注意,该库是一个旧的分支(最后更新于 2013 年 3 月 25 日),可能与最新的 requests 库具有不同的功能。

安装-

pip install requesocks

基本用法-

# Legacy alternative: requesocks, a SOCKS-capable fork of requests
# (unmaintained since 2013 — prefer requests[socks] on modern installs).
# Assuming that Tor is up & running
import requesocks
session = requesocks.session()
# Tor uses the 9050 port as the default socks port
session.proxies = {'http':  'socks5://127.0.0.1:9050',
                   'https': 'socks5://127.0.0.1:9050'}
# Make a request through the Tor connection
# IP visible through Tor
print session.get("http://httpbin.org/ip").text
# Above should print an IP different than your public IP
# Following prints your normal public IP
import requests
print requests.get("http://httpbin.org/ip").text
于 2015-11-23T16:28:09.003 回答
1

也许您遇到了一些网络连接问题?上面的脚本对我有用(我换用了一个不同的 URL——http://stackoverflow.com/ ——并得到了预期的页面):

<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd" >
 <html> <head>

<title>Stack Overflow</title>        
<link rel="stylesheet" href="/content/all.css?v=3856">

(ETC。)

于 2009-07-08T06:34:44.910 回答
0

Tor 是一个 socks 代理。使用您引用的示例直接连接到它失败,并显示“urlopen error Tunnel connection failed: 501 Tor is not an HTTP Proxy”。正如其他人所提到的,您可以使用 Privoxy 解决这个问题。

或者,您也可以使用 PycURL 或 SocksiPy。有关将两者与 tor 一起使用的示例,请参阅...

https://stem.torproject.org/tutorials/to_russia_with_love.html

于 2015-06-04T16:41:28.750 回答
0

要扩展上述关于使用 torify 和 Tor 浏览器(并且不需要 Privoxy)的评论:

pip install PySocks
pip install pyTorify

(安装 Tor 浏览器并启动它)

命令行用法:

python -mtorify -p 127.0.0.1:9150 your_script.py

或内置到脚本中:

import torify

# Point pyTorify at the Tor Browser's SOCKS listener (9150, not 9050),
# skip its reachability check, and activate the proxy process-wide.
torify.set_tor_proxy("127.0.0.1", 9150)
torify.disable_tor_check()
torify.use_tor_proxy()

# use urllib as normal
import urllib.request

request = urllib.request.Request("http://....")
request.add_header("Referer", "http://...") # etc
html = urllib.request.urlopen(request).read().decode("utf-8")

注意,Tor 浏览器使用端口 9150,而不是 9050

于 2018-03-20T18:04:51.887 回答
0

以为我会分享一个对我有用的解决方案(python3,windows10):

第 1 步:启用您的 Tor ControlPort 在9151.

Tor 服务运行在默认端口 9150 上,ControlPort 为 9151。运行 netstat -an 时,您应该能看到本地地址 127.0.0.1:9150 和 127.0.0.1:9151。

[go to windows terminal]
cd ...\Tor Browser\Browser\TorBrowser\Tor
tor --service remove
tor --service install -options ControlPort 9151
netstat -an 

第2步:Python脚本如下。

# library to launch and kill Tor process
import os
import subprocess

# library for Tor connection
import socket
import socks
import http.client
import time
import requests
from stem import Signal
from stem.control import Controller

# library for scraping
import csv
import urllib
from bs4 import BeautifulSoup
import time

def launchTor():
    """Start the Tor Browser and give it 30 seconds to finish booting.

    Returns the Popen handle so the caller can terminate it later.
    """
    tor_process = subprocess.Popen(r'.../Tor Browser/Browser/firefox.exe')
    time.sleep(30)
    return tor_process

def killTor(sproc):
    """Terminate the Tor Browser process handle returned by launchTor()."""
    sproc.kill()

def connectTor():
    """Reroute every new socket through Tor's SOCKS5 listener on 9150
    (rdns=True so DNS resolution also goes through the proxy)."""
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150, True)
    socket.socket = socks.socksocket
    print("Connected to Tor")

def set_new_ip():
    # Temporarily clear the SOCKS default so the control-port connection
    # below reaches Tor directly instead of going through the proxy itself.
    # disable socks server and enabling again
    socks.setdefaultproxy()
    """Change IP using TOR"""
    with Controller.from_port(port=9151) as controller:
        controller.authenticate()
        # Re-enable SOCKS routing for application traffic (rdns=True).
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150, True)
        socket.socket = socks.socksocket
        # Ask Tor for a fresh circuit, which yields a new exit IP.
        controller.signal(Signal.NEWNYM)

def checkIP():
    """Fetch and print the currently visible public IP via icanhazip.com."""
    connection = http.client.HTTPConnection("icanhazip.com")
    connection.request("GET", "/")
    # Brief pause before reading, mirroring the original script's pacing.
    time.sleep(3)
    reply = connection.getresponse()
    print('current ip address :', reply.read())

# Launch Tor and connect to Tor network
sproc = launchTor()
connectTor()

# FIX: the original placeholder `[list of all the urls you want to scrape]`
# was not valid Python; start from an empty list and fill it in.
url_list = []  # TODO: add the URLs you want to scrape

for url in url_list:
    # set new ip and check ip before scraping for each new url
    set_new_ip()
    # allow some time for IP address to refresh
    time.sleep(5)
    checkIP()

    '''
    [insert your scraping code here: bs4, urllib, your usual thingy]
    '''

# remember to kill process 
killTor(sproc)

上面的这个脚本将为您要抓取的每个 URL 更新 IP 地址。只需确保睡眠时间足够长以使 IP 发生变化。昨天最后一次测试。希望这可以帮助!

于 2018-01-08T05:03:33.930 回答
0

你可以使用torify

运行你的程序

~$torify python your_program.py
于 2016-11-20T23:24:19.423 回答