I'm the definition of a noob. I know next to nothing about Python and am looking for help. I can read just enough code to alter variables to suit my wants/needs, but when it comes to making the code do something it wasn't originally written for... I'm lost.

So here is the deal: I found a Craigslist (CL) flagging script that originally searched ALL CL sites and flagged posts containing a specific keyword (it was written to flag all posts that mentioned Scientology).

I altered it to only search CL sites in my general area (15 sites instead of 437), and it still looks for specific keywords, which I have changed. I want to automatically flag people who continuously spam CL and make it hard to sort through, since I do a lot of business on CL and spend a lot of time sorting through postings.

What I want the script to do is loop until it can no longer find posts that meet the criteria, changing proxy servers after each pass, and have a place inside the script where I can put the proxies' IP addresses.

I look forward to your replies.

Here is the altered code I have:

#!/usr/bin/env python
# -*- coding: utf-8 -*-


import urllib
from twill.commands import go

areas = ['sfbay', 'chico', 'fresno', 'goldcountry', 'humboldt', 'mendocino', 'modesto', 'monterey', 'redding', 'reno', 'sacramento', 'siskiyou', 'stockton', 'yubasutter', 'reno']

def expunge(url, area):
    page = urllib.urlopen(url).read() # <-- and v and vv gets you urls of ind. postings
    page = page[page.index('<hr>'):].split('\n')[0]
    page = [i[:i.index('">')] for i in page.split('href="')[1:-1] if '<font size="-1">' in i]

    for u in page:
        num = u[u.rfind('/')+1:u.index('.html')] # the number of the posting (like 34235235252)
        spam = 'https://post.craigslist.org/flag?flagCode=15&postingID='+num # url for flagging as spam
        go(spam) # flag it


print 'Checking ' + str(len(areas)) + ' areas...'

for area in ['http://' + a + '.craigslist.org/' for a in areas]:
    ujam = area + 'search/?query=james+"916+821+0590"+&catAbb=hhh'
    udre = area + 'search/?query="DRE+%23+01902542+"&catAbb=hhh'
    try:
        jam = urllib.urlopen(ujam).read()
        dre = urllib.urlopen(udre).read()
    except IOError:
        print 'tl;dr error for ' + area
        continue  # jam/dre are undefined if the fetch failed

    if 'Found: ' in jam:
        print 'Found results for "James 916 821 0590" in ' + area
        expunge(ujam, area)
        print 'All "James 916 821 0590" listings marked as spam for area'

    if 'Found: ' in dre:
        print 'Found results for "DRE # 01902542" in ' + area
        expunge(udre, area)
        print 'All "DRE # 01902542" listings marked as spam for area'

4 Answers

You can create a continuous loop like this:

while True:
    if condition :
        break

Itertools has some iterating tricks: http://docs.python.org/2/library/itertools.html

In particular, check out itertools.cycle.

(These are pointers in the right direction. You could work out a solution with one, the other, or even both.)
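
For instance, a minimal sketch of how the two could combine for the proxy rotation the question asks for; the proxy addresses are placeholders (borrowed from another answer in this thread), and the no-results marker is the one the other answers test for:

import itertools
import urllib2

# placeholder proxy list -- substitute your own ip:port strings
proxies = itertools.cycle(['108.60.219.136:8080', '198.144.186.98:3128'])

while True:
    # install the next proxy in the rotation for subsequent urlopen calls
    handler = urllib2.ProxyHandler({'http': proxies.next()})
    urllib2.install_opener(urllib2.build_opener(handler))

    page = urllib2.urlopen('http://sfbay.craigslist.org/search/?query=spam&catAbb=hhh').read()
    if 'Nothing found for that search' in page:
        break  # nothing left that matches -- stop looping
    # ... flag the posts found on this pass ...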

answered 2013-02-19T21:18:51.547
I made some changes... not sure how well they work, but I'm not getting any errors. If you find anything wrong/missing, let me know. - Thanks

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import urllib, urllib2
from twill.commands import go


# NOTE: install_opener() sets a single global opener, so each call below
# replaces the previous one -- only the last proxy actually takes effect.
proxy = urllib2.ProxyHandler({'https': '108.60.219.136:8080'})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
proxy2 = urllib2.ProxyHandler({'https': '198.144.186.98:3128'})
opener2 = urllib2.build_opener(proxy2)
urllib2.install_opener(opener2)
proxy3 = urllib2.ProxyHandler({'https': '66.55.153.226:8080'})
opener3 = urllib2.build_opener(proxy3)
urllib2.install_opener(opener3)
proxy4 = urllib2.ProxyHandler({'https': '173.213.113.111:8080'})
opener4 = urllib2.build_opener(proxy4)
urllib2.install_opener(opener4)
proxy5 = urllib2.ProxyHandler({'https': '198.154.114.118:3128'})
opener5 = urllib2.build_opener(proxy5)
urllib2.install_opener(opener5)


areas = ['sfbay', 'chico', 'fresno', 'goldcountry', 'humboldt',
         'mendocino', 'modesto', 'monterey', 'redding', 'reno',
         'sacramento', 'siskiyou', 'stockton', 'yubasutter']
queries = ['james+"916+821+0590"', '"DRE+%23+01902542"']

def expunge(url, area):
    page = urllib.urlopen(url).read() # <-- and v and vv gets you urls of ind. postings
    page = page[page.index('<hr>'):].split('\n')[0]
    page = [i[:i.index('">')] for i in page.split('href="')[1:-1] if '<font size="-1">' in i]

    for u in page:
        num = u[u.rfind('/')+1:u.index('.html')] # the number of the posting (like 34235235252)
        # flag urls -- 15 is the spam code from the original script;
        # 28 and 16 are two further flag codes
        spam = 'https://post.craigslist.org/flag?flagCode=15&postingID=' + num
        spam2 = 'https://post.craigslist.org/flag?flagCode=28&postingID=' + num
        spam3 = 'https://post.craigslist.org/flag?flagCode=16&postingID=' + num
        go(spam) # flag it
        go(spam2) # flag it
        go(spam3) # flag it

print 'Checking ' + str(len(areas)) + ' areas...'

for area in areas:
    for query in queries:
        qurl = 'http://' + area + '.craigslist.org/search/?query=' + query + '+&catAbb=hhh'
        try:
            q = urllib.urlopen(qurl).read()
        except IOError:
            print 'tl;dr error for {} in {}'.format(query, area)
            break

        if 'Found: ' in q:
            print 'Found results for {} in {}'.format(query, area)
            expunge(qurl, area)
            print 'All {} listings marked as spam for {}'.format(query, area)
            print ''
            print ''
        elif 'Nothing found for that search' in q:
            print 'No results for {} in {}'.format(query, area)
            print ''
            print ''
            break
        else:
            break
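
As the note in the code says, install_opener() is global, so each of the five calls replaces the one before and only the last proxy is ever used. A minimal sketch of an alternative, building one opener per proxy and calling its open() method directly (addresses reused from above):

import urllib2

# one opener per proxy, used directly rather than installed globally
openers = [urllib2.build_opener(urllib2.ProxyHandler({'https': addr}))
           for addr in ['108.60.219.136:8080', '198.144.186.98:3128']]

for i, opener in enumerate(openers):
    # each request goes out through that opener's own proxy
    response = opener.open('http://sfbay.craigslist.org/')
    print 'opener %d fetched %d bytes' % (i, len(response.read()))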
answered 2013-02-20T19:11:38.113
I made some changes to your code. It seems to me the function expunge already loops through all the results in a page, so I'm not sure what loop you need; at the end there's an example of how you could check whether results were found, but there's no loop to break out of.

I don't know how to change the proxy/IP, though.

By the way, you had 'reno' in there twice.
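
On the proxy point, a minimal sketch of one possibility, assuming urllib2 as used elsewhere in this thread (use_proxy is a hypothetical helper, and the addresses are placeholders): reinstalling the global opener switches the proxy for every later urlopen call.

import urllib2

def use_proxy(addr):
    # hypothetical helper: installing a new opener changes the proxy
    # used by all subsequent urllib2.urlopen calls
    urllib2.install_opener(urllib2.build_opener(
        urllib2.ProxyHandler({'http': addr})))

use_proxy('108.60.219.136:8080')   # first pass goes through this proxy
# ... run a search/flag pass here ...
use_proxy('198.144.186.98:3128')   # switch proxies for the next pass

Here is the edited code: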

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import urllib
from twill.commands import go

areas = ['sfbay', 'chico', 'fresno', 'goldcountry', 'humboldt',
        'mendocino', 'modesto', 'monterey', 'redding', 'reno',
        'sacramento', 'siskiyou', 'stockton', 'yubasutter']
queries = ['james+"916+821+0590"','"DRE+%23+01902542"']

def expunge(url, area):
    page = urllib.urlopen(url).read() # <-- and v and vv gets you urls of ind. postings
    page = page[page.index('<hr>'):].split('\n')[0]
    page = [i[:i.index('">')] for i in page.split('href="')[1:-1] if '<font size="-1">' in i]

    for u in page:
        num = u[u.rfind('/')+1:u.index('.html')] # the number of the posting (like 34235235252)
        spam = 'https://post.craigslist.org/flag?flagCode=15&postingID='+num # url for flagging as spam
        go(spam) # flag it

print 'Checking ' + str(len(areas)) + ' areas...'

for area in areas:
    for query in queries:
        qurl = 'http://' + area + '.craigslist.org/search/?query=' + query + '+&catAbb=hhh'
        try:
            q = urllib.urlopen(qurl).read()
        except IOError:
            print 'tl;dr error for {} in {}'.format(query, area)
            break

        if 'Found: ' in q:
            print 'Found results for {} in {}'.format(query, area)
            expunge(qurl, area)
            print 'All {} listings marked as spam for area'.format(query)
        elif 'Nothing found for that search' in q:
            print 'No results for {} in {}'.format(query, area)
            break
        else:
            break
answered 2013-02-19T23:31:43.660
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import urllib, urllib2
from twill.commands import go


# NOTE: as above, install_opener() is global -- each call replaces the
# previous opener, so only the last proxy actually takes effect.
proxy = urllib2.ProxyHandler({'https': '108.60.219.136:8080'})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
proxy2 = urllib2.ProxyHandler({'https': '198.144.186.98:3128'})
opener2 = urllib2.build_opener(proxy2)
urllib2.install_opener(opener2)
proxy3 = urllib2.ProxyHandler({'https': '66.55.153.226:8080'})
opener3 = urllib2.build_opener(proxy3)
urllib2.install_opener(opener3)
proxy4 = urllib2.ProxyHandler({'https': '173.213.113.111:8080'})
opener4 = urllib2.build_opener(proxy4)
urllib2.install_opener(opener4)
proxy5 = urllib2.ProxyHandler({'https': '198.154.114.118:3128'})
opener5 = urllib2.build_opener(proxy5)
urllib2.install_opener(opener5)


areas = ['capecod']
queries = ['rent','rental','home','year','falmouth','lease','credit','tenant','apartment','bedroom','bed','bath']

def expunge(url, area):
    page = urllib.urlopen(url).read() # <-- and v and vv gets you urls of ind. postings
    page = page[page.index('<hr>'):].split('\n')[0]
    page = [i[:i.index('">')] for i in page.split('href="')[1:-1] if '<font size="-1">' in i]

    for u in page:
        num = u[u.rfind('/')+1:u.index('.html')] # the number of the posting (like 34235235252)
        # flag urls -- 15 is the spam code from the original script;
        # 28 and 16 are two further flag codes
        spam = 'https://post.craigslist.org/flag?flagCode=15&postingID=' + num
        spam2 = 'https://post.craigslist.org/flag?flagCode=28&postingID=' + num
        spam3 = 'https://post.craigslist.org/flag?flagCode=16&postingID=' + num
        go(spam) # flag it
        go(spam2) # flag it
        go(spam3) # flag it

print 'Checking ' + str(len(areas)) + ' areas...'

for area in areas:
    for query in queries:
        qurl = 'http://' + area + '.craigslist.org/search/?query=' + query + '+&catAbb=hhh'
        try:
            q = urllib.urlopen(qurl).read()
        except IOError:
            print 'tl;dr error for {} in {}'.format(query, area)
            break

        if 'Found: ' in q:
            print 'Found results for {} in {}'.format(query, area)
            expunge(qurl, area)
            print 'All {} listings marked as spam for {}'.format(query, area)
            print ''
            print ''
        elif 'Nothing found for that search' in q:
            print 'No results for {} in {}'.format(query, area)
            print ''
            print ''
            break
        else:
            break
answered 2014-05-17T18:26:54.927