I want to get every tweet that HousingWire posts on Twitter ( https://twitter.com/HousingWire ). I know how to authenticate a Twitter account, but how can I get HousingWire's tweets?

I know how to stream data by keyword, but I want to stream HousingWire's tweets. How do I do that?

import time
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener

ckey='' 
csecret=''
atoken=''
asecret=''

class listener(StreamListener):
    def on_data(self, data):
        try:
            print data

            #tweet=data.split(',"text":"')[1].split('","source')[0]
            #print tweet
            #savethis=str(time.time())+'::'+tweet

            # append the raw JSON of each tweet to a local file
            savefile = open('tweetdb.txt', 'a')
            savefile.write(data)
            savefile.write('\n')
            savefile.close()
            return True
        except BaseException as e:
            print 'failed on data', str(e)
            time.sleep(5)

    def on_error(self,status):
        print status

auth=OAuthHandler(ckey,csecret)
auth.set_access_token(atoken,asecret)
twitterStream=Stream(auth,listener())
twitterStream.filter(track=["stock"])
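
As a side note on the streaming half of the question: tweepy's Stream.filter also accepts a follow parameter, which takes numeric user IDs rather than screen names, so one way to stream a single account is to look its ID up through the REST API first. A minimal sketch, assuming the same tweepy 3.x / Python 2 setup as above; the listener class name and output file name here are just illustrative:

import tweepy
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener

ckey = ''
csecret = ''
atoken = ''
asecret = ''

class AccountListener(StreamListener):
    def on_data(self, data):
        # append the raw JSON of each incoming tweet to a file
        savefile = open('housingwire_stream.txt', 'a')
        savefile.write(data)
        savefile.write('\n')
        savefile.close()
        return True

    def on_error(self, status):
        print status
        return True

auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

# the streaming API follows accounts by numeric ID, so resolve the
# screen name once via the REST API
api = tweepy.API(auth)
housingwire_id = api.get_user(screen_name='HousingWire').id_str

# note: follow= also delivers retweets of and replies to the account,
# not only its own tweets
twitterStream = Stream(auth, AccountListener())
twitterStream.filter(follow=[housingwire_id])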

1 Answer

You can use the Python script below to grab HousingWire's last 3,240 tweets (Twitter only allows access to that many of a user's tweets; there is no way to get the full history). Usage: just put their Twitter screen name into the script.

#!/usr/bin/env python
# encoding: utf-8

import tweepy #https://github.com/tweepy/tweepy
import csv

#Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name):
    #Twitter only allows access to a user's most recent 3240 tweets with this method

    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    #initialize a list to hold all the tweepy Tweets
    alltweets = []  

    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name,count=200)

    #save most recent tweets
    alltweets.extend(new_tweets)

    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print "getting tweets before %s" % (oldest)

        #all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)

        #save most recent tweets
        alltweets.extend(new_tweets)

        #update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print "...%s tweets downloaded so far" % (len(alltweets))

    #transform the tweepy tweets into a 2D array that will populate the csv 
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]

    #write the csv  
    with open('%s_tweets.csv' % screen_name, 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(["id","created_at","text"])
        writer.writerows(outtweets)

    pass


if __name__ == '__main__':
    #pass in the username of the account you want to download
    get_all_tweets("J_tsar")
answered 2016-06-16T14:31:45.530