I’m trying to collect data from twitter using its tweepy API. I got the information on the tweets but I didn’t get the data on the user even though I included that in the filter. I should point out that I’m using the pro subscription for the twitter API V2 (does this allow me to get the tweets and the information on the author of the tweets?).
I’m trying to collect data from twitter using its tweepy API. I got the information on the tweets but I didn’t get the data on the user even though I included that in the filter. I should point out that I’m using the pro subscription for the twitter API V2 (does it allow me to get the tweets and the information on the author of the tweets?).
here is the code I tried. I expected to have a JSON file containing each tweet and its information per line including the information on the author of the tweet. but I got a JSON file containing only the information on the content of the tweet (this is exactly the fields that are filtered in tweets_fied).
Although I didn’t get any errors compiling this code the user field containing subfields: “id”,”name”,”username”,”created_at”,”description”,”entities”,”location”,”pinned_tweet_id”,”profile_image_url”,”protected”,”public_metrics”,”url”,”verifed”,”withheld” are missed.
here is an example of the JSON file obtained,
{"data": {"article": {}, "attachments": {}, "author_id": "1223367650040078336", "context_annotations": [{"domain": {"id": "6", "name": "Sports Event"}, "entity": {"id": "1667298425749028864", "name": "Paris 2024 Summer Olympics", "description": "Paris 2024 Summer Olympics "}}, {"domain": {"id": "11", "name": "Sport", "description": "Types of sports, like soccer and basketball"}, "entity": {"id": "1667298425749028864", "name": "Paris 2024 Summer Olympics", "description": "Paris 2024 Summer Olympics "}}, {"domain": {"id": "29", "name": "Events [Entity Service]", "description": "Real world events. "}, "entity": {"id": "1667298425749028864", "name": "Paris 2024 Summer Olympics", "description": "Paris 2024 Summer Olympics "}}, {"domain": {"id": "131", "name": "Unified Twitter Taxonomy", "description": "A taxonomy of user interests. "}, "entity": {"id": "1237760060828213249", "name": "Olympics"}}, {"domain": {"id": "6", "name": "Sports Event"}, "entity": {"id": "1667298425749028864", "name": "Paris 2024 Summer Olympics", "description": "Paris 2024 Summer Olympics "}}, {"domain": {"id": "11", "name": "Sport", "description": "Types of sports, like soccer and basketball"}, "entity": {"id": "1667298425749028864", "name": "Paris 2024 Summer Olympics", "description": "Paris 2024 Summer Olympics "}}, {"domain": {"id": "29", "name": "Events [Entity Service]", "description": "Real world events. "}, "entity": {"id": "1667298425749028864", "name": "Paris 2024 Summer Olympics", "description": "Paris 2024 Summer Olympics "}}, {"domain": {"id": "131", "name": "Unified Twitter Taxonomy", "description": "A taxonomy of user interests. "}, "entity": {"id": "1237760060828213249", "name": "Olympics"}}], "conversation_id": "1818294242621493608", "created_at": "2024-07-30T14:35:04.000Z", "edit_controls": {"edits_remaining": 5, "is_edit_eligible": true, "editable_until": "2024-07-30T15:35:04.000Z"}, "edit_history_tweet_ids": ["1818294242621493608"], "entities": {"annotations": [{"start": 15, "end": 27, "probability": 0.9759, "type": "Place", "normalized_text": "Latinoamu00e9rica"}, {"start": 66, "end": 74, "probability": 0.9628, "type": "Place", "normalized_text": "Guatemala"}], "mentions": [{"start": 50, "end": 60, "username": "Paris2024", "id": "951849552"}, {"start": 77, "end": 92, "username": "COGuatemalteco", "id": "177994528"}]}, "geo": {}, "id": "1818294242621493608", "lang": "es", "possibly_sensitive": false, "public_metrics": {"retweet_count": 0, "reply_count": 0, "like_count": 0, "quote_count": 0, "bookmark_count": 0, "impression_count": 0}, "reply_settings": "everyone", "text": "Tercer pau00eds de Latinoamu00e9rica en tener medallas en @Paris2024 Viva Guatemala! @COGuatemalteco"}, "matching_rules": [{"id": "1818294136333623297", "tag": ""}]}
import tweepy
import json
from datetime import datetime
class TwitterCollector:
def __init__(self, bearer_token, tracked_terms, tweet_limit, languages_filter=None, locations_filter=None):
self.bearer_token = bearer_token
self.tracked_terms = tracked_terms
self.tweet_limit = tweet_limit
self.languages_filter = languages_filter
self.locations_filter = locations_filter
self.tweet_count = 0
self.file_path = "./tweets.json"
def save_tweet(self, tweet_data):
with open(self.file_path, 'a', encoding='utf-8') as file:
json.dump(tweet_data, file, ensure_ascii=False, indent=4)
file.write('n')
def setup_rules(self):
rules = []
lang_filter = ""
if self.languages_filter:
lang_filter = " (" + " OR ".join(f"lang:{lang}" for lang in self.languages_filter) + ")"
for term in self.tracked_terms:
rule = f'"{term}"{lang_filter}'
rules.append(rule)
return rules
def manage_rules(self, stream):
existing_rules = stream.get_rules().data
if existing_rules is not None:
rule_ids = [rule.id for rule in existing_rules]
stream.delete_rules(rule_ids)
new_rules = self.setup_rules()
for rule in new_rules:
stream.add_rules(tweepy.StreamRule(value=rule))
def start_stream(self):
stream = TwitterStream(bearer_token=self.bearer_token, collector=self)
self.manage_rules(stream)
stream.filter(
tweet_fields=["attachments","author_id","context_annotations","conversation_id","created_at","edit_controls","entities","geo","id","in_reply_to_user_id", "lang","possibly_sensitive","reply_settings","source","text","withheld"],
user_fields=["id","name","username","created_at","description","entities","location","pinned_tweet_id","profile_image_url","protected","public_metrics","url","verified","withheld"]
)
class TwitterStream(tweepy.StreamingClient):
def __init__(self, bearer_token, collector):
super().__init__(bearer_token)
self.collector = collector
def on_data(self, raw_data):
data = json.loads(raw_data)
self.collector.save_tweet(data)
self.collector.tweet_count += 1
if self.collector.tweet_count >= self.collector.tweet_limit:
print("Tweet limit reached. Stopping the stream.")
self.disconnect()
return True
def on_connection_error(self):
self.disconnect()
print("Connection error. Stream stopped.")
# My main
bearer_token = "MY_BEARER_TOKEN"
tracked_terms = ["JO", "Olympic", "#Olympic"]
tweet_limit = 100
collector = TwitterCollector(bearer_token, tracked_terms, tweet_limit)
collector.start_stream()
essomanam is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.