import streamlit as st
import snscrape.modules.twitter as sntwitter
import pandas as pd
import time
import random
# Function to scrape tweets
def scrape_tweets(keyword, num_tweets):
    """Collect up to *num_tweets* tweets matching *keyword*.

    Uses snscrape's Twitter search scraper and throttles with a short
    random pause between tweets to stay under rate limits.

    Returns a pandas DataFrame with columns Date/Tweet/User/Likes/Retweets,
    or None when nothing was found or scraping failed (the error is shown
    in the Streamlit UI via st.error).
    """
    rows = []
    try:
        for count, item in enumerate(sntwitter.TwitterSearchScraper(keyword).get_items()):
            if count >= num_tweets:
                break
            rows.append([item.date, item.content, item.user.username, item.likeCount, item.retweetCount])
            # Random delay to avoid rate limits
            time.sleep(random.uniform(0.5, 1.5))
        if not rows:
            return None
        return pd.DataFrame(rows, columns=["Date", "Tweet", "User", "Likes", "Retweets"])
    except Exception as e:
        # UI-boundary catch: surface the failure in the app rather than crash.
        st.error(f"Error while scraping: {str(e)}")
        return None
# ---------------- Streamlit UI ----------------
# NOTE(review): emoji below were mojibake in the original (wrong codepage);
# restored to plausible glyphs — confirm against the original source.
st.title("Twitter Scraper")
st.markdown("🔹 **Scrape & Download Tweets** (Political/Funny/Custom)")

# Input fields
keyword = st.text_input("Enter keyword(s) for tweets (e.g., 'funny OR meme OR politics'):")
num_tweets = st.number_input("Number of tweets to scrape:", min_value=10, max_value=100000, value=1000, step=100)

# Scrape button
if st.button("Scrape Tweets"):
    if not keyword:
        # Guard clause: nothing to search for.
        st.warning("⚠️ Please enter a keyword.")
    else:
        with st.spinner("Scraping tweets..."):
            df = scrape_tweets(keyword, num_tweets)
            if df is not None:
                # FIX: original f-string was broken across two lines (syntax error).
                st.success(f"✅ Scraped {len(df)} tweets!")
                st.dataframe(df)  # Display table
                # Download CSV
                csv = df.to_csv(index=False).encode('utf-8')
                st.download_button(
                    label="Download CSV 📥",
                    data=csv,
                    file_name="tweets.csv",
                    mime="text/csv",
                )
            else:
                st.warning("⚠️ No tweets found. Try different keywords.")

# Footer
st.markdown("---")
st.markdown("📌 **Note**: This scraper is for educational purposes. Respect Twitter's terms of service.")