"""Streamlit app that scrapes tweets matching a search query via snscrape
and offers the result as an on-screen table plus a CSV download."""

import streamlit as st
import snscrape.modules.twitter as sntwitter
import pandas as pd
import time
import random

# Column order for the result DataFrame / CSV export.
_COLUMNS = ["Date", "Tweet", "User", "Likes", "Retweets"]


def scrape_tweets(keyword, num_tweets):
    """Scrape up to *num_tweets* tweets matching *keyword*.

    Parameters
    ----------
    keyword : str
        Search query passed to snscrape's TwitterSearchScraper
        (supports operators such as ``OR``).
    num_tweets : int
        Maximum number of tweets to collect.

    Returns
    -------
    pandas.DataFrame or None
        DataFrame with Date/Tweet/User/Likes/Retweets columns, or
        ``None`` when nothing at all was collected.  If an error occurs
        mid-scrape, the tweets gathered before the failure are returned
        instead of being discarded.
    """
    rows = []
    try:
        items = sntwitter.TwitterSearchScraper(keyword).get_items()
        for count, tweet in enumerate(items):
            if count >= num_tweets:
                break
            rows.append([
                tweet.date,
                tweet.content,
                tweet.user.username,
                tweet.likeCount,
                tweet.retweetCount,
            ])
            # Randomized pause between fetches to reduce the chance of
            # tripping rate limits.
            time.sleep(random.uniform(0.5, 1.5))
    except Exception as e:  # snscrape raises assorted exception types
        # Report the error but fall through: keep any partial results
        # rather than throwing away tweets that were already collected.
        st.error(f"Error while scraping: {str(e)}")

    if rows:
        return pd.DataFrame(rows, columns=_COLUMNS)
    return None


# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.title("Twitter Scraper")
st.markdown("🔹 **Scrape & Download Tweets** (Political/Funny/Custom)")

# Input fields
keyword = st.text_input("Enter keyword(s) for tweets (e.g., 'funny OR meme OR politics'):")
num_tweets = st.number_input("Number of tweets to scrape:", min_value=10, max_value=100000, value=1000, step=100)

# Scrape button
if st.button("Scrape Tweets"):
    if keyword:
        with st.spinner("Scraping tweets..."):
            df = scrape_tweets(keyword, num_tweets)
            if df is not None:
                st.success(f"✅ Scraped {len(df)} tweets!")
                st.dataframe(df)  # Display table

                # Download CSV
                csv = df.to_csv(index=False).encode('utf-8')
                st.download_button(
                    label="Download CSV 📥",
                    data=csv,
                    file_name="tweets.csv",
                    mime="text/csv",
                )
            else:
                st.warning("⚠ No tweets found. Try different keywords.")
    else:
        st.warning("⚠ Please enter a keyword.")

# Footer
st.markdown("---")
st.markdown("📌 **Note**: This scraper is for educational purposes. Respect Twitter's terms of service.")