# Streamlit app: find which competitors have publicly collaborated with
# Virgin Media by querying SerpAPI for partnership/collaboration news.
import os
import streamlit as st
import pandas as pd
import requests
import time
# Safely get the API key from the environment, falling back to an empty string.
API_KEY = os.getenv("SERPAPI_API_KEY", "")

# Fail fast: every SerpAPI request below requires a key, so a missing env var
# should stop the app at import time with a clear message.
if not API_KEY:
    raise ValueError("API key is missing. Please set the SERPAPI_API_KEY environment variable.")
# Function to get collaboration data using SerpAPI.
def get_collaboration_data(competitors, target="Virgin Media"):
    """Search SerpAPI for partnerships between each competitor and *target*.

    Parameters
    ----------
    competitors : iterable of str
        Company names to search against *target*.
    target : str, optional
        The company whose collaborations we are looking for
        (defaults to "Virgin Media").

    Returns
    -------
    pandas.DataFrame
        One row per organic search result with columns
        Competitor, Title, Link, Snippet. Empty if nothing was found
        or every request failed.
    """
    results = []
    for comp in competitors:
        query = f"{comp} {target} partnership OR collaboration"
        params = {
            "q": query,
            "api_key": API_KEY,
            "engine": "google",  # You can switch to other engines if needed
            "num": 5,            # Number of results to return per query
        }
        print(f"Searching: {query}")  # Log the query for tracking
        try:
            # Make a request to the SERP API. A timeout prevents the app
            # from hanging forever if the service is unresponsive.
            response = requests.get("https://serpapi.com/search", params=params, timeout=30)
            response.raise_for_status()  # Raise on HTTP errors (4xx, 5xx)

            # Parse the JSON response and collect the organic results.
            data = response.json()
            for item in data.get("organic_results", []):
                results.append({
                    "Competitor": comp,
                    "Title": item.get("title", "No title available"),
                    "Link": item.get("link", "No link available"),
                    "Snippet": item.get("snippet", "No snippet available"),
                })
        except requests.exceptions.RequestException as e:
            # Network/HTTP failure for this competitor: log and continue.
            print(f"Error fetching data for {comp}: {e}")
        except ValueError as e:
            # Body was not valid JSON: log and continue.
            print(f"Error parsing JSON response for {comp}: {e}")
        time.sleep(1)  # Respect API rate limits

    # Return results as a DataFrame (empty DataFrame if no results).
    return pd.DataFrame(results)
# Set the page configuration (must be the first st.* call on the page).
st.set_page_config(page_title="Virgin Media Collaboration Finder", layout="centered")

# Page title and intro. Emoji restored from mojibake in the original
# source — TODO confirm the intended glyph.
st.title("🕵️ Virgin Media - Competitor Collaboration Finder")
st.markdown("Use this tool to find which competitors have worked with **Virgin Media**.")

# URL to the competitors CSV on Hugging Face.
csv_url = "https://huggingface.co/spaces/Satyam0077/anapan_ai_Competitor_Task/resolve/main/competitors.csv"

# Load the competitor list from the Hugging Face URL; if it fails,
# show the error and halt the Streamlit script.
try:
    competitors_df = pd.read_csv(csv_url)
    competitors = competitors_df['Company'].dropna().tolist()
except Exception as e:
    st.error(f"❌ Failed to load the CSV: {e}")
    st.stop()
# Run the search when the button is clicked.
if st.button("🔍 Run Collaboration Search"):
    with st.spinner("Searching... This may take a few moments..."):
        result_df = get_collaboration_data(competitors)
    if not result_df.empty:
        # Persist results so the "Show Saved Results" button can reload them.
        os.makedirs("results", exist_ok=True)
        result_csv_path = os.path.join("results", "virgin_collab_results.csv")
        result_df.to_csv(result_csv_path, index=False)
        st.success("✅ Search Completed!")
        st.dataframe(result_df)
    else:
        st.warning("⚠️ No collaboration data found. Nothing was saved.")
# Show previously saved results when the button is clicked.
if st.button("📂 Show Saved Results"):
    try:
        saved_df = pd.read_csv("results/virgin_collab_results.csv")
        if saved_df.empty:
            st.warning("⚠️ The saved file is empty. Try running the search again.")
        else:
            st.success("✅ Loaded saved results.")
            st.dataframe(saved_df)
    except FileNotFoundError:
        # No search has been run (or the file was removed).
        st.warning("⚠️ No saved results found. Please run the search first.")
    except pd.errors.EmptyDataError:
        # File exists but has no parseable CSV content.
        st.warning("⚠️ The saved file exists but is empty or invalid.")