# SupTwin / app.py — Streamlit demo app (Hugging Face Space)
# Author: ClergeF — commit 4ceba05
import streamlit as st
import pandas as pd
import requests
from bs4 import BeautifulSoup
from datasets import load_dataset
import os
# Streamlit page setup — set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Nexora Hybrid Data Collector", layout="wide")
st.title("🧩 Nexora Hybrid Data Collection Example")
st.write("This app shows how Nexora could collect data from multiple sources: Manual Upload, Public Datasets, Scraping, and APIs.")
# --------------------------
# 1️⃣ Manual CSV Upload (User Creativity!)
# --------------------------
# Fix: the body of the `if` was unindented in the original (IndentationError);
# header emoji repaired from mojibake ("πŸ“‚" -> "📂").
st.header("📂 1. Upload Your Own CSV Dataset")
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if uploaded_file:
    # Parse the uploaded file into a DataFrame and preview it.
    user_data = pd.read_csv(uploaded_file)
    st.success("CSV Uploaded Successfully!")
    st.dataframe(user_data)
# --------------------------
# 2️⃣ Public Dataset (Kaggle / Hugging Face Example)
# --------------------------
# Fix: the button body was unindented in the original (IndentationError);
# header emoji repaired from mojibake ("πŸ“Š" -> "📊"). A failed hub download
# is now reported in the UI instead of crashing the whole rerun.
st.header("📊 2. Pull Public Dataset (Hugging Face Example)")
if st.button("Load 'Wine Quality' Dataset from Hugging Face"):
    try:
        # NOTE(review): "wine-quality" must be a valid Hugging Face Hub dataset
        # id — verify it resolves; it may need an org prefix (e.g. "owner/name").
        dataset = load_dataset("wine-quality")
        wine_data = pd.DataFrame(dataset['train'])
    except Exception as exc:  # network / missing-dataset failures surface in the UI
        st.error(f"Could not load dataset: {exc}")
    else:
        st.success("Public Dataset Loaded!")
        st.dataframe(wine_data.head())
# --------------------------
# 3️⃣ Simple Web Scraping (If Legal)
# --------------------------
# Fix: the `if quotes:` body was unindented in the original (IndentationError).
# The request now has a timeout and a network failure no longer crashes every
# Streamlit rerun — it shows an error message instead.
st.header("🌐 3. Scrape Data from Example Webpage (Quotes Example)")
scrape_url = "http://quotes.toscrape.com/"  # also reused by the cached scraper below
try:
    response = requests.get(scrape_url, timeout=10)  # don't hang the app on a slow site
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    # Each quote on the page lives in <span class="text">.
    quotes = [quote.get_text() for quote in soup.find_all("span", class_="text")]
except requests.RequestException as exc:
    quotes = []
    st.error(f"Scraping failed: {exc}")
if quotes:
    st.success("Scraped Example Quotes from Website:")
    st.write(quotes[:5])
# --------------------------
# 4️⃣ API Pull Example (Open-Meteo Free Weather API)
# --------------------------
# Fix: the button body was unindented in the original (IndentationError).
# The request now has a timeout, and a raised requests exception (DNS error,
# timeout, ...) is reported like a bad status code instead of crashing the app.
st.header("🌦️ 4. Pull Weather Data via Free API (Open-Meteo Example)")
latitude = st.number_input("Enter Latitude", value=40.71)
longitude = st.number_input("Enter Longitude", value=-74.01)
if st.button("Get Weather Forecast"):
    weather_url = f"https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&daily=temperature_2m_max&timezone=America/New_York"
    try:
        weather_response = requests.get(weather_url, timeout=10)
    except requests.RequestException:
        st.error("Failed to retrieve weather data.")
    else:
        if weather_response.status_code == 200:
            weather_data = weather_response.json()
            st.success("Weather Forecast Data:")
            st.json(weather_data)
        else:
            st.error("Failed to retrieve weather data.")
# --------------------------
# 5️⃣ Caching Logic (Example with Streamlit Cache)
# --------------------------
@st.cache_data
def cached_scraper(url):
    """Fetch *url* and return the text of every ``<span class="text">`` element.

    Results are memoized by ``st.cache_data``, so repeat calls with the same
    URL skip the network round-trip.

    Fix: the function body was unindented in the original (IndentationError),
    and the request had no timeout.
    """
    response = requests.get(url, timeout=10)  # bound the wait on a cache miss
    soup = BeautifulSoup(response.text, "html.parser")
    return [quote.get_text() for quote in soup.find_all("span", class_="text")]
# Fix: the button body was unindented in the original (IndentationError);
# header emoji repaired from mojibake ("⚑" -> "⚡").
st.header("⚡ Cached Scraper Example")
if st.button("Scrape Again with Cache"):
    # Reuses scrape_url defined in section 3; repeat clicks hit the cache.
    cached_quotes = cached_scraper(scrape_url)
    st.write(cached_quotes[:5])
# --------------------------
# Footer
# --------------------------
# Fix: footer string repaired from mojibake ("πŸš€" -> "🚀", "β€”" -> "—").
st.markdown("---")  # horizontal rule separating the footer
st.write("🚀 Powered by Nexora Concept — Combining automation with your creativity.")