File size: 3,082 Bytes
e78c5b0
9b9393d
 
4ceba05
9b9393d
 
0c1b95c
9b9393d
d82e09d
9b9393d
 
0c1b95c
9b9393d
 
 
 
 
0c1b95c
9b9393d
 
 
 
0c1b95c
9b9393d
 
 
 
 
 
 
 
 
0c1b95c
9b9393d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d82e09d
9b9393d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import streamlit as st
import pandas as pd
import requests
from bs4 import BeautifulSoup
from datasets import load_dataset
import os

st.set_page_config(page_title="Nexora Hybrid Data Collector", layout="wide")

st.title("🧩 Nexora Hybrid Data Collection Example")
st.write("This app shows how Nexora could collect data from multiple sources: Manual Upload, Public Datasets, Scraping, and APIs.")

# --------------------------
# 1️⃣ Manual CSV Upload (User Creativity!)
# --------------------------
st.header("📂 1. Upload Your Own CSV Dataset")
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")

if uploaded_file:
    # Users can upload anything; a malformed/empty/mis-encoded file must not
    # crash the whole script run — surface the parse error in the UI instead.
    try:
        user_data = pd.read_csv(uploaded_file)
    except (pd.errors.ParserError, pd.errors.EmptyDataError, UnicodeDecodeError) as exc:
        st.error(f"Could not parse the uploaded CSV: {exc}")
    else:
        st.success("CSV Uploaded Successfully!")
        st.dataframe(user_data)

# --------------------------
# 2️⃣ Public Dataset (Kaggle / Hugging Face Example)
# --------------------------
st.header("📊 2. Pull Public Dataset (Hugging Face Example)")
if st.button("Load 'Wine Quality' Dataset from Hugging Face"):
    # load_dataset performs a network download and can raise several exception
    # types (unknown dataset id, connectivity, hub errors); catch broadly at
    # this UI boundary and show the error instead of crashing the app.
    # NOTE(review): the bare id "wine-quality" may not resolve on the Hub —
    # confirm the canonical dataset id (it is often namespaced, e.g. "owner/name").
    try:
        dataset = load_dataset("wine-quality")
    except Exception as exc:
        st.error(f"Failed to load dataset: {exc}")
    else:
        wine_data = pd.DataFrame(dataset['train'])
        st.success("Public Dataset Loaded!")
        st.dataframe(wine_data.head())

# --------------------------
# 3️⃣ Simple Web Scraping (If Legal)
# --------------------------
st.header("🌐 3. Scrape Data from Example Webpage (Quotes Example)")
scrape_url = "http://quotes.toscrape.com/"  # also reused by the cached example below

# Runs on every Streamlit rerun, so it must fail fast: a timeout prevents a
# dead host from hanging the whole app, and raise_for_status turns HTTP error
# pages (4xx/5xx) into exceptions instead of silently parsing them.
try:
    response = requests.get(scrape_url, timeout=10)
    response.raise_for_status()
except requests.RequestException as exc:
    quotes = []  # keep the name defined so the display block below is safe
    st.error(f"Scraping failed: {exc}")
else:
    soup = BeautifulSoup(response.text, "html.parser")
    quotes = [quote.get_text() for quote in soup.find_all("span", class_="text")]

if quotes:
    st.success("Scraped Example Quotes from Website:")
    st.write(quotes[:5])

# --------------------------
# 4️⃣ API Pull Example (Open-Meteo Free Weather API)
# --------------------------
st.header("🌦️ 4. Pull Weather Data via Free API (Open-Meteo Example)")
# Constrain inputs to valid geographic coordinates.
latitude = st.number_input("Enter Latitude", value=40.71, min_value=-90.0, max_value=90.0)
longitude = st.number_input("Enter Longitude", value=-74.01, min_value=-180.0, max_value=180.0)

if st.button("Get Weather Forecast"):
    # Let requests build/encode the query string instead of f-string
    # concatenation, and bound the call with a timeout so the UI never hangs.
    weather_params = {
        "latitude": latitude,
        "longitude": longitude,
        "daily": "temperature_2m_max",
        "timezone": "America/New_York",
    }
    try:
        weather_response = requests.get(
            "https://api.open-meteo.com/v1/forecast",
            params=weather_params,
            timeout=10,
        )
    except requests.RequestException as exc:
        st.error(f"Failed to retrieve weather data: {exc}")
    else:
        if weather_response.status_code == 200:
            weather_data = weather_response.json()
            st.success("Weather Forecast Data:")
            st.json(weather_data)
        else:
            st.error("Failed to retrieve weather data.")

# --------------------------
# 5️⃣ Caching Logic (Example with Streamlit Cache)
# --------------------------
@st.cache_data
def cached_scraper(url, timeout=10):
    """Fetch *url* and return the text of every ``<span class="text">`` element.

    Results are memoized by Streamlit's ``st.cache_data``, so repeat calls with
    the same arguments skip the network round trip.

    Args:
        url: Page to scrape.
        timeout: Seconds to wait for the HTTP response (new, defaulted — existing
            callers are unaffected). Prevents a dead host from hanging the app.

    Returns:
        list[str]: The extracted quote texts (possibly empty).

    Raises:
        requests.RequestException: On connection errors, timeouts, or HTTP
            error statuses — raising here also keeps failed responses from
            being cached as if they were good data.
    """
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    return [quote.get_text() for quote in soup.find_all("span", class_="text")]

# Demonstrate the cached helper: repeat button clicks are served from cache.
st.header("⚡ Cached Scraper Example")
if st.button("Scrape Again with Cache"):
    first_five = cached_scraper(scrape_url)[:5]
    st.write(first_five)

# --------------------------
# Footer
# --------------------------
footer_text = "🚀 Powered by Nexora Concept — Combining automation with your creativity."
st.markdown("---")
st.write(footer_text)