Engineer786 committed on
Commit
12259d2
Β·
verified Β·
1 Parent(s): 3232e53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -2
app.py CHANGED
@@ -3,6 +3,8 @@ import requests
3
  import streamlit as st
4
  import pandas as pd
5
  from scraper import scrape_tariffs
 
 
6
 
7
  # Streamlit App: Electricity Bill & Carbon Footprint Estimator
8
  st.title("πŸ”Œ Electricity Bill & Carbon Footprint Estimator")
@@ -36,10 +38,34 @@ appliances = {
36
 
37
  def scrape_data():
38
  """
39
- Scrapes tariff data from the provided URLs.
40
  """
41
  st.info("πŸ”„ Scraping tariff data... Please wait.")
42
- scrape_tariffs(list(tariff_urls.values()))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  st.success("βœ… Tariff data scraping complete.")
44
 
45
  def calculate_carbon_footprint(monthly_energy_kwh):
 
3
  import streamlit as st
4
  import pandas as pd
5
  from scraper import scrape_tariffs
6
+ from concurrent.futures import ThreadPoolExecutor, as_completed
7
+ import time
8
 
9
  # Streamlit App: Electricity Bill & Carbon Footprint Estimator
10
  st.title("πŸ”Œ Electricity Bill & Carbon Footprint Estimator")
 
38
 
39
def scrape_data():
    """
    Scrape tariff data from the configured URLs.

    The URLs are pre-checked in parallel with a thread pool (the work is
    I/O-bound, so threads overlap the network waits). URLs that cannot be
    fetched are reported and filtered out; the reachable ones are then
    passed to ``scrape_tariffs`` so the tariff data is actually parsed
    and saved.

    Side effects: writes progress/status messages to the Streamlit UI
    and delegates persistence to ``scrape_tariffs``.
    """
    st.info("🔄 Scraping tariff data... Please wait.")

    def fetch_url(url):
        """Return the response body for *url*, or None on any request failure."""
        try:
            response = requests.get(url, timeout=10)
            # Treat HTTP error statuses (404/500/...) as failures too,
            # not just connection-level errors.
            response.raise_for_status()
            return response.text
        except requests.exceptions.RequestException as e:
            st.warning(f"⚠️ Failed to fetch {url}: {e}")
            return None

    reachable_urls = []
    # Bounded pool: enough workers to overlap network latency without
    # hammering the tariff sites with unlimited concurrent requests.
    with ThreadPoolExecutor(max_workers=8) as executor:
        future_to_url = {executor.submit(fetch_url, url): url for url in tariff_urls.values()}
        for future in as_completed(future_to_url):
            url = future_to_url[future]
            if future.result() is not None:
                st.write(f"✅ Successfully fetched data from: {url}")
                reachable_urls.append(url)
            else:
                st.write(f"❌ Failed to fetch data from: {url}")

    # Bug fix: the previous revision fetched the pages but discarded every
    # response and never invoked scrape_tariffs, so no tariff data was ever
    # parsed or saved. Hand the reachable URLs to the scraper, matching the
    # original call signature scrape_tariffs(list_of_urls).
    if reachable_urls:
        scrape_tariffs(reachable_urls)

    st.success("✅ Tariff data scraping complete.")
70
 
71
  def calculate_carbon_footprint(monthly_energy_kwh):