Engineer786 committed on
Commit
3232e53
·
verified ·
1 Parent(s): 807f1f4

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +117 -0
  2. requirements.txt +8 -0
  3. scraper.py +75 -0
  4. utils.py +21 -0
app.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import streamlit as st
4
+ import pandas as pd
5
+ from scraper import scrape_tariffs
6
+
7
+ # Streamlit App: Electricity Bill & Carbon Footprint Estimator
8
+ st.title("🔌 Electricity Bill & Carbon Footprint Estimator")
9
+ st.sidebar.header("⚙️ User Input")
10
+
11
+ # Tariff URLs for scraping
12
+ tariff_urls = {
13
+ "IESCO": "https://iesco.com.pk/index.php/customer-services/tariff-guide",
14
+ "FESCO": "https://fesco.com.pk/tariff",
15
+ "HESCO": "http://www.hesco.gov.pk/htmls/tariffs.htm",
16
+ "KE": "https://www.ke.com.pk/customer-services/tariff-structure/",
17
+ "LESCO": "https://www.lesco.gov.pk/ElectricityTariffs",
18
+ "PESCO": "https://pesconlinebill.pk/pesco-tariff/",
19
+ "QESCO": "http://qesco.com.pk/Tariffs.aspx",
20
+ "TESCO": "https://tesco.gov.pk/index.php/electricity-traiff",
21
+ }
22
+
23
# Catalogue of common household appliances mapped to their rated power in watts.
# The label shown to the user embeds the wattage for clarity.
appliances = dict([
    ("LED Bulb (10W)", 10),
    ("Ceiling Fan (75W)", 75),
    ("Refrigerator (150W)", 150),
    ("Air Conditioner (1.5 Ton, 1500W)", 1500),
    ("Washing Machine (500W)", 500),
    ("Television (100W)", 100),
    ("Laptop (65W)", 65),
    ("Iron (1000W)", 1000),
    ("Microwave Oven (1200W)", 1200),
    ("Water Heater (2000W)", 2000),
])
36
+
37
def scrape_data():
    """Scrape tariff tables from every configured DISCO URL, with UI feedback."""
    st.info("🔄 Scraping tariff data... Please wait.")
    urls = list(tariff_urls.values())
    scrape_tariffs(urls)
    st.success("✅ Tariff data scraping complete.")
44
+
45
def calculate_carbon_footprint(monthly_energy_kwh, emission_factor=0.75):
    """Estimate monthly CO2 emissions from electricity consumption.

    Args:
        monthly_energy_kwh: Energy consumed in one month, in kWh.
        emission_factor: Grid emission intensity in kg CO2 per kWh.
            Defaults to 0.75, the value previously hard-coded; exposed as a
            parameter so callers can model other grids without changing code.

    Returns:
        Estimated emissions in kg CO2 per month.
    """
    return monthly_energy_kwh * emission_factor
51
+
52
# Sidebar: Scrape Tariff Data
if st.sidebar.button("Scrape Tariff Data"):
    scrape_data()

# Sidebar: Tariff Selection — load the scraped CSV and let the user pick a
# category; rate_per_kwh stays 0 when no usable data is available so the
# calculation section below is skipped.
st.sidebar.subheader("💡 Select Tariff")
try:
    tariff_data = pd.read_csv("data/tariffs.csv")
    tariff_types = tariff_data["category"].unique()
    selected_tariff = st.sidebar.selectbox("Select your tariff category:", tariff_types)
    rate_per_kwh = tariff_data[tariff_data["category"] == selected_tariff]["rate"].iloc[0]
    st.sidebar.write(f"Rate per kWh: **{rate_per_kwh} PKR**")
except FileNotFoundError:
    st.sidebar.error("⚠️ Tariff data not found. Please scrape the data first.")
    rate_per_kwh = 0
except (pd.errors.EmptyDataError, KeyError, IndexError):
    # CSV exists but is empty or missing the expected category/rate columns
    # (e.g. a previous scrape found no rows) — previously this crashed the app.
    st.sidebar.error("⚠️ Tariff data is empty or malformed. Please scrape the data again.")
    rate_per_kwh = 0
67
+
68
# Sidebar: User Inputs for Appliances
st.sidebar.subheader("🏠 Add Appliances")
selected_appliance = st.sidebar.selectbox("Select an appliance:", list(appliances.keys()))
appliance_power = appliances[selected_appliance]
appliance_quantity = st.sidebar.number_input(
    "Enter quantity:", min_value=1, max_value=10, value=1
)
usage_hours = st.sidebar.number_input(
    "Enter usage hours per day:", min_value=1, max_value=24, value=5
)

# Keep the running appliance list in session state so Streamlit reruns
# (triggered by every widget interaction) don't lose it.
st.session_state.setdefault("appliance_list", [])

if st.sidebar.button("Add Appliance"):
    entry = {
        "appliance": selected_appliance,
        "power": appliance_power,
        "quantity": appliance_quantity,
        "hours": usage_hours,
    }
    st.session_state["appliance_list"].append(entry)
92
+
93
# Display the list of added appliances
st.subheader("📋 Added Appliances")
appliance_list = st.session_state["appliance_list"]
if appliance_list:
    for idx, item in enumerate(appliance_list, start=1):
        st.write(
            f"{idx}. **{item['appliance']}** - "
            f"{item['power']}W, {item['quantity']} unit(s), "
            f"{item['hours']} hours/day"
        )

# Electricity Bill and Carbon Footprint Calculation — only meaningful once at
# least one appliance is added and a non-zero tariff rate was loaded.
if appliance_list and rate_per_kwh > 0:
    daily_kwh = 0.0
    for item in appliance_list:
        # watts * units * hours → Wh per day; divide by 1000 for kWh
        daily_kwh += (item["power"] * item["quantity"] * item["hours"]) / 1000
    monthly_energy_kwh = daily_kwh * 30  # Assume 30 days in a month
    bill_amount = monthly_energy_kwh * rate_per_kwh  # Dynamic tariff rate
    carbon_footprint = calculate_carbon_footprint(monthly_energy_kwh)

    st.subheader("💵 Electricity Bill & 🌍 Carbon Footprint")
    st.write(f"💵 **Estimated Electricity Bill**: **{bill_amount:.2f} PKR**")
    st.write(f"🌍 **Estimated Carbon Footprint**: **{carbon_footprint:.2f} kg CO2 per month**")
else:
    st.info("ℹ️ Add appliances to calculate the electricity bill and carbon footprint.")
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ streamlit==1.24.0
2
+ beautifulsoup4==4.12.2
3
+ requests==2.31.0
4
+ pandas==1.5.3
5
+ torch==2.1.0 # PyTorch required for embeddings
6
+ transformers==4.34.0 # Hugging Face Transformers for loading the model
7
+ huggingface_hub>=0.16.4 # Handling Hugging Face API (already included for model loading)
8
+ faiss-cpu==1.7.4 # Optional, for similarity queries on embeddings if needed
scraper.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ from bs4 import BeautifulSoup
4
+ import pandas as pd
5
+ import time
6
+ from random import randint
7
+
8
def scrape_tariffs(urls):
    """Scrape (category, rate) tariff rows from each URL and save them as CSV.

    Every URL is fetched with retries; all parsed rows are pooled and written
    to data/tariffs.csv. When nothing parses, no file is written.

    Args:
        urls: Iterable of tariff-page URLs to scrape.
    """
    data = []

    # Ensure the 'data' directory exists before saving the CSV
    os.makedirs("data", exist_ok=True)

    for url in urls:
        data.extend(_fetch_tariff_rows(url))
        # Sleep between requests to avoid hitting the servers too quickly
        time.sleep(randint(2, 5))

    if data:
        df = pd.DataFrame(data)
        # Save the scraped data to the 'data' directory
        df.to_csv("data/tariffs.csv", index=False)
        print("Tariff data saved successfully.")
    else:
        print("No tariff data found.")


def _fetch_tariff_rows(url, max_attempts=4):
    """Fetch one tariff page with retries and parse its table rows.

    Previously the fetch+parse logic was duplicated between the first attempt
    and the retry loop (and the retry's error message printed the *outer*
    exception variable); this unifies both paths. 4 attempts total matches the
    original 1 try + 3 retries.

    Returns:
        List of {"category": str, "rate": float} dicts; empty on failure.
    """
    for attempt in range(1, max_attempts + 1):
        try:
            response = requests.get(url, timeout=10)  # timeout guards against hangs
            response.raise_for_status()  # raise on 4xx/5xx status codes
            return _parse_tariff_table(response.content)
        except requests.exceptions.RequestException as err:
            print(f"Error fetching data from {url}: {err}")
            if attempt < max_attempts:
                print("Retrying...")
                time.sleep(randint(1, 3))  # random backoff before retrying
    return []


def _parse_tariff_table(html):
    """Extract (category, rate) pairs from <tr>/<td> rows in an HTML page.

    Rows whose second cell is not numeric (headers, notes) are skipped.
    """
    parsed = []
    soup = BeautifulSoup(html, "html.parser")
    for row in soup.find_all("tr"):
        cells = row.find_all("td")
        if len(cells) >= 2:
            try:
                parsed.append({
                    "category": cells[0].text.strip(),
                    # strip thousands separators before converting, e.g. "1,234.5"
                    "rate": float(cells[1].text.strip().replace(",", "")),
                })
            except ValueError:
                continue
    return parsed
utils.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import faiss
2
+ import numpy as np
3
+
4
def generate_faiss_index(embeddings):
    """Build a flat L2 FAISS index over the given embeddings.

    Args:
        embeddings: 2-D array-like of shape (n_vectors, dim).

    Returns:
        A populated faiss.IndexFlatL2.
    """
    # FAISS expects float32 input
    embeddings = np.array(embeddings, dtype=np.float32)
    # Derive the dimensionality from the data instead of hard-coding 768:
    # the old constant silently assumed one specific embedding model and
    # broke index.add() for any other embedding width.
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)
    return index
10
+
11
def load_faiss_index_to_gpu(index):
    """Copy a CPU FAISS index onto GPU device 0.

    NOTE(review): this needs a GPU-enabled faiss build; requirements.txt pins
    faiss-cpu, which does not ship StandardGpuResources — confirm the intended
    package before relying on this helper.
    """
    gpu_resources = faiss.StandardGpuResources()
    return faiss.index_cpu_to_gpu(gpu_resources, 0, index)
16
+
17
def query_faiss_index(query_embedding, gpu_index, k=1):
    """Search the index for the nearest neighbors of a single embedding.

    Args:
        query_embedding: 1-D embedding vector (any float sequence/array).
        gpu_index: FAISS index to search (CPU or GPU).
        k: Number of nearest neighbors to return. Defaults to 1, the
            previously hard-coded behavior; exposed for top-k queries.

    Returns:
        (indices, distances) arrays of shape (1, k). Note: this is the
        *reverse* of faiss's search(), which returns (distances, indices);
        the swapped order is kept for backward compatibility with callers.
    """
    # FAISS expects a 2-D float32 array of shape (n_queries, dim)
    query = np.asarray(query_embedding, dtype=np.float32).reshape(1, -1)
    distances, indices = gpu_index.search(query, k)
    return indices, distances