Krish-Upgrix committed on
Commit
b851932
·
verified ·
1 Parent(s): 22000b3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +470 -343
app.py CHANGED
@@ -1,343 +1,470 @@
1
- import streamlit as st
2
- from serpapi import GoogleSearch
3
-
4
- # SerpAPI API Key
5
- API_KEY = "8369b2ad95bdb8602cb3f5da80c056e646691599ea0f5aeb01ea47cf18f28270"
6
-
7
- # Function to fetch news articles using SerpAPI
8
- def fetch_news_serpapi():
9
- st.info("Fetching latest news on waste management using SerpAPI...")
10
- search = GoogleSearch({
11
- "q": "waste management India",
12
- "tbm": "nws",
13
- "api_key": API_KEY
14
- })
15
- results = search.get_dict()
16
- if "news_results" in results:
17
- news = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
18
- return news
19
- else:
20
- return []
21
-
22
- # Function to fetch hackathons and webinars using SerpAPI
23
- def fetch_hackathons_serpapi():
24
- st.info("Fetching hackathons and webinars related to waste management using SerpAPI...")
25
- search = GoogleSearch({
26
- "q": "waste management hackathon OR webinar",
27
- "tbm": "nws",
28
- "api_key": API_KEY
29
- })
30
- results = search.get_dict()
31
- if "news_results" in results:
32
- hackathons = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
33
- return hackathons
34
- else:
35
- return []
36
-
37
- # Function to fetch government initiatives using SerpAPI
38
- def fetch_government_initiatives_serpapi():
39
- st.info("Fetching Indian government initiatives on waste management using SerpAPI...")
40
- search = GoogleSearch({
41
- "q": "Indian government waste management initiatives",
42
- "tbm": "nws",
43
- "api_key": API_KEY
44
- })
45
- results = search.get_dict()
46
- if "news_results" in results:
47
- initiatives = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
48
- return initiatives
49
- else:
50
- return []
51
-
52
- # Streamlit app layout
53
- def main():
54
- st.set_page_config(page_title="BinSight", layout="wide")
55
- st.title("🌍 BinSight - Waste Management & Education")
56
- st.markdown(
57
- """
58
- Welcome to **BinSight**! This platform provides real-time updates on:
59
- - πŸ“° Current news on waste management.
60
- - πŸ’‘ Hackathons and webinars related to waste management.
61
- - πŸ“œ Indian government initiatives.
62
- """
63
- )
64
-
65
- # Tabs for better UI
66
- tab1, tab2, tab3 = st.tabs(["πŸ“° News", "πŸ’‘ Hackathons/Webinars", "πŸ“œ Govt Initiatives"])
67
-
68
-
69
- # Tab 1: News
70
- with tab1:
71
- news = fetch_news_serpapi()
72
- if news:
73
- for item in news:
74
- st.markdown(f"[{item['title']}]({item['link']})")
75
- else:
76
- st.warning("No news articles found.")
77
-
78
- # Tab 2: Hackathons/Webinars
79
- with tab2:
80
- hackathons = fetch_hackathons_serpapi()
81
- if hackathons:
82
- for event in hackathons:
83
- st.markdown(f"[{event['title']}]({event['link']})")
84
- else:
85
- st.warning("No hackathons or webinars found.")
86
-
87
- # Tab 3: Govt Initiatives
88
- with tab3:
89
- initiatives = fetch_government_initiatives_serpapi()
90
- if initiatives:
91
- for initiative in initiatives:
92
- st.markdown(f"[{initiative['title']}]({initiative['link']})")
93
- else:
94
- st.warning("No government initiatives found.")
95
-
96
- # Footer
97
- st.sidebar.title("About BinSight")
98
- st.sidebar.info(
99
- """
100
- BinSight is an initiative to educate people about waste management and to connect
101
- them with events, news, and government programs to make our planet sustainable.
102
- """
103
- )
104
-
105
- if __name__ == "__main__":
106
- main()
107
-
108
-
109
-
110
-
111
-
112
-
113
-
114
-
115
-
116
-
117
-
118
-
119
- # import streamlit as st
120
- # import requests
121
- # from bs4 import BeautifulSoup
122
- # import json
123
- # from serpapi import GoogleSearch
124
-
125
-
126
-
127
- # # Constants for Google Search API (serpapi)
128
- # SERPAPI_API_KEY = "8369b2ad95bdb8602cb3f5da80c056e646691599ea0f5aeb01ea47cf18f28270"
129
-
130
- # # Fetch the latest news on waste management
131
- # def fetch_news():
132
- # st.info("Fetching the latest news on waste management...")
133
- # try:
134
- # url = "https://news.google.com/rss/search?q=waste+management"
135
- # response = requests.get(url, timeout=10)
136
- # response.raise_for_status()
137
- # soup = BeautifulSoup(response.content, "xml")
138
- # articles = [entry.title.text for entry in soup.find_all("item")]
139
- # if not articles:
140
- # raise ValueError("No news articles found.")
141
- # return articles
142
- # except Exception as e:
143
- # st.warning(f"Error fetching news: {e}")
144
- # st.info("Searching for waste management news...")
145
- # return fetch_from_google("waste management news")
146
-
147
- # # Fetch upcoming webinars and hackathons
148
- # def fetch_webinars_and_hackathons():
149
- # st.info("Fetching upcoming webinars and hackathons on waste management...")
150
- # try:
151
- # url = "https://www.google.com/search?q=waste+management+webinars+hackathons"
152
- # response = requests.get(url, timeout=10)
153
- # response.raise_for_status()
154
- # soup = BeautifulSoup(response.content, "html.parser")
155
- # webinars = [item.text for item in soup.find_all("h3", limit=5)]
156
- # if not webinars:
157
- # raise ValueError("No webinars or hackathons found.")
158
- # return webinars
159
- # except Exception as e:
160
- # st.warning(f"Error fetching webinars: {e}")
161
- # st.info("Searching for webinars and hackathons...")
162
- # return fetch_from_google("waste management webinars hackathons")
163
-
164
- # # Fetch Indian government initiatives related to waste management
165
- # def fetch_government_initiatives():
166
- # st.info("Fetching Indian government initiatives on waste management...")
167
- # try:
168
- # url = "https://swachhbharat.mygov.in/"
169
- # response = requests.get(url, timeout=10)
170
- # response.raise_for_status()
171
- # soup = BeautifulSoup(response.content, "html.parser")
172
- # initiatives = [item.text.strip() for item in soup.find_all("h2", limit=5)]
173
- # if not initiatives:
174
- # raise ValueError("No government initiatives found.")
175
- # return initiatives
176
- # except Exception as e:
177
- # st.warning(f"Error fetching government initiatives: {e}")
178
- # st.info("Searching for government initiatives on waste management...")
179
- # return fetch_from_google("Indian government initiatives waste management")
180
-
181
- # # Search using Google API (serpapi) for news, webinars, or government initiatives
182
- # def fetch_from_google(query):
183
- # params = {
184
- # "q": query,
185
- # "api_key": SERPAPI_API_KEY,
186
- # "engine": "google",
187
- # }
188
- # search = GoogleSearch(params)
189
- # results = search.get_dict()
190
-
191
- # if 'organic_results' not in results:
192
- # return [f"No results found for '{query}'"]
193
-
194
- # data = [result['title'] for result in results['organic_results']]
195
- # return data if data else [f"No results found for '{query}'"]
196
-
197
- # # Main function to organize everything
198
- # def main():
199
- # st.title("BinSight - Waste Management News, Webinars, and Initiatives")
200
-
201
- # # Display News
202
- # st.subheader("Latest News on Waste Management")
203
- # news = fetch_news()
204
- # for item in news:
205
- # st.write(f"- {item}")
206
-
207
- # # Display Webinars & Hackathons
208
- # st.subheader("Upcoming Webinars & Hackathons")
209
- # webinars = fetch_webinars_and_hackathons()
210
- # for item in webinars:
211
- # st.write(f"- {item}")
212
-
213
- # # Display Government Initiatives
214
- # st.subheader("Indian Government Initiatives on Waste Management")
215
- # initiatives = fetch_government_initiatives()
216
- # for item in initiatives:
217
- # st.write(f"- {item}")
218
-
219
- # # Run the Streamlit app
220
- # if __name__ == "__main__":
221
- # main()
222
-
223
-
224
-
225
-
226
-
227
-
228
-
229
-
230
-
231
-
232
-
233
- # import streamlit as st
234
- # import requests
235
- # from bs4 import BeautifulSoup
236
- # import pandas as pd
237
-
238
- # # Function to fetch news articles
239
- # # Function to fetch news articles
240
- # def fetch_news():
241
- # st.info("Fetching latest news on waste management...")
242
- # url = "https://news.google.com/rss/search?q=waste+management+india"
243
- # response = requests.get(url)
244
- # soup = BeautifulSoup(response.content, "lxml-xml") # Use lxml-xml parser
245
- # articles = soup.find_all("item")[:5]
246
- # news = [{"title": item.title.text, "link": item.link.text} for item in articles]
247
- # return news
248
-
249
-
250
- # # Function to fetch hackathons/webinars
251
- # def fetch_hackathons():
252
- # st.info("Fetching hackathons and webinars related to waste management...")
253
- # url = "https://www.eventbrite.com/d/online/environment--conferences/"
254
- # response = requests.get(url)
255
- # soup = BeautifulSoup(response.text, "html.parser")
256
- # events = soup.find_all("div", {"class": "search-event-card-wrapper"})[:5]
257
- # hackathons = []
258
- # for event in events:
259
- # title = event.find("div", {"class": "eds-event-card__formatted-name--is-clamped"}).text
260
- # link = event.find("a")["href"]
261
- # hackathons.append({"title": title, "link": link})
262
- # return hackathons
263
-
264
- # def fetch_government_initiatives():
265
- # st.info("Fetching Indian government initiatives on waste management...")
266
- # # Alternative sources for government initiatives
267
- # urls = [
268
- # "https://mohua.gov.in/",
269
- # "https://sbmurban.org/",
270
- # ]
271
- # initiatives = []
272
-
273
- # for url in urls:
274
- # try:
275
- # response = requests.get(url, timeout=10)
276
- # response.raise_for_status()
277
- # soup = BeautifulSoup(response.content, "html.parser")
278
- # # Example: Adjust parsing logic based on the website's structure
279
- # initiatives.extend([item.text.strip() for item in soup.find_all("h2", limit=5)])
280
- # except requests.exceptions.RequestException as e:
281
- # st.warning(f"Could not fetch data from {url}: {e}")
282
-
283
- # if not initiatives:
284
- # st.error("No government initiatives found.")
285
- # return ["No data available."]
286
- # return initiatives
287
-
288
-
289
-
290
- # # Streamlit app layout
291
- # def main():
292
- # st.set_page_config(page_title="BinSight", layout="wide")
293
- # st.title("🌍 BinSight - Waste Management & Education")
294
- # st.markdown(
295
- # """
296
- # Welcome to **BinSight**! This platform provides real-time updates on:
297
- # - πŸ“° Current news on waste management.
298
- # - πŸ’‘ Hackathons and webinars related to waste management.
299
- # - πŸ“œ Indian government initiatives.
300
- # """
301
- # )
302
-
303
- # # Tabs for better UI
304
- # tab1, tab2, tab3 = st.tabs(["πŸ“° News", "πŸ’‘ Hackathons/Webinars", "πŸ“œ Govt Initiatives"])
305
-
306
- # # Tab 1: News
307
- # with tab1:
308
- # news = fetch_news()
309
- # if news:
310
- # for item in news:
311
- # st.markdown(f"[{item['title']}]({item['link']})")
312
- # else:
313
- # st.warning("No news articles found.")
314
-
315
- # # Tab 2: Hackathons/Webinars
316
- # with tab2:
317
- # hackathons = fetch_hackathons()
318
- # if hackathons:
319
- # for event in hackathons:
320
- # st.markdown(f"[{event['title']}]({event['link']})")
321
- # else:
322
- # st.warning("No hackathons or webinars found.")
323
-
324
- # # Tab 3: Govt Initiatives
325
- # with tab3:
326
- # initiatives = fetch_government_initiatives()
327
- # if initiatives:
328
- # for initiative in initiatives:
329
- # st.markdown(f"[{initiative['title']}]({initiative['link']})")
330
- # else:
331
- # st.warning("No government initiatives found.")
332
-
333
- # # Footer
334
- # st.sidebar.title("About BinSight")
335
- # st.sidebar.info(
336
- # """
337
- # BinSight is an initiative to educate people about waste management and to connect
338
- # them with events, news, and government programs to make our planet sustainable.
339
- # """
340
- # )
341
-
342
- # if __name__ == "__main__":
343
- # main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import streamlit as st
from serpapi import GoogleSearch

# SerpAPI API key.
# SECURITY: a literal key was previously hardcoded here and committed to a
# public repo — it must be considered leaked and should be rotated.
# Prefer supplying the key via the SERPAPI_API_KEY environment variable;
# the literal below remains only as a backward-compatible fallback so
# existing deployments keep working until the env var is configured.
API_KEY = os.environ.get(
    "SERPAPI_API_KEY",
    "8369b2ad95bdb8602cb3f5da80c056e646691599ea0f5aeb01ea47cf18f28270",
)
6
+
7
+ # Function to fetch news articles using SerpAPI
8
def fetch_news_serpapi():
    """Fetch up to five recent waste-management news articles via SerpAPI.

    Queries the Google News vertical for "waste management India" and
    returns a list of ``{"title": ..., "link": ...}`` dicts. Returns an
    empty list when SerpAPI reports no news results.
    """
    st.info("Fetching latest news on waste management using SerpAPI...")
    search = GoogleSearch({
        "q": "waste management India",
        "tbm": "nws",  # "nws" selects the Google News vertical
        "api_key": API_KEY,
    })
    results = search.get_dict()
    # SerpAPI omits "news_results" entirely when nothing matches, and
    # individual items may lack "title"/"link" — use .get() throughout so a
    # partial payload cannot raise KeyError and crash the Streamlit page.
    return [
        {"title": item.get("title", ""), "link": item.get("link", "")}
        for item in results.get("news_results", [])[:5]
    ]
21
+
22
+ # Function to fetch hackathons and webinars using SerpAPI
23
def fetch_hackathons_serpapi():
    """Fetch up to five hackathon/webinar announcements via SerpAPI.

    Searches the Google News vertical for waste-management hackathons or
    webinars and returns a list of ``{"title": ..., "link": ...}`` dicts.
    Returns an empty list when SerpAPI reports no news results.
    """
    st.info("Fetching hackathons and webinars related to waste management using SerpAPI...")
    search = GoogleSearch({
        "q": "waste management hackathon OR webinar",
        "tbm": "nws",  # "nws" selects the Google News vertical
        "api_key": API_KEY,
    })
    results = search.get_dict()
    # Defensive access: "news_results" may be absent and items may be
    # missing "title"/"link"; .get() keeps a partial payload from raising.
    return [
        {"title": item.get("title", ""), "link": item.get("link", "")}
        for item in results.get("news_results", [])[:5]
    ]
36
+
37
+ # Function to fetch government initiatives using SerpAPI
38
def fetch_government_initiatives_serpapi():
    """Fetch up to five articles on Indian government initiatives via SerpAPI.

    Searches the Google News vertical for Indian government waste-management
    initiatives and returns a list of ``{"title": ..., "link": ...}`` dicts.
    Returns an empty list when SerpAPI reports no news results.
    """
    st.info("Fetching Indian government initiatives on waste management using SerpAPI...")
    search = GoogleSearch({
        "q": "Indian government waste management initiatives",
        "tbm": "nws",  # "nws" selects the Google News vertical
        "api_key": API_KEY,
    })
    results = search.get_dict()
    # Defensive access: "news_results" may be absent and items may be
    # missing "title"/"link"; .get() keeps a partial payload from raising.
    return [
        {"title": item.get("title", ""), "link": item.get("link", "")}
        for item in results.get("news_results", [])[:5]
    ]
51
+
52
+ # Streamlit app layout
53
# Streamlit app layout
def main():
    """Render the BinSight Streamlit page.

    Layout: page config, title/intro, three tabs (news, hackathons/webinars,
    government initiatives) each populated from a SerpAPI fetcher, a sidebar
    "about" panel, and an HTML back-link to the dashboard.
    """
    # Must be the first Streamlit call on the page.
    st.set_page_config(page_title="BinSight", layout="wide")
    st.title("🌍 BinSight - Waste Management & Education")
    st.markdown(
        """
        Welcome to **BinSight**! This platform provides real-time updates on:
        - πŸ“° Current news on waste management.
        - πŸ’‘ Hackathons and webinars related to waste management.
        - πŸ“› Indian government initiatives.
        """
    )

    # Tabs for better UI
    tab1, tab2, tab3 = st.tabs(["πŸ“° News", "πŸ’‘ Hackathons/Webinars", "πŸ“› Govt Initiatives"])


    # Tab 1: News — each result rendered as a markdown link.
    with tab1:
        news = fetch_news_serpapi()
        if news:
            for item in news:
                st.markdown(f"[{item['title']}]({item['link']})")
        else:
            st.warning("No news articles found.")

    # Tab 2: Hackathons/Webinars
    with tab2:
        hackathons = fetch_hackathons_serpapi()
        if hackathons:
            for event in hackathons:
                st.markdown(f"[{event['title']}]({event['link']})")
        else:
            st.warning("No hackathons or webinars found.")

    # Tab 3: Govt Initiatives
    with tab3:
        initiatives = fetch_government_initiatives_serpapi()
        if initiatives:
            for initiative in initiatives:
                st.markdown(f"[{initiative['title']}]({initiative['link']})")
        else:
            st.warning("No government initiatives found.")

    # Footer
    st.sidebar.title("About BinSight")
    st.sidebar.info(
        """
        BinSight is an initiative to educate people about waste management and to connect
        them with events, news, and government programs to make our planet sustainable.
        """
    )

    # Back button to redirect to dashboard
    # NOTE(review): raw HTML requires unsafe_allow_html; target='_self'
    # replaces the Streamlit page with the external dashboard URL.
    st.markdown("<br>", unsafe_allow_html=True)
    st.markdown("<a href='https://binsight.onrender.com/dashboard.html' target='_self' style='text-decoration:none;'><button style='padding: 10px 20px; font-size: 16px;'>β¬… Back to Dashboard</button></a>", unsafe_allow_html=True)
108
+
109
# Run the app only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
111
+
112
+
113
+
114
+
115
+
116
+
117
+
118
+
119
+
120
+
121
+
122
+
123
+
124
+
125
+ # Best version without backbutton
126
+
127
+
128
+ # import streamlit as st
129
+ # from serpapi import GoogleSearch
130
+
131
+ # # SerpAPI API Key
132
+ # API_KEY = "8369b2ad95bdb8602cb3f5da80c056e646691599ea0f5aeb01ea47cf18f28270"
133
+
134
+ # # Function to fetch news articles using SerpAPI
135
+ # def fetch_news_serpapi():
136
+ # st.info("Fetching latest news on waste management using SerpAPI...")
137
+ # search = GoogleSearch({
138
+ # "q": "waste management India",
139
+ # "tbm": "nws",
140
+ # "api_key": API_KEY
141
+ # })
142
+ # results = search.get_dict()
143
+ # if "news_results" in results:
144
+ # news = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
145
+ # return news
146
+ # else:
147
+ # return []
148
+
149
+ # # Function to fetch hackathons and webinars using SerpAPI
150
+ # def fetch_hackathons_serpapi():
151
+ # st.info("Fetching hackathons and webinars related to waste management using SerpAPI...")
152
+ # search = GoogleSearch({
153
+ # "q": "waste management hackathon OR webinar",
154
+ # "tbm": "nws",
155
+ # "api_key": API_KEY
156
+ # })
157
+ # results = search.get_dict()
158
+ # if "news_results" in results:
159
+ # hackathons = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
160
+ # return hackathons
161
+ # else:
162
+ # return []
163
+
164
+ # # Function to fetch government initiatives using SerpAPI
165
+ # def fetch_government_initiatives_serpapi():
166
+ # st.info("Fetching Indian government initiatives on waste management using SerpAPI...")
167
+ # search = GoogleSearch({
168
+ # "q": "Indian government waste management initiatives",
169
+ # "tbm": "nws",
170
+ # "api_key": API_KEY
171
+ # })
172
+ # results = search.get_dict()
173
+ # if "news_results" in results:
174
+ # initiatives = [{"title": item["title"], "link": item["link"]} for item in results["news_results"][:5]]
175
+ # return initiatives
176
+ # else:
177
+ # return []
178
+
179
+ # # Streamlit app layout
180
+ # def main():
181
+ # st.set_page_config(page_title="BinSight", layout="wide")
182
+ # st.title("🌍 BinSight - Waste Management & Education")
183
+ # st.markdown(
184
+ # """
185
+ # Welcome to **BinSight**! This platform provides real-time updates on:
186
+ # - πŸ“° Current news on waste management.
187
+ # - πŸ’‘ Hackathons and webinars related to waste management.
188
+ # - πŸ“œ Indian government initiatives.
189
+ # """
190
+ # )
191
+
192
+ # # Tabs for better UI
193
+ # tab1, tab2, tab3 = st.tabs(["πŸ“° News", "πŸ’‘ Hackathons/Webinars", "πŸ“œ Govt Initiatives"])
194
+
195
+
196
+ # # Tab 1: News
197
+ # with tab1:
198
+ # news = fetch_news_serpapi()
199
+ # if news:
200
+ # for item in news:
201
+ # st.markdown(f"[{item['title']}]({item['link']})")
202
+ # else:
203
+ # st.warning("No news articles found.")
204
+
205
+ # # Tab 2: Hackathons/Webinars
206
+ # with tab2:
207
+ # hackathons = fetch_hackathons_serpapi()
208
+ # if hackathons:
209
+ # for event in hackathons:
210
+ # st.markdown(f"[{event['title']}]({event['link']})")
211
+ # else:
212
+ # st.warning("No hackathons or webinars found.")
213
+
214
+ # # Tab 3: Govt Initiatives
215
+ # with tab3:
216
+ # initiatives = fetch_government_initiatives_serpapi()
217
+ # if initiatives:
218
+ # for initiative in initiatives:
219
+ # st.markdown(f"[{initiative['title']}]({initiative['link']})")
220
+ # else:
221
+ # st.warning("No government initiatives found.")
222
+
223
+ # # Footer
224
+ # st.sidebar.title("About BinSight")
225
+ # st.sidebar.info(
226
+ # """
227
+ # BinSight is an initiative to educate people about waste management and to connect
228
+ # them with events, news, and government programs to make our planet sustainable.
229
+ # """
230
+ # )
231
+
232
+ # if __name__ == "__main__":
233
+ # main()
234
+
235
+
236
+
237
+
238
+
239
+
240
+
241
+
242
+
243
+
244
+
245
+
246
+ # import streamlit as st
247
+ # import requests
248
+ # from bs4 import BeautifulSoup
249
+ # import json
250
+ # from serpapi import GoogleSearch
251
+
252
+
253
+
254
+ # # Constants for Google Search API (serpapi)
255
+ # SERPAPI_API_KEY = "8369b2ad95bdb8602cb3f5da80c056e646691599ea0f5aeb01ea47cf18f28270"
256
+
257
+ # # Fetch the latest news on waste management
258
+ # def fetch_news():
259
+ # st.info("Fetching the latest news on waste management...")
260
+ # try:
261
+ # url = "https://news.google.com/rss/search?q=waste+management"
262
+ # response = requests.get(url, timeout=10)
263
+ # response.raise_for_status()
264
+ # soup = BeautifulSoup(response.content, "xml")
265
+ # articles = [entry.title.text for entry in soup.find_all("item")]
266
+ # if not articles:
267
+ # raise ValueError("No news articles found.")
268
+ # return articles
269
+ # except Exception as e:
270
+ # st.warning(f"Error fetching news: {e}")
271
+ # st.info("Searching for waste management news...")
272
+ # return fetch_from_google("waste management news")
273
+
274
+ # # Fetch upcoming webinars and hackathons
275
+ # def fetch_webinars_and_hackathons():
276
+ # st.info("Fetching upcoming webinars and hackathons on waste management...")
277
+ # try:
278
+ # url = "https://www.google.com/search?q=waste+management+webinars+hackathons"
279
+ # response = requests.get(url, timeout=10)
280
+ # response.raise_for_status()
281
+ # soup = BeautifulSoup(response.content, "html.parser")
282
+ # webinars = [item.text for item in soup.find_all("h3", limit=5)]
283
+ # if not webinars:
284
+ # raise ValueError("No webinars or hackathons found.")
285
+ # return webinars
286
+ # except Exception as e:
287
+ # st.warning(f"Error fetching webinars: {e}")
288
+ # st.info("Searching for webinars and hackathons...")
289
+ # return fetch_from_google("waste management webinars hackathons")
290
+
291
+ # # Fetch Indian government initiatives related to waste management
292
+ # def fetch_government_initiatives():
293
+ # st.info("Fetching Indian government initiatives on waste management...")
294
+ # try:
295
+ # url = "https://swachhbharat.mygov.in/"
296
+ # response = requests.get(url, timeout=10)
297
+ # response.raise_for_status()
298
+ # soup = BeautifulSoup(response.content, "html.parser")
299
+ # initiatives = [item.text.strip() for item in soup.find_all("h2", limit=5)]
300
+ # if not initiatives:
301
+ # raise ValueError("No government initiatives found.")
302
+ # return initiatives
303
+ # except Exception as e:
304
+ # st.warning(f"Error fetching government initiatives: {e}")
305
+ # st.info("Searching for government initiatives on waste management...")
306
+ # return fetch_from_google("Indian government initiatives waste management")
307
+
308
+ # # Search using Google API (serpapi) for news, webinars, or government initiatives
309
+ # def fetch_from_google(query):
310
+ # params = {
311
+ # "q": query,
312
+ # "api_key": SERPAPI_API_KEY,
313
+ # "engine": "google",
314
+ # }
315
+ # search = GoogleSearch(params)
316
+ # results = search.get_dict()
317
+
318
+ # if 'organic_results' not in results:
319
+ # return [f"No results found for '{query}'"]
320
+
321
+ # data = [result['title'] for result in results['organic_results']]
322
+ # return data if data else [f"No results found for '{query}'"]
323
+
324
+ # # Main function to organize everything
325
+ # def main():
326
+ # st.title("BinSight - Waste Management News, Webinars, and Initiatives")
327
+
328
+ # # Display News
329
+ # st.subheader("Latest News on Waste Management")
330
+ # news = fetch_news()
331
+ # for item in news:
332
+ # st.write(f"- {item}")
333
+
334
+ # # Display Webinars & Hackathons
335
+ # st.subheader("Upcoming Webinars & Hackathons")
336
+ # webinars = fetch_webinars_and_hackathons()
337
+ # for item in webinars:
338
+ # st.write(f"- {item}")
339
+
340
+ # # Display Government Initiatives
341
+ # st.subheader("Indian Government Initiatives on Waste Management")
342
+ # initiatives = fetch_government_initiatives()
343
+ # for item in initiatives:
344
+ # st.write(f"- {item}")
345
+
346
+ # # Run the Streamlit app
347
+ # if __name__ == "__main__":
348
+ # main()
349
+
350
+
351
+
352
+
353
+
354
+
355
+
356
+
357
+
358
+
359
+
360
+ # import streamlit as st
361
+ # import requests
362
+ # from bs4 import BeautifulSoup
363
+ # import pandas as pd
364
+
365
+ # # Function to fetch news articles
366
+ # # Function to fetch news articles
367
+ # def fetch_news():
368
+ # st.info("Fetching latest news on waste management...")
369
+ # url = "https://news.google.com/rss/search?q=waste+management+india"
370
+ # response = requests.get(url)
371
+ # soup = BeautifulSoup(response.content, "lxml-xml") # Use lxml-xml parser
372
+ # articles = soup.find_all("item")[:5]
373
+ # news = [{"title": item.title.text, "link": item.link.text} for item in articles]
374
+ # return news
375
+
376
+
377
+ # # Function to fetch hackathons/webinars
378
+ # def fetch_hackathons():
379
+ # st.info("Fetching hackathons and webinars related to waste management...")
380
+ # url = "https://www.eventbrite.com/d/online/environment--conferences/"
381
+ # response = requests.get(url)
382
+ # soup = BeautifulSoup(response.text, "html.parser")
383
+ # events = soup.find_all("div", {"class": "search-event-card-wrapper"})[:5]
384
+ # hackathons = []
385
+ # for event in events:
386
+ # title = event.find("div", {"class": "eds-event-card__formatted-name--is-clamped"}).text
387
+ # link = event.find("a")["href"]
388
+ # hackathons.append({"title": title, "link": link})
389
+ # return hackathons
390
+
391
+ # def fetch_government_initiatives():
392
+ # st.info("Fetching Indian government initiatives on waste management...")
393
+ # # Alternative sources for government initiatives
394
+ # urls = [
395
+ # "https://mohua.gov.in/",
396
+ # "https://sbmurban.org/",
397
+ # ]
398
+ # initiatives = []
399
+
400
+ # for url in urls:
401
+ # try:
402
+ # response = requests.get(url, timeout=10)
403
+ # response.raise_for_status()
404
+ # soup = BeautifulSoup(response.content, "html.parser")
405
+ # # Example: Adjust parsing logic based on the website's structure
406
+ # initiatives.extend([item.text.strip() for item in soup.find_all("h2", limit=5)])
407
+ # except requests.exceptions.RequestException as e:
408
+ # st.warning(f"Could not fetch data from {url}: {e}")
409
+
410
+ # if not initiatives:
411
+ # st.error("No government initiatives found.")
412
+ # return ["No data available."]
413
+ # return initiatives
414
+
415
+
416
+
417
+ # # Streamlit app layout
418
+ # def main():
419
+ # st.set_page_config(page_title="BinSight", layout="wide")
420
+ # st.title("🌍 BinSight - Waste Management & Education")
421
+ # st.markdown(
422
+ # """
423
+ # Welcome to **BinSight**! This platform provides real-time updates on:
424
+ # - πŸ“° Current news on waste management.
425
+ # - πŸ’‘ Hackathons and webinars related to waste management.
426
+ # - πŸ“œ Indian government initiatives.
427
+ # """
428
+ # )
429
+
430
+ # # Tabs for better UI
431
+ # tab1, tab2, tab3 = st.tabs(["πŸ“° News", "πŸ’‘ Hackathons/Webinars", "πŸ“œ Govt Initiatives"])
432
+
433
+ # # Tab 1: News
434
+ # with tab1:
435
+ # news = fetch_news()
436
+ # if news:
437
+ # for item in news:
438
+ # st.markdown(f"[{item['title']}]({item['link']})")
439
+ # else:
440
+ # st.warning("No news articles found.")
441
+
442
+ # # Tab 2: Hackathons/Webinars
443
+ # with tab2:
444
+ # hackathons = fetch_hackathons()
445
+ # if hackathons:
446
+ # for event in hackathons:
447
+ # st.markdown(f"[{event['title']}]({event['link']})")
448
+ # else:
449
+ # st.warning("No hackathons or webinars found.")
450
+
451
+ # # Tab 3: Govt Initiatives
452
+ # with tab3:
453
+ # initiatives = fetch_government_initiatives()
454
+ # if initiatives:
455
+ # for initiative in initiatives:
456
+ # st.markdown(f"[{initiative['title']}]({initiative['link']})")
457
+ # else:
458
+ # st.warning("No government initiatives found.")
459
+
460
+ # # Footer
461
+ # st.sidebar.title("About BinSight")
462
+ # st.sidebar.info(
463
+ # """
464
+ # BinSight is an initiative to educate people about waste management and to connect
465
+ # them with events, news, and government programs to make our planet sustainable.
466
+ # """
467
+ # )
468
+
469
+ # if __name__ == "__main__":
470
+ # main()