GirishaBuilds01 commited on
Commit
8f76a50
·
verified Β·
1 Parent(s): e7ee167

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +168 -237
app.py CHANGED
@@ -1,278 +1,209 @@
1
  """
2
  ESG Document Intelligence Prototype
3
- Simple, lean version for HuggingFace Spaces
4
  """
5
 
6
  import gradio as gr
7
  import re
8
  import json
 
9
 
10
- # ── Constants ─────────────────────────────────────────────────────────────────
 
 
 
 
11
 
12
- GREENWASHING_KEYWORDS = [
13
- "carbon neutral", "net-zero", "net zero", "climate positive",
14
- "100% renewable", "zero emissions", "carbon negative", "eco-friendly",
15
- "carbon offset", "zero waste", "nature positive", "planet positive",
16
- "fully sustainable", "green certified"
17
- ]
18
 
19
- ESG_CATEGORIES = {
20
- "🌿 Environmental": [
21
- "carbon", "emission", "climate", "renewable", "energy", "water",
22
- "waste", "biodiversity", "pollution", "recycling", "greenhouse",
23
- "fossil fuel", "solar", "wind", "deforestation"
24
- ],
25
- "πŸ‘₯ Social": [
26
- "employee", "diversity", "inclusion", "health", "safety", "community",
27
- "human rights", "labor", "gender", "training", "wellbeing",
28
- "stakeholder", "philanthropy", "education", "wage"
29
- ],
30
- "πŸ›οΈ Governance": [
31
- "board", "director", "audit", "compliance", "ethics", "transparency",
32
- "corruption", "risk management", "disclosure", "accountability",
33
- "shareholder", "executive", "policy", "regulation"
34
- ]
35
- }
36
 
37
- SECTOR_KEYWORDS = {
38
- "Energy & Utilities": ["oil", "gas", "electricity", "utility", "power plant", "pipeline"],
39
- "Finance & Banking": ["bank", "investment", "portfolio", "loan", "insurance"],
40
- "Technology": ["software", "data center", "cloud", "semiconductor"],
41
- "Manufacturing": ["factory", "manufacturing", "production", "supply chain"],
42
- "Consumer Goods": ["product", "retail", "consumer", "packaging", "brand"],
43
- "Healthcare": ["health", "pharmaceutical", "medical", "hospital"],
44
- "Agriculture & Food": ["agriculture", "food", "farming", "crop", "livestock"],
45
- "Transportation": ["transport", "aviation", "shipping", "fleet"],
46
- }
47
 
48
- # ── Global State ──────────────────────────────────────────────────────────────
49
- _doc_chunks = []
50
- _doc_name = ""
 
 
51
 
52
- # ── Helpers ───────────────────────────────────────────────────────────────────
 
 
 
 
53
 
54
- def extract_text(pdf_path):
55
- import pdfplumber
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  pages = []
57
- with pdfplumber.open(pdf_path) as pdf:
58
- for i, page in enumerate(pdf.pages):
59
- text = (page.extract_text() or "").strip()
60
- if text:
61
- pages.append({"page": i + 1, "text": text})
62
  return pages
63
 
64
-
65
- def simple_chunk(pages, size=300):
66
- chunks = []
67
  for pg in pages:
68
  words = pg["text"].split()
69
- for start in range(0, len(words), size):
70
- chunk_text = " ".join(words[start:start + size])
71
- if len(chunk_text) > 40:
72
- chunks.append({"page": pg["page"], "text": chunk_text})
73
- return chunks
74
-
75
-
76
- def keyword_search(query, chunks, top_k=4):
77
- query_words = set(re.sub(r"[^\w\s]", "", query.lower()).split())
78
- scored = []
79
- for chunk in chunks:
80
- score = sum(chunk["text"].lower().count(w) for w in query_words)
81
- if score > 0:
82
- scored.append((score, chunk))
83
- scored.sort(key=lambda x: -x[0])
84
- return [c for _, c in scored[:top_k]]
85
-
86
-
87
- def classify_role(text):
88
  t = text.lower()
89
- if any(kw in t for kw in GREENWASHING_KEYWORDS):
90
- return "claim"
91
- if any(w in t for w in ["data shows", "%", "tonnes", "kwh", "mwh", "measured"]):
92
- return "evidence"
93
- if any(w in t for w in ["policy", "target", "goal", "by 2030", "by 2050", "we will"]):
94
- return "policy"
95
- if any(w in t for w in ["kpi", "metric", "indicator", "score", "rating"]):
96
- return "metric"
97
  return "context"
98
 
99
-
100
- # ── Analysis ──────────────────────────────────────────────────────────────────
101
-
102
- def compute_esg_scores(chunks):
103
- text = " ".join(c["text"] for c in chunks).lower()
104
- counts = {k: sum(text.count(kw) for kw in kws) for k, kws in ESG_CATEGORIES.items()}
105
- total = sum(counts.values()) or 1
106
- return {k: round(v / total * 100, 1) for k, v in counts.items()}
107
-
108
-
109
- def detect_sector(chunks):
110
- text = " ".join(c["text"] for c in chunks).lower()
111
- hits = {s: sum(text.count(kw) for kw in kws) for s, kws in SECTOR_KEYWORDS.items()}
112
- best = max(hits, key=hits.get)
113
- return best if hits[best] > 0 else "General / Diversified"
114
-
115
-
116
- def detect_greenwashing(chunks):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  flags, seen = [], set()
118
- for chunk in chunks:
119
- t = chunk["text"].lower()
120
- matched = [kw for kw in GREENWASHING_KEYWORDS if kw in t]
121
  if matched:
122
- key = (chunk["page"], matched[0])
123
  if key not in seen:
124
  seen.add(key)
125
- verified = any(w in t for w in ["certified", "verified", "audited", "third party", "sbti"])
126
- flags.append({"page": chunk["page"], "keywords": matched,
127
- "snip": chunk["text"][:200], "verified": verified})
128
- return flags
129
-
130
-
131
- # ── Gradio Handlers ────────────────────────────────────────────────────────────
132
-
133
- def process_pdf(pdf_file):
134
- global _doc_chunks, _doc_name
135
- if pdf_file is None:
136
- return "⚠️ Please upload a PDF."
137
- try:
138
- pages = extract_text(pdf_file.name)
139
- if not pages:
140
- return "❌ Could not extract text. Use a text-based PDF (not scanned)."
141
- _doc_chunks = simple_chunk(pages)
142
- _doc_name = pdf_file.name.split("/")[-1]
143
- roles = {}
144
- for c in _doc_chunks:
145
- r = classify_role(c["text"])
146
- roles[r] = roles.get(r, 0) + 1
147
- return (
148
- f"βœ… **Processed:** {_doc_name}\n\n"
149
- f"- Pages: **{len(pages)}**\n"
150
- f"- Chunks: **{len(_doc_chunks)}**\n"
151
- f"- Discourse roles: `{json.dumps(roles)}`\n\n"
152
- f"Now explore the other tabs!"
153
- )
154
- except Exception as e:
155
- return f"❌ Error: {e}"
156
-
157
-
158
- def ask_question(question):
159
- if not _doc_chunks:
160
- return "⚠️ Upload a document first.", ""
161
- if not question.strip():
162
- return "⚠️ Enter a question.", ""
163
- hits = keyword_search(question, _doc_chunks)
164
- if not hits:
165
- return "No relevant content found for that question.", ""
166
- answer = f"Based on **{_doc_name}**, here is what was found:\n\n"
167
- for c in hits:
168
- answer += f"πŸ“„ *Page {c['page']}:* {c['text'][:300]}…\n\n"
169
- evidence = "### πŸ“Ž Retrieved Chunks (keyword match)\n\n"
170
- for i, c in enumerate(hits, 1):
171
- role = classify_role(c["text"])
172
- evidence += f"**[{i}] Page {c['page']} | role: `{role}`**\n> {c['text'][:250]}…\n\n"
173
- return answer, evidence
174
-
175
-
176
- def show_esg_scores():
177
- if not _doc_chunks:
178
- return "⚠️ Upload a document first."
179
- scores = compute_esg_scores(_doc_chunks)
180
- sector = detect_sector(_doc_chunks)
181
- def bar(v):
182
- f = int(v / 5)
183
- return "β–ˆ" * f + "β–‘" * (20 - f)
184
- lines = [f"## πŸ“Š ESG Scores β€” *{_doc_name}*\n",
185
- "| Pillar | Score | Bar |", "|--------|-------|-----|"]
186
- for pillar, score in scores.items():
187
- lines.append(f"| {pillar} | {score}% | `{bar(score)}` |")
188
- overall = round(sum(scores.values()) / 3, 1)
189
- lines.append(f"| ⭐ **Overall** | **{overall}%** | `{bar(overall)}` |")
190
- lines.append(f"\n**Detected Sector:** {sector}")
191
- lines.append("\n> *Scores are keyword-density proxies for demonstration.*")
192
- return "\n".join(lines)
193
-
194
-
195
- def show_greenwashing():
196
- if not _doc_chunks:
197
- return "⚠️ Upload a document first."
198
- flags = detect_greenwashing(_doc_chunks)
199
- if not flags:
200
- return "βœ… No greenwashing keywords detected."
201
- unverified = [f for f in flags if not f["verified"]]
202
- verified = [f for f in flags if f["verified"]]
203
- lines = [f"## 🚨 Greenwashing Report β€” *{_doc_name}*\n",
204
- f"**Flagged:** {len(flags)} claims ({len(unverified)} ⚠️ unverified | {len(verified)} βœ… with evidence)\n\n---\n"]
205
- if unverified:
206
- lines.append("### ⚠️ Unverified Claims\n")
207
- for f in unverified:
208
- lines.append(f"πŸ“ **Page {f['page']}** β€” {', '.join(f['keywords'])}\n> {f['snip']}…\n")
209
- if verified:
210
- lines.append("\n### βœ… Claims With Supporting Evidence\n")
211
- for f in verified:
212
- lines.append(f"πŸ“ **Page {f['page']}** β€” {', '.join(f['keywords'])}\n> {f['snip']}…\n")
213
- return "\n".join(lines)
214
-
215
-
216
- def show_graph():
217
- if not _doc_chunks:
218
- return "⚠️ Upload a document first."
219
  roles = {}
220
- for c in _doc_chunks:
221
- r = classify_role(c["text"])
222
- roles[r] = roles.get(r, 0) + 1
223
- return (
224
- f"## πŸ•ΈοΈ Discourse Graph Summary β€” *{_doc_name}*\n\n"
225
- "Chunks are classified into discourse roles and linked by typed edges.\n\n"
226
- "| Role | Count | Meaning |\n"
227
- "|------|-------|---------|\n"
228
- f"| `claim` | {roles.get('claim', 0)} | Sustainability claims (greenwashing candidates) |\n"
229
- f"| `evidence` | {roles.get('evidence', 0)} | Data, measurements, statistics |\n"
230
- f"| `policy` | {roles.get('policy', 0)} | Commitments, targets, goals |\n"
231
- f"| `metric` | {roles.get('metric', 0)} | KPIs and indicators |\n"
232
- f"| `context` | {roles.get('context', 0)} | General narrative |\n\n"
233
- "**Edge types:** `follows` · `supported_by` (claim→evidence) · `measured_by` (policy→metric)\n\n"
234
- "> In a full HyperRAG deployment, these edges enable multi-hop reasoning across the document."
235
- )
236
-
237
 
238
  # ── UI ────────────────────────────────────────────────────────────────────────
239
-
240
- with gr.Blocks(title="ESG Intelligence Prototype") as demo:
241
-
242
- gr.Markdown("# 🌿 ESG Document Intelligence Prototype\n**HyperRAG + Discourse Graph** β€” upload an ESG PDF to explore")
243
 
244
  with gr.Tab("πŸ“€ Upload"):
245
- pdf_in = gr.File(label="ESG Report (PDF)", file_types=[".pdf"])
246
- proc_btn = gr.Button("βš™οΈ Process Document", variant="primary")
247
- proc_out = gr.Markdown("Upload a PDF and click Process.")
248
- proc_btn.click(process_pdf, inputs=pdf_in, outputs=proc_out)
249
 
250
  with gr.Tab("πŸ’¬ Q&A"):
251
- q_in = gr.Textbox(label="Question", placeholder="What are the carbon reduction targets?")
252
- q_btn = gr.Button("Ask", variant="primary")
253
- q_ans = gr.Markdown(label="Answer")
254
- q_ev = gr.Markdown(label="Evidence")
255
- gr.Examples([
256
- ["What are the Scope 1 and 2 emissions?"],
257
- ["What diversity initiatives are mentioned?"],
258
- ["What governance policies exist?"],
259
- ["What renewable energy targets are set?"],
260
- ], inputs=q_in)
261
- q_btn.click(ask_question, inputs=q_in, outputs=[q_ans, q_ev])
262
 
263
  with gr.Tab("πŸ“Š ESG Scores"):
264
- s_btn = gr.Button("Compute Scores", variant="primary")
265
- s_out = gr.Markdown()
266
- s_btn.click(show_esg_scores, outputs=s_out)
267
 
268
  with gr.Tab("🚨 Greenwashing"):
269
- g_btn = gr.Button("Detect Claims", variant="primary")
270
- g_out = gr.Markdown()
271
- g_btn.click(show_greenwashing, outputs=g_out)
272
 
273
  with gr.Tab("πŸ•ΈοΈ Discourse Graph"):
274
- d_btn = gr.Button("Show Graph Summary", variant="primary")
275
- d_out = gr.Markdown()
276
- d_btn.click(show_graph, outputs=d_out)
277
 
278
  demo.launch()
 
1
  """
2
  ESG Document Intelligence Prototype
3
+ Qdrant vector search + Discourse Graph
4
  """
5
 
6
  import gradio as gr
7
  import re
8
  import json
9
+ from pathlib import Path
10
 
11
# ── lazy imports ──────────────────────────────────────────────────────────────
def get_qdrant():
    """Deferred import of the Qdrant pieces this app uses.

    Returns the tuple (QdrantClient, Distance, VectorParams, PointStruct)
    so the heavy third-party import only happens when first needed.
    """
    from qdrant_client import QdrantClient, models
    return QdrantClient, models.Distance, models.VectorParams, models.PointStruct
16
 
17
def get_embedder():
    """Lazily load the MiniLM sentence-embedding model (384-dim vectors)."""
    from sentence_transformers import SentenceTransformer
    model_name = "all-MiniLM-L6-v2"
    return SentenceTransformer(model_name)
 
 
 
20
 
21
def get_pdfplumber():
    # Deferred import: keeps app start-up fast; pdfplumber is only needed
    # once a PDF is actually uploaded.
    import pdfplumber
    return pdfplumber
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
# ── Config ────────────────────────────────────────────────────────────────────
COLLECTION = "esg"  # Qdrant collection name for the indexed document
DIM = 384           # embedding dimension of all-MiniLM-L6-v2

# Marketing phrases that commonly signal unsubstantiated sustainability claims.
GREENWASHING_KW = [
    "carbon neutral", "net-zero", "net zero", "zero emissions",
    "100% renewable", "carbon offset", "zero waste", "eco-friendly",
    "fully sustainable", "nature positive", "carbon negative"
]

# Keyword buckets per ESG pillar; used for keyword-density proxy scores.
ESG_KW = {
    "🌿 Environmental": ["carbon","emission","climate","renewable","energy","water","waste","pollution","solar","biodiversity"],
    "πŸ‘₯ Social": ["employee","diversity","inclusion","health","safety","human rights","labor","gender","community"],
    "πŸ›οΈ Governance": ["board","audit","compliance","ethics","transparency","corruption","disclosure","regulation","policy"]
}
40
 
41
# ── State ─────────────────────────────────────────────────────────────────────
# Single-process app state: lazily created Qdrant client and embedder, plus
# the chunks/name of the most recently processed document. "ready" gates the
# query tabs until a PDF has been indexed.
state = {"client": None, "embedder": None, "chunks": [], "name": "", "ready": False}
43
+
44
+ # ── Init ──────────────────────────────────────────────────────────────────────
45
def init():
    """Create the embedder and the in-memory Qdrant collection on first use."""
    if state["embedder"] is None:
        state["embedder"] = get_embedder()
    if state["client"] is None:
        QdrantClient, Distance, VectorParams, _ = get_qdrant()
        client = QdrantClient(":memory:")
        client.recreate_collection(
            COLLECTION,
            vectors_config=VectorParams(size=DIM, distance=Distance.COSINE),
        )
        state["client"] = client
53
+
54
+ # ── PDF + chunking ────────────────────────────────────────────────────────────
55
def load_pdf(path):
    """Extract text page-by-page; pages without extractable text (e.g. scans) are skipped.

    Returns a list of {"page": 1-based page number, "text": stripped text}.
    """
    pdfplumber = get_pdfplumber()
    results = []
    with pdfplumber.open(path) as doc:
        for page_no, page in enumerate(doc.pages, start=1):
            text = (page.extract_text() or "").strip()
            if text:
                results.append({"page": page_no, "text": text})
    return results
64
 
65
def chunk(pages, size=250):
    """Split each page's text into windows of *size* words.

    Fragments of 30 characters or fewer are dropped as noise. Each chunk
    keeps the page number of the page it came from.
    """
    out = []
    for page in pages:
        words = page["text"].split()
        pieces = (
            " ".join(words[start:start + size])
            for start in range(0, len(words), size)
        )
        out.extend(
            {"page": page["page"], "text": piece}
            for piece in pieces
            if len(piece) > 30
        )
    return out
74
+
75
+ # ── Discourse role ────────────────────────────────────────────────────────────
76
def role(text):
    """Classify a chunk into a discourse role via ordered keyword heuristics.

    Priority: claim > evidence > policy > metric; anything else is "context".
    """
    lowered = text.lower()
    checks = (
        ("claim", GREENWASHING_KW),
        ("evidence", ["%", "tonnes", "kwh", "mwh"]),
        ("policy", ["target", "goal", "by 2030", "by 2050", "we will", "commitment"]),
        ("metric", ["kpi", "metric", "indicator"]),
    )
    for label, keywords in checks:
        if any(kw in lowered for kw in keywords):
            return label
    return "context"
83
 
84
+ # ── Handlers ──────────────────────────────────────────────────────────────────
85
def process(pdf):
    """Index an uploaded PDF: extract text, chunk, embed, and upsert into Qdrant.

    Returns a Markdown status string for the UI; errors are reported as a
    message rather than raised, so the Gradio app never crashes.
    """
    if pdf is None:
        return "⚠️ Upload a PDF first."
    try:
        init()
        # Reuse the classes get_qdrant() already exposes instead of
        # re-importing qdrant_client via __import__ hacks.
        _, Distance, VectorParams, PointStruct = get_qdrant()
        pages = load_pdf(pdf.name)
        chunks = chunk(pages)
        if not chunks:
            # Scanned/image-only PDFs yield no extractable text; encoding an
            # empty batch would fail downstream.
            return "❌ No text could be extracted. Use a text-based PDF (not scanned)."
        embeds = state["embedder"].encode(
            [c["text"] for c in chunks], batch_size=32, normalize_embeddings=True
        )
        # Reset the collection so re-uploading replaces the previous document.
        state["client"].recreate_collection(
            COLLECTION,
            vectors_config=VectorParams(size=DIM, distance=Distance.COSINE),
        )
        pts = [
            PointStruct(id=i, vector=e.tolist(),
                        payload={"page": c["page"], "text": c["text"]})
            for i, (c, e) in enumerate(zip(chunks, embeds))
        ]
        state["client"].upsert(COLLECTION, pts)
        state["chunks"] = chunks
        state["name"] = Path(pdf.name).name
        state["ready"] = True
        roles = {}
        for c in chunks:
            r = role(c["text"])
            roles[r] = roles.get(r, 0) + 1
        return (f"βœ… **{state['name']}** processed\n\n"
                f"- Pages: **{len(pages)}** | Chunks: **{len(chunks)}**\n"
                f"- Discourse nodes: `{json.dumps(roles)}`\n\n"
                "Explore the tabs β†’")
    except Exception as e:
        return f"❌ {e}"
111
+
112
def ask(q):
    """Answer a question via semantic search over the indexed document.

    Returns a pair (answer_markdown, evidence_markdown).
    """
    if not state["ready"]:
        return "⚠️ Upload a document first.", ""
    if not q.strip():
        return "⚠️ Enter a question.", ""
    query_vec = state["embedder"].encode([q], normalize_embeddings=True)[0].tolist()
    hits = state["client"].search(COLLECTION, query_vec, limit=4, with_payload=True)
    answer_parts = [f"**Results from {state['name']}:**\n\n"]
    evidence_parts = ["### πŸ“Ž Retrieved Evidence\n\n"]
    for rank, hit in enumerate(hits, 1):
        page, text = hit.payload["page"], hit.payload["text"]
        chunk_role = role(text)
        answer_parts.append(f"πŸ“„ **Page {page}:** {text[:280]}…\n\n")
        evidence_parts.append(
            f"**[{rank}] Page {page} | score {hit.score:.3f} | role `{chunk_role}`**\n> {text[:220]}…\n\n"
        )
    return "".join(answer_parts), "".join(evidence_parts)
125
+
126
def esg_scores():
    """Render keyword-density ESG pillar scores as a Markdown table."""
    if not state["ready"]:
        return "⚠️ Upload a document first."
    full_text = " ".join(c["text"] for c in state["chunks"]).lower()
    counts = {
        pillar: sum(full_text.count(word) for word in words)
        for pillar, words in ESG_KW.items()
    }
    total = sum(counts.values()) or 1  # avoid division by zero on zero hits
    scores = {pillar: round(n / total * 100, 1) for pillar, n in counts.items()}
    overall = round(sum(scores.values()) / 3, 1)

    def bar(v):
        filled = int(v / 5)
        return "β–ˆ" * filled + "β–‘" * (20 - filled)

    rows = "\n".join(
        f"| {pillar} | {score}% | `{bar(score)}` |" for pillar, score in scores.items()
    )
    return (f"## πŸ“Š ESG Scores β€” *{state['name']}*\n\n"
            f"| Pillar | Score | Bar |\n|--------|-------|-----|\n{rows}\n"
            f"| ⭐ Overall | **{overall}%** | `{bar(overall)}` |\n\n"
            "> Keyword-density proxy scores.")
139
+
140
def greenwashing():
    """Flag chunks containing greenwashing keywords, split by verification evidence.

    A (page, first-keyword) pair is reported at most once; a flag counts as
    evidenced when the chunk also contains a verification word.
    """
    if not state["ready"]:
        return "⚠️ Upload a document first."
    verification_words = ["certified", "verified", "audited", "third party", "sbti"]
    flags = []
    seen = set()
    for c in state["chunks"]:
        lowered = c["text"].lower()
        matched = [kw for kw in GREENWASHING_KW if kw in lowered]
        if not matched:
            continue
        key = (c["page"], matched[0])
        if key in seen:
            continue
        seen.add(key)
        has_evidence = any(w in lowered for w in verification_words)
        flags.append({"page": c["page"], "kws": matched,
                      "snip": c["text"][:200], "ok": has_evidence})
    if not flags:
        return "βœ… No greenwashing keywords found."
    bad = [f for f in flags if not f["ok"]]
    good = [f for f in flags if f["ok"]]
    out = [f"## 🚨 Greenwashing β€” *{state['name']}*\n",
           f"{len(bad)} unverified ⚠️ | {len(good)} with evidence βœ…\n\n---\n"]
    if bad:
        out.append("### ⚠️ Unverified\n")
        out.extend(f"πŸ“ **Page {f['page']}** β€” `{'`, `'.join(f['kws'])}`\n> {f['snip']}…\n"
                   for f in bad)
    if good:
        out.append("\n### βœ… Evidenced\n")
        out.extend(f"πŸ“ **Page {f['page']}** β€” `{'`, `'.join(f['kws'])}`\n> {f['snip']}…\n"
                   for f in good)
    return "\n".join(out)
166
+
167
def graph():
    """Summarize discourse-role counts and the modelled edge types as Markdown."""
    if not state["ready"]:
        return "⚠️ Upload a document first."
    roles = {}
    for c in state["chunks"]:
        r = role(c["text"])
        roles[r] = roles.get(r, 0) + 1
    # Most frequent roles first.
    ordered = sorted(roles.items(), key=lambda item: -item[1])
    rows = "\n".join(f"| `{name}` | {count} |" for name, count in ordered)
    return (f"## πŸ•ΈοΈ Discourse Graph β€” *{state['name']}*\n\n"
            f"| Role | Chunks |\n|------|--------|\n{rows}\n\n"
            "**Edges modelled:**\n"
            "- `follows` β€” sequential chunks\n"
            "- `supported_by` β€” claim β†’ evidence\n"
            "- `measured_by` β€” policy β†’ metric\n\n"
            "> Graph expansion enables multi-hop HyperRAG retrieval.")
 
 
 
 
 
 
 
180
 
181
  # ── UI ────────────────────────────────────────────────────────────────────────
182
# Build the Gradio UI: one tab per feature, each wired to a handler above.
with gr.Blocks(title="ESG Intelligence") as demo:
    gr.Markdown("# 🌿 ESG Document Intelligence\n*Qdrant semantic search · Discourse graph reasoning*")

    with gr.Tab("πŸ“€ Upload"):
        pdf_input = gr.File(label="ESG Report PDF", file_types=[".pdf"])
        process_btn = gr.Button("Process", variant="primary")
        status_md = gr.Markdown("Upload a PDF and click Process.")
        process_btn.click(process, pdf_input, status_md)

    with gr.Tab("πŸ’¬ Q&A"):
        question_box = gr.Textbox(label="Question", placeholder="What are the carbon reduction targets?")
        ask_btn = gr.Button("Ask", variant="primary")
        answer_md = gr.Markdown()
        evidence_md = gr.Markdown()
        gr.Examples(
            [["What are Scope 1 and 2 emissions?"],
             ["What diversity initiatives exist?"],
             ["What are the renewable energy targets?"],
             ["What governance policies are in place?"]],
            question_box,
        )
        ask_btn.click(ask, question_box, [answer_md, evidence_md])

    with gr.Tab("πŸ“Š ESG Scores"):
        scores_btn = gr.Button("Compute", variant="primary")
        scores_md = gr.Markdown()
        scores_btn.click(esg_scores, outputs=scores_md)

    with gr.Tab("🚨 Greenwashing"):
        detect_btn = gr.Button("Detect", variant="primary")
        greenwash_md = gr.Markdown()
        detect_btn.click(greenwashing, outputs=greenwash_md)

    with gr.Tab("πŸ•ΈοΈ Discourse Graph"):
        graph_btn = gr.Button("Show", variant="primary")
        graph_md = gr.Markdown()
        graph_btn.click(graph, outputs=graph_md)

demo.launch()