Files changed (1)
  1. app.py +350 -352
app.py CHANGED
@@ -1,384 +1,382 @@
  import os
- import json
- import re
- from typing import List, Dict, Any, Optional
  from fastapi import FastAPI, HTTPException
- from fastapi.responses import HTMLResponse
  from pydantic import BaseModel
- from dotenv import load_dotenv
- import requests
  from bs4 import BeautifulSoup
- from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
- from fastapi.middleware.cors import CORSMiddleware
-
-
- # ---------------- Lazy-loaded AI Models ----------------
- ZS_PIPE = None
- SENTE_MODEL = None
- GEMINI_CLIENT = None
-
- def get_zs_pipe():
-     global ZS_PIPE
-     if ZS_PIPE is None:
-         try:
-             from transformers import pipeline
-             # much smaller model (~250MB vs 1.3GB)
-             ZS_PIPE = pipeline("zero-shot-classification", model="typeform/distilbert-base-uncased-mnli")
-         except Exception:
-             ZS_PIPE = None
-     return ZS_PIPE

- def get_sente_model():
-     global SENTE_MODEL
-     if SENTE_MODEL is None:
-         try:
-             from sentence_transformers import SentenceTransformer
-             # smaller semantic similarity model (~80MB vs 400MB)
-             SENTE_MODEL = SentenceTransformer("all-MiniLM-L6-v2")
-         except Exception:
-             SENTE_MODEL = None
-     return SENTE_MODEL


- def get_gemini_client():
-     global GEMINI_CLIENT
-     if GEMINI_CLIENT is None:
-         try:
-             from google import genai
-             GEMINI_CLIENT = genai.Client()  # uses GEMINI_API_KEY from environment
-         except Exception:
-             GEMINI_CLIENT = None
-     return GEMINI_CLIENT
-
- # ---------------- Env Vars ----------------
- load_dotenv()
- GNEWS_API_KEY = os.getenv("GNEWS_KEY")
- NEWSORG_API_KEY = os.getenv("NEWSORG_KEY")
- GEMINI_API_KEY = os.getenv("AI_API_KEY")
-
- app = FastAPI(title="Hybrid Misinformation Detector")
- # Define allowed origins
- origins = ["*"]
-
- # Add CORS middleware
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=origins,    # List of allowed origins
-     allow_credentials=True,   # Allow cookies and credentials
-     allow_methods=["*"],      # Allow all HTTP methods (GET, POST, etc.)
-     allow_headers=["*"],      # Allow all headers
- )
- # ---------------- Models ----------------
- class VerifyRequest(BaseModel):
-     text: str
-     mode: Optional[str] = "fast"  # fast, deep, hybrid
-
- # ---------------- Utilities ----------------
- def safe_headers():
-     return {"User-Agent": "misinfo-tool/1.0 (+https://example.com)"}
-
- def domain_from_url(url: str) -> Optional[str]:
-     if not url: return None
      try:
-         m = re.search(r"https?://(?:www\.)?([^/]+)/?", url)
-         if m:
-             domain = m.group(1).lower()
-             parts = domain.split('.')
-             if len(parts) > 2:
-                 domain = ".".join(parts[-2:])
-             return domain
-     except Exception:
-         return None
-     return None
-
- # ---------------- Trusted / Blacklist ----------------
- TRUSTED_DOMAINS = {
-     "bbc.co.uk","bbc.com","cnn.com","nytimes.com","reuters.com","apnews.com",
-     "theguardian.com","npr.org","washingtonpost.com","wsj.com","usatoday.com",
-     "bloomberg.com","aljazeera.com","msnbc.com","cnbc.com","foxnews.com",
-     "scientificamerican.com","nature.com","sciencedaily.com"
- }
-
- BLACKLISTED_DOMAINS = {
-     "imdb.com","youtube.com","wikipedia.org","fandom.com","comicbook.com",
-     "rottentomatoes.com","hulu.com","netflix.com","ign.com","forbes.com"
- }
-
- UNWANTED_KEYWORDS = [
-     "movie","film","episode","tv show","trailer","comic","manga","fan","fandom",
-     "review","fiction","novel","fantasy","screenplay","actor","actress"
- ]

- # ---------------- NLP classify ----------------
- def classify_text_type(text: str) -> Dict[str, Any]:
-     labels = ["news","rumor","fact","opinion","satire","unverifiable"]
-     pipe = get_zs_pipe()
-     if pipe:
-         try:
-             res = pipe(text, labels, multi_label=False, truncation=True)
-             label = res["labels"][0]
-             score = float(res["scores"][0])
-             return {"type": label, "score": round(score,3), "scores": dict(zip(res["labels"], res["scores"]))}
-         except Exception:
-             pass
-     t = text.lower()
-     if any(k in t for k in ["according to","reported","breaking","news","announced"]):
-         return {"type":"news","score":0.65,"scores":{}}
-     if any(k in t for k in ["i think","in my opinion","i believe","should"]):
-         return {"type":"opinion","score":0.7,"scores":{}}
-     if any(k in t for k in ["joke","satire","not real","parody"]):
-         return {"type":"satire","score":0.7,"scores":{}}
-     if any(k in t for k in ["study shows","research","published","peer-reviewed"]):
-         return {"type":"fact","score":0.6,"scores":{}}
-     return {"type":"rumor","score":0.45,"scores":{}}
-
- def summarize_text(text: str, max_len=300) -> str:
-     sentences = re.split(r'(?<=[.!?]) +', text.strip())
-     summary = sentences[0] if sentences else text
-     if len(summary) > max_len:
-         summary = summary[:max_len].rsplit(' ',1)[0] + "..."
-     return summary
-
- # ---------------- Search ----------------
- def fetch_gnews(query: str, max_results=6) -> List[Dict[str,str]]:
-     if not GNEWS_API_KEY:
-         return []
      try:
-         url = "https://gnews.io/api/v4/search"
-         params = {"q": query, "token": GNEWS_API_KEY, "max": max_results, "lang":"en"}
-         r = requests.get(url, params=params, headers=safe_headers(), timeout=6)
-         r.raise_for_status()
-         js = r.json()
-         return [{"title": a.get("title"), "url": a.get("url"), "source": a.get("source",{}).get("name"), "snippet": a.get("description")} for a in js.get("articles", [])[:max_results]]
-     except Exception:
-         return []

- def fetch_newsapi(query: str, max_results=6) -> List[Dict[str,str]]:
-     if not NEWSORG_API_KEY:
-         return []
      try:
-         url = "https://newsapi.org/v2/everything"
-         params = {"q": query, "pageSize": max_results, "apiKey": NEWSORG_API_KEY, "language":"en"}
-         r = requests.get(url, params=params, headers=safe_headers(), timeout=6)
-         r.raise_for_status()
-         js = r.json()
-         return [{"title": a.get("title"), "url": a.get("url"), "source": a.get("source",{}).get("name"), "snippet": a.get("description")} for a in js.get("articles", [])[:max_results]]
-     except Exception:
          return []

- def duckduckgo_search(query: str, max_results=8) -> List[Dict[str,str]]:
      try:
-         url = "https://html.duckduckgo.com/html/"
-         r = requests.post(url, data={"q": query}, headers=safe_headers(), timeout=6)
          r.raise_for_status()
-         soup = BeautifulSoup(r.text, "html.parser")
          results = []
-         for res in soup.select(".result__a")[:max_results]:
-             title = res.get_text()
-             href = res.get("href")
-             snippet_node = res.find_parent().select_one(".result__snippet")
-             snippet = snippet_node.get_text() if snippet_node else ""
-             results.append({"title": title, "url": href, "source":None, "snippet": snippet})
          return results
-     except Exception:
          return []

- # ---------------- Optimized fetch all sources ----------------
- def fetch_all_sources(query: str) -> List[Dict[str,str]]:
-     with ThreadPoolExecutor(max_workers=3) as executor:
-         futures = [
-             executor.submit(fetch_gnews, query),
-             executor.submit(fetch_newsapi, query),
-             executor.submit(duckduckgo_search, query)
-         ]
-         results = []
-         for f in futures:
-             try:
-                 results.extend(f.result())
-             except:
-                 pass
-         return results
-
- # ---------------- Filtering ----------------
- def is_unwanted_snippet(snippet: str) -> bool:
-     if not snippet: return False
-     s = snippet.lower()
-     return any(k in s for k in UNWANTED_KEYWORDS)
-
- def filter_sources(sources: List[Dict[str,str]]) -> List[Dict[str,str]]:
-     kept, seen = [], set()
-     for s in sources:
-         url = s.get("url") or ""
-         if not url or url in seen: continue
-         seen.add(url)
-         domain = domain_from_url(url)
-         s["domain"] = domain or ""
-         if not domain: continue
-         if domain in BLACKLISTED_DOMAINS: continue
-         if domain not in TRUSTED_DOMAINS: continue
-         if is_unwanted_snippet(s.get("snippet","")) or is_unwanted_snippet(s.get("title","")): continue
-         kept.append(s)
-     return kept
-
- # ---------------- Semantic filtering ----------------
- def compute_similarity(args):
-     claim_emb, snippet = args
-     model = get_sente_model()
-     if not model: return 0.0
-     snippet_emb = model.encode(snippet, convert_to_tensor=True)
-     from sentence_transformers import util
-     return util.cos_sim(claim_emb, snippet_emb).item()
-
- def semantic_filter_parallel(claim: str, sources: List[Dict[str,str]], threshold=0.3) -> List[Dict[str,str]]:
-     model = get_sente_model()
-     if not model or not sources:
-         return sources
-
-     claim_emb = model.encode(claim, convert_to_tensor=True)
-     args = [(claim_emb, s["snippet"]) for s in sources]
-
-     filtered = []
-     with ProcessPoolExecutor(max_workers=min(4, len(sources))) as executor:
-         sims = list(executor.map(compute_similarity, args))
-
-     for s, sim in zip(sources, sims):
-         if sim >= threshold:
-             filtered.append(s)
-     return filtered
-
- # ---------------- Evidence summary ----------------
- def summarize_evidence(sources: List[Dict[str,str]], max_chars=800) -> str:
-     if not sources:
-         return "No credible news sources found."
-     parts = []
-     for s in sources[:8]:
-         t = s.get("title") or ""
-         snip = s.get("snippet") or ""
-         domain = s.get("domain") or domain_from_url(s.get("url","")) or ""
-         parts.append(f"{t} ({domain}) — {snip}")
-     res = "\n".join(parts)
-     if len(res) > max_chars:
-         return res[:max_chars].rsplit(" ",1)[0] + "..."
-     return res
-
- # ---------------- Fusion ----------------
- def fuse_scores(fast_conf: float, deep_outcome: Optional[str], evidence_count: int) -> Dict[str,Any]:
-     base = fast_conf*0.5 + min(evidence_count/5.0,1.0)*0.5
-     if deep_outcome and deep_outcome.lower() in ["false","misleading"]:
-         base *= 0.7
-     score = int(round(max(0, min(1, base)) * 100))
-     color = "green" if score >= 70 else "yellow" if score >= 40 else "red"
-     return {"score":score, "color":color}
-
- # ---------------- Fact Check API ----------------
- def factcheck_claim(claim: str) -> Dict[str,Any]:
-     api_key = "AIzaSyB0A-MIHs8qkjYTWE-TnoLw46KplX-Ihjs"
-     url = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
-     params = {"query": claim, "key": api_key, "languageCode": "en", "pageSize": 5}
      try:
-         r = requests.get(url, params=params, headers=safe_headers(), timeout=6)
          r.raise_for_status()
          js = r.json()
-         claims = js.get("claims", [])
-         results = []
-         for c in claims:
-             claimant = c.get("claimant", "Unknown")
-             text = c.get("text", "")
-             claimReview = c.get("claimReview", [])
-             for review in claimReview:
-                 publisher = review.get("publisher", {}).get("name")
-                 url = review.get("url")
-                 title = review.get("title")
-                 review_rating = review.get("textualRating")
-                 results.append({
-                     "claimant": claimant,
-                     "text": text,
-                     "publisher": publisher,
-                     "url": url,
-                     "title": title,
-                     "rating": review_rating
-                 })
-         outcome = "Unverified" if not results else results[0].get("rating", "Unverified")
-         return {"outcome": outcome, "source": results}
      except Exception as e:
-         return {"outcome": "Error", "source": [], "error": str(e)}
-
- # ---------------- API ----------------
- @app.post("/verify")
- async def verify(req: VerifyRequest):
-     claim = (req.text or "").strip()
-     mode = (req.mode or "fast").lower()
-     if not claim:
-         raise HTTPException(status_code=400, detail="Empty claim")
-
-     # Step 1 classify
-     text_type_res = classify_text_type(claim)
-     stored_type = text_type_res["type"]
-
-     # Step 2 summarize
-     user_summary = summarize_text(claim)
-
-     # Step 3 search
-     query = f"{user_summary} site:bbc.com OR site:cnn.com OR site:reuters.com OR site:apnews.com"
-     all_raw = fetch_all_sources(query)
-
-     # Step 4 filter
-     filtered = filter_sources(all_raw)

-     # Step 4b semantic filter
-     filtered = semantic_filter_parallel(claim, filtered)

-     evidence_summary = summarize_evidence(filtered)

-     # Step 5 fast classification
-     fast_label, fast_conf = "Unverifiable", 0.4
-     pipe = get_zs_pipe()
-     if pipe:
          try:
-             cls = pipe(claim, ["True","False","Misleading","Unverifiable"], multi_label=False, truncation=True)
-             fast_label = cls["labels"][0]
-             fast_conf = float(cls["scores"][0])
-         except:
-             pass
-
-     # Step 6 deep (Gemini AI)
-     deep_result = None
-     if mode in ["deep","hybrid"]:
-         client = get_gemini_client()
-         if client:
-             try:
-                 prompt = f'Verify claim: "{claim}". Output JSON: outcome, explanation, comparison, takeaways.'
-                 resp = client.models.generate_content(model="gemini-2.5-flash", contents=prompt)
-                 deep_result = json.loads(resp.text)
-             except:
-                 deep_result = {"outcome":"Unverifiable","explanation":"Gemini API error","takeaways":["Check credible sources"]}
          else:
-             deep_result = {"outcome":"Unverifiable","explanation":"Demo mode: API missing","takeaways":["Check credible sources"]}
-
-     # Step 7 fact-check API
-     factcheck = factcheck_claim(claim)

-     # Step 8 fuse scores
-     deep_outcome = deep_result.get("outcome") if deep_result else None
-     fuse = fuse_scores(fast_conf, deep_outcome, len(filtered))

-     return {
-         "claim": claim,
-         "text_type": stored_type,
-         "text_type_scores": text_type_res.get("scores", {}),
-         "user_summary": user_summary,
-         "fast": {"label": fast_label, "confidence": round(fast_conf,3)},
-         "evidence_count_raw": len(all_raw),
-         "evidence_count_filtered": len(filtered),
-         "evidence": filtered,
-         "evidence_summary": evidence_summary,
-         "deep": deep_result or {},
-         "factcheck": factcheck,
-         "credibility": fuse
-     }

- # ---------------- Frontend ----------------
-
- if __name__ == "__main__":
-     import uvicorn
-     uvicorn.run("app:app", host="0.0.0.0", port=int(os.getenv("PORT","1748")))
+ # app.py
+ """
+ Real-time Misinformation Detection FastAPI backend.
+
+ Features:
+ - Accepts JSON input with any of: text, url, image_base64
+ - Fetches and sanitizes URL content
+ - Runs OCR on images (pytesseract)
+ - Detects language (langdetect)
+ - Uses HuggingFace zero-shot classification to tag categories:
+   ['fake_news','satire','conspiracy','propaganda','deepfake','clickbait','misleading','true']
+ - Calls the Google Fact Check API and Google Custom Search (if API keys are provided)
+ - Aggregates evidence and returns a JSON response with tags, confidences, and sources.
+
+ Notes:
+ - Install Tesseract (system package) for OCR to work.
+ - Provide GOOGLE_API_KEY and GOOGLE_CSE_ID as env vars to enable Google queries.
+ - For production: convert models to ONNX/quantize for lower latency, and add rate-limiting & caching (Redis).
+ """
+
  import os
+ import io
+ import base64
+ import asyncio
+ import logging
+ from typing import Optional, List, Dict, Any
+
+ import requests
  from fastapi import FastAPI, HTTPException
  from pydantic import BaseModel
+ from PIL import Image
+ import pytesseract
+ from langdetect import detect, LangDetectException
  from bs4 import BeautifulSoup
+ from newspaper import Article
+
+ from transformers import pipeline
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger("misinfo-backend")
+
+ # Load environment variables for optional integrations
+ GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")              # Google API key
+ GOOGLE_CSE_ID = os.environ.get("GOOGLE_CSE_ID")                # Programmable Search Engine ID
+ GOOGLE_FACTCHECK_KEY = os.environ.get("GOOGLE_FACTCHECK_KEY")  # (if separate)
+
+ # Initialize FastAPI
+ app = FastAPI(title="Misinformation Detection API", version="0.1")
+
+ # Initialize the HF zero-shot classification pipeline.
+ # Zero-shot classifier to tag categories; candidate labels chosen for the task.
+ LABELS = [
+     "fake_news",
+     "satire",
+     "conspiracy",
+     "propaganda",
+     "deepfake",
+     "clickbait",
+     "misleading",
+     "true"
+ ]

+ # One-time model load - may take time on first import
+ logger.info("Loading HuggingFace zero-shot classification pipeline...")
+ zs_pipeline = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
+
+ # Optional: you can provide a lighter model string for faster inference:
+ # zs_pipeline = pipeline("zero-shot-classification", model="typeform/distilbert-base-uncased-mnli")
+
+ # Define request & response schemas
+ class AnalyzeRequest(BaseModel):
+     text: Optional[str] = None
+     url: Optional[str] = None
+     image_base64: Optional[str] = None  # base64-encoded image (PNG/JPEG)
+
+ class EvidenceItem(BaseModel):
+     source: str
+     title: Optional[str] = None
+     snippet: Optional[str] = None
+     url: Optional[str] = None
+     verdict: Optional[str] = None
+
+ class AnalyzeResponse(BaseModel):
+     input_type: str
+     language: Optional[str]
+     text: Optional[str]
+     tags: Dict[str, float]
+     evidence: List[EvidenceItem]
+     notes: Optional[str] = None
+
+ # -----------------------
+ # Helper utilities
+ # -----------------------
+ def fetch_url_text(url: str, timeout: float = 8.0) -> str:
+     """
+     Fetch a URL and extract the main article text using newspaper3k, falling back to BeautifulSoup.
+     """
+     try:
+         logger.info("Fetching URL: %s", url)
+         article = Article(url)
+         article.download()
+         article.parse()
+         text = article.text
+         if text and len(text) > 50:
+             return text
+     except Exception as e:
+         logger.debug("Newspaper failed: %s", e)

+     # Fallback: simple fetch and extract <p> text
+     try:
+         resp = requests.get(url, timeout=timeout, headers={"User-Agent": "misinfo-bot/1.0"})
+         resp.raise_for_status()
+         soup = BeautifulSoup(resp.text, "html.parser")
+         paragraphs = [p.get_text(strip=True) for p in soup.find_all("p")]
+         joined = "\n\n".join([p for p in paragraphs if p])
+         return joined[:10000]  # limit to 10k chars
+     except Exception as e:
+         logger.exception("Failed to fetch or parse URL: %s", e)
+         raise

+ def ocr_image_from_base64(b64: str) -> str:
+     """
+     Decode base64 image and run OCR (pytesseract).
+     """
      try:
+         image_data = base64.b64decode(b64)
+         img = Image.open(io.BytesIO(image_data)).convert("RGB")
+         # Optionally resize for better OCR
+         w, h = img.size
+         max_dim = 1600
+         if max(w, h) > max_dim:
+             scale = max_dim / max(w, h)
+             img = img.resize((int(w * scale), int(h * scale)))
+         text = pytesseract.image_to_string(img)
+         return text.strip()
+     except Exception as e:
+         logger.exception("OCR failed: %s", e)
+         return ""

+ def detect_language_of_text(text: str) -> Optional[str]:
      try:
+         lang = detect(text)
+         return lang
+     except LangDetectException:
+         return None

+ async def run_zero_shot(text: str, labels: List[str]) -> Dict[str, float]:
+     """
+     Run the zero-shot classifier and return a label->score mapping.
+     The pipeline call is synchronous, but we wrap it in a coroutine for interface consistency.
+     """
      try:
+         res = zs_pipeline(text, candidate_labels=labels, multi_label=True)
+         # res: {'sequence': ..., 'labels': [...], 'scores': [...]}
+         return {label: float(score) for label, score in zip(res["labels"], res["scores"])}
+     except Exception as e:
+         logger.exception("Zero-shot classification failed: %s", e)
+         return {label: 0.0 for label in labels}
+
+ # -----------------------
+ # External Fact-check / Search Integrations (optional)
+ # -----------------------
+ def query_google_factcheck(claim: str) -> List[Dict[str, Any]]:
+     """
+     Wrapper for the Google Fact Check Tools API.
+     Returns a list of claimReview-like objects with {source, title, snippet, url, verdict}.
+     Requires GOOGLE_API_KEY (or GOOGLE_FACTCHECK_KEY, if a separate key is used).
+     API docs: https://developers.google.com/fact-check/tools/api
+     """
+     key = GOOGLE_API_KEY or GOOGLE_FACTCHECK_KEY
+     if not key:
+         logger.debug("Google Fact Check key not configured.")
          return []

+     endpoint = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
+     params = {"query": claim, "key": key}
      try:
+         r = requests.get(endpoint, params=params, timeout=6)
          r.raise_for_status()
+         payload = r.json()
          results = []
+         for item in payload.get("claims", []):
+             for review in item.get("claimReview", []):
+                 results.append({
+                     "source": review.get("publisher", {}).get("name"),
+                     "title": item.get("text"),
+                     "snippet": review.get("title") or review.get("textualRating"),
+                     "url": review.get("url"),
+                     "verdict": review.get("textualRating") or review.get("title")
+                 })
          return results
+     except Exception as e:
+         logger.exception("Google Fact Check API error: %s", e)
          return []

+ def google_search_evidence(query: str, num: int = 3) -> List[Dict[str, Any]]:
+     """
+     Use Google Custom Search (Programmable Search Engine) to query news/fact-check sites.
+     Requires GOOGLE_API_KEY and GOOGLE_CSE_ID.
+     """
+     if not (GOOGLE_API_KEY and GOOGLE_CSE_ID):
+         logger.debug("Google Search/CSE not configured.")
+         return []
+
+     endpoint = "https://www.googleapis.com/customsearch/v1"
+     params = {
+         "key": GOOGLE_API_KEY,
+         "cx": GOOGLE_CSE_ID,
+         "q": query,
+         "num": num,
+     }
      try:
+         r = requests.get(endpoint, params=params, timeout=6)
          r.raise_for_status()
          js = r.json()
+         items = []
+         for it in js.get("items", []):
+             items.append({
+                 "source": it.get("displayLink"),
+                 "title": it.get("title"),
+                 "snippet": it.get("snippet"),
+                 "url": it.get("link"),
+             })
+         return items
      except Exception as e:
+         logger.exception("Google Custom Search error: %s", e)
+         return []

+ # -----------------------
+ # Simple deepfake image detector placeholder
+ # -----------------------
+ def detect_image_manipulation(image_base64: str) -> Dict[str, Any]:
+     """
+     Placeholder stub for image deepfake/manipulation detection.
+     For production: replace with a trained CV model (e.g. EfficientNet/TSA/ViT based).
+     Returns: {'deepfake_confidence': float, 'notes': str}
+     """
+     # TODO: integrate a real deepfake model (e.g., DFDC-trained classifier).
+     # Right now: always return low confidence.
+     return {"deepfake_confidence": 0.02, "notes": "placeholder detector; integrate a trained model for production."}
+
+ # -----------------------
+ # Main analysis orchestration
+ # -----------------------
+ async def analyze_text_pipeline(text: str) -> Dict[str, Any]:
+     """
+     Run language detection, claim extraction (simple split), zero-shot classification,
+     and fact-check queries, then return aggregated results.
+     """
+     # Detect language
+     language = detect_language_of_text(text) or "unknown"
+
+     # (Simple) claim extraction: split into sentences and pick the top-N sentences by length.
+     # In production, use a proper claim-detection model (e.g. ClaimBuster).
+     sentences = [s.strip() for s in text.split('.') if s.strip()]
+     # Heuristic: pick up to 3 longest sentences as "claims"
+     claims = sorted(sentences, key=lambda s: len(s), reverse=True)[:3]
+     if not claims:
+         claims = [text[:300]]
+
+     # Run zero-shot classification on the whole text
+     zs_scores = await run_zero_shot(text, LABELS)
+
+     # Query fact-check APIs for the top claims (run concurrently in executor threads)
+     loop = asyncio.get_event_loop()
+     tasks = [loop.run_in_executor(None, query_google_factcheck, claim) for claim in claims]
+     tasks += [loop.run_in_executor(None, google_search_evidence, claim) for claim in claims]
+     results = await asyncio.gather(*tasks, return_exceptions=True)
+
+     evidence = []
+     # results structure: fact-check results for each claim first, then custom-search results for each claim
+     for res in results:
+         if isinstance(res, Exception):
+             continue
+         for r in res:
+             evidence.append(r)
+
+     # Build a simple summary score: compare the 'true' label against the misinfo labels.
+     # "misinfo_score" = max score of any misinfo label (i.e. any label other than 'true')
+     misinfo_labels = [l for l in LABELS if l != "true"]
+     misinfo_score = max([zs_scores.get(l, 0.0) for l in misinfo_labels]) if zs_scores else 0.0
+     true_score = zs_scores.get("true", 0.0)
+
+     notes = f"Claims extracted: {len(claims)}. Misinfo_score={misinfo_score:.3f}, true_score={true_score:.3f}"

+     return {
+         "language": language,
+         "text": text,
+         "zs_scores": zs_scores,
+         "claims": claims,
+         "evidence": evidence,
+         "notes": notes
+     }

+ # -----------------------
+ # API Endpoint
+ # -----------------------
+ @app.post("/analyze", response_model=AnalyzeResponse)
+ async def analyze(req: AnalyzeRequest):
+     if not (req.text or req.url or req.image_base64):
+         raise HTTPException(status_code=400, detail="Provide at least one of text, url, or image_base64.")
+
+     # Determine input type and get primary text (if any)
+     input_text = None
+     input_type = "unknown"
+     evidence_items: List[EvidenceItem] = []
+
+     # 1) If URL provided, fetch and extract text
+     if req.url:
+         input_type = "url"
          try:
+             input_text = fetch_url_text(req.url)
+         except Exception as e:
+             raise HTTPException(status_code=500, detail=f"Failed fetching URL: {e}")
+
+     # 2) If image provided, run OCR and the deepfake check
+     if req.image_base64:
+         # If both image and url/text are provided, we aggregate them.
+         input_type = "image" if input_type == "unknown" else input_type + "+image"
+         ocr_text = ocr_image_from_base64(req.image_base64)
+         df_result = detect_image_manipulation(req.image_base64)
+         # Add an evidence entry for the image analysis
+         evidence_items.append(EvidenceItem(source="local_image_analysis", title="Image analysis", snippet=str(df_result), url=None))
+         # Merge OCR text with the overall text (if none yet, use OCR)
+         if ocr_text:
+             # Append OCR text to input_text (prefer original text if already present)
+             if input_text:
+                 input_text = input_text + "\n\n[OCR TEXT]\n" + ocr_text
+             else:
+                 input_text = ocr_text
+
+     # 3) If direct text provided
+     if req.text:
+         input_type = "text" if input_type == "unknown" else input_type + "+text"
+         if input_text:
+             input_text = input_text + "\n\n" + req.text
          else:
+             input_text = req.text

+     # If still no text (e.g., the image had no OCR output), set to empty string
+     if not input_text:
+         input_text = ""

+     # Run the main text pipeline
+     pipeline_result = await analyze_text_pipeline(input_text)

+     # Convert evidence dictionaries to a list of EvidenceItem models
+     for ev in pipeline_result.get("evidence", []):
+         try:
+             evidence_items.append(EvidenceItem(
+                 source=ev.get("source") or "unknown",
+                 title=ev.get("title"),
+                 snippet=ev.get("snippet"),
+                 url=ev.get("url"),
+                 verdict=ev.get("verdict")
+             ))
+         except Exception:
+             continue
+
+     # Assemble tag scores (HuggingFace labels -> score mapping)
+     zscores = pipeline_result.get("zs_scores", {})
+     # Normalize to include all labels with a default of 0.0
+     tags = {label: float(zscores.get(label, 0.0)) for label in LABELS}
+
+     # Response
+     resp = AnalyzeResponse(
+         input_type=input_type,
+         language=pipeline_result.get("language"),
+         text=(pipeline_result.get("text")[:10000] if pipeline_result.get("text") else None),
+         tags=tags,
+         evidence=evidence_items,
+         notes=pipeline_result.get("notes")
+     )
+     return resp
+
+ # -----------------------
+ # Simple root health endpoint
+ # -----------------------
+ @app.get("/")
+ def root():
+     return {"status": "ok", "service": "misinfo-detector", "version": "0.1"}
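
A minimal client sketch for the new /analyze endpoint, for trying out this revision. Since the old `if __name__ == "__main__"` uvicorn runner was dropped, it assumes the app is served manually with `uvicorn app:app --port 8000`; the port, the file screenshot.png, and the example URL are placeholder assumptions, while the field names follow the AnalyzeRequest/AnalyzeResponse schemas in the diff above.

# Example client for the /analyze endpoint (sketch, not part of this diff).
# Assumes a local server started with: uvicorn app:app --host 0.0.0.0 --port 8000
import base64
import requests

BASE = "http://localhost:8000"  # assumed host/port

# Text-only analysis
payload = {"text": "Scientists confirm drinking seawater cures all diseases."}
resp = requests.post(f"{BASE}/analyze", json=payload, timeout=120)  # generous timeout: first call loads the model
resp.raise_for_status()
result = resp.json()
print(result["input_type"], result["language"])
print(result["tags"])            # label -> score for all eight LABELS
for ev in result["evidence"]:    # EvidenceItem entries from fact-check/search
    print(ev["source"], ev.get("verdict"), ev.get("url"))

# URL + image analysis; the image is OCR'd and passed through the placeholder detector
with open("screenshot.png", "rb") as f:  # hypothetical input file
    img_b64 = base64.b64encode(f.read()).decode("ascii")
payload = {"url": "https://example.com/article", "image_base64": img_b64}
result = requests.post(f"{BASE}/analyze", json=payload, timeout=120).json()
print(result["notes"])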
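The detect_image_manipulation stub above always reports a fixed 0.02 confidence. As a rough sketch of the kind of replacement its docstring suggests (an EfficientNet/ViT-style classifier), the following hypothetical drop-in loads a fine-tuned torchvision EfficientNet-B0; the checkpoint deepfake_effnet.pt, the single-logit head, and reading the sigmoid output as P(manipulated) are all assumptions, not anything shipped in this PR.

# Hypothetical replacement for detect_image_manipulation(): EfficientNet-B0
# with a binary real/fake head. "deepfake_effnet.pt" is an assumed checkpoint;
# no trained model is included in this PR.
import base64
import io

import torch
from PIL import Image
from torchvision import models, transforms

_tfm = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

_model = models.efficientnet_b0(weights=None)
_model.classifier[1] = torch.nn.Linear(_model.classifier[1].in_features, 1)  # single logit
_model.load_state_dict(torch.load("deepfake_effnet.pt", map_location="cpu"))
_model.eval()

def detect_image_manipulation(image_base64: str) -> dict:
    img = Image.open(io.BytesIO(base64.b64decode(image_base64))).convert("RGB")
    with torch.no_grad():
        logit = _model(_tfm(img).unsqueeze(0))[0, 0]
    confidence = torch.sigmoid(logit).item()  # interpreted as P(manipulated) under the assumed training setup
    return {"deepfake_confidence": confidence, "notes": "EfficientNet-B0 binary classifier (sketch)"}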