Spaces:
Sleeping
Sleeping
Commit ·
540b123
1
Parent(s): 3a02fc0
Initial deployment
Browse files- Dockerfile +18 -0
- apisetup.py +93 -0
- conflictdetection.py +309 -0
- dataretrieval.py +247 -0
- main.py +184 -0
- prototype.py +527 -0
- querygeneration.py +55 -0
- requirements.txt +20 -0
- similarity.py +115 -0
- static/looks.css +1075 -0
- templates/index.html +771 -0
Dockerfile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FIX: the original file accidentally included the shell here-doc wrapper
# ("cat > ~/FactCheckAI/Dockerfile << 'EOF'" ... "EOF") used to create it.
# Those lines are not Dockerfile instructions and make the build fail on the
# very first line; they are removed here.

FROM python:3.10-slim

WORKDIR /app

# build-essential is needed to compile native wheels; drop the apt lists to keep the image small.
RUN apt-get update && apt-get install -y build-essential && rm -rf /var/lib/apt/lists/*

# Install Python dependencies first so this layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Pre-download the spaCy transformer model at build time (avoids a runtime download).
RUN python -m spacy download en_core_web_trf

COPY . .

EXPOSE 7860

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
apisetup.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
Collecting all the links through api
|
| 3 |
+
'''
|
| 4 |
+
import requests, json
|
| 5 |
+
from langdetect import detect, LangDetectException
|
| 6 |
+
# super dev
|
| 7 |
+
|
| 8 |
+
class Apicaller():
    """Collects news-article links for a set of keywords via the serper.dev API.

    Only links whose titles are detected as English are returned.
    """

    def __init__(self, key):
        # serper.dev API key sent with every request.
        self.api = key

    def superdev(self, keywords: list[str]):
        """Search Google News (via serper.dev) for each keyword; return English links.

        Args:
            keywords: search terms; entries shorter than 3 characters are skipped.

        Returns:
            list[str]: links whose article titles were detected as English.

        Raises:
            ValueError: if *keywords* is not a list.
        """
        if not isinstance(keywords, list):
            raise ValueError("Invalid data type")

        attempts = 0
        links = []

        for keyword in keywords:
            # Too-short keywords produce noisy results; skip them.
            if len(keyword) < 3:
                continue

            url = "https://google.serper.dev/news"
            payload = {
                "q": keyword,
                "gl": "in",
                "hl": "en"
            }
            headers = {
                'X-API-KEY': self.api,
                'Content-Type': 'application/json'
            }

            # requests can raise RequestException (timeout, DNS failure, no
            # network) before .status_code is ever accessible. Count it as a
            # failed attempt and bail out once half the keywords have failed.
            try:
                # FIX: explicit timeout so a stalled connection cannot hang
                # the whole pipeline indefinitely (previously no timeout).
                response = requests.post(url, headers=headers, json=payload, timeout=15)
            except requests.exceptions.RequestException as e:
                print(f"[Apicaller] Network error for keyword '{keyword}': {e}")
                attempts += 1
                if attempts >= len(keywords) / 2:
                    print("Too many network failures, exiting...")
                    break
                continue

            if response.status_code != 200:
                # Wrong API key, exhausted quota, or a server-side problem.
                print(f"Request failed with status code: {response.status_code}")
                attempts += 1
                if attempts >= len(keywords) / 2:
                    # FIX: typo corrected ("Wrrong ... somthing" -> "Wrong ... something").
                    print("Wrong API key or something else, exiting...")
                    break
                continue

            # The API may return an error payload without a "news" key, or a
            # non-JSON body; skip the keyword instead of crashing the call.
            try:
                response_json = response.json()
                news_items = response_json["news"]
            except (KeyError, ValueError) as e:
                print(f"[Apicaller] Unexpected response structure for keyword '{keyword}': {e}")
                continue

            for result in news_items:
                # Individual entries can be missing fields; skip malformed ones.
                # FIX: mojibake arrow in the log strings repaired.
                try:
                    title = result["title"]
                    link = result["link"]
                except KeyError as e:
                    print(f"[Apicaller] Missing field {e} in news entry - skipping entry.")
                    continue

                # langdetect raises on very short, numeric, or ambiguous
                # titles; treat detection failure as non-English (conservative).
                try:
                    if detect(title) == "en":
                        links.append(link)
                except LangDetectException as e:
                    print(f"[Apicaller] Language detection failed for title '{title[:40]}': {e} - skipping.")
                    continue

        return links
|
conflictdetection.py
ADDED
|
@@ -0,0 +1,309 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import json
|
| 3 |
+
import torch
|
| 4 |
+
import spacy
|
| 5 |
+
from sentence_transformers import SentenceTransformer, util
|
| 6 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification, logging
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from typing import List, Tuple
|
| 9 |
+
|
| 10 |
+
logging.set_verbosity_error()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Spacy model β loaded once at module level with proper error handling
|
| 14 |
+
|
| 15 |
+
_SPACY_MODEL_NAME = "en_core_web_trf"
|
| 16 |
+
|
| 17 |
+
def _load_spacy_model(model_name: str):
    """Load the spaCy pipeline *model_name*, downloading it on first use.

    Raises:
        RuntimeError: when the model can neither be loaded nor downloaded.
    """
    def _download_then_load():
        # Model missing locally -- fetch it once, then retry the load.
        print(f"[ConflictDetector] spaCy model '{model_name}' not found. Downloading...")
        try:
            spacy.cli.download(model_name)
            print(f"[ConflictDetector] Model '{model_name}' downloaded successfully.")
            return spacy.load(model_name)
        except Exception as e:
            raise RuntimeError(f"[ConflictDetector] spaCy model download failed: {e}") from e

    try:
        return spacy.load(model_name)
    except OSError:
        return _download_then_load()
    except Exception as e:
        raise RuntimeError(f"[ConflictDetector] spaCy model loading failed: {e}") from e
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Eagerly load the shared spaCy pipeline at import time. Failure is not
# fatal: the detector degrades to the generic "Factual Conflict" label when
# no NER model is available (self.nlp stays None).
try:
    _spacy_model = _load_spacy_model(_SPACY_MODEL_NAME)
except RuntimeError as e:
    print(f"[ConflictDetector] WARNING: spaCy model could not be loaded: {e}")
    print("[ConflictDetector] NER-based conflict classification will fall back to 'Factual Conflict'.")
    _spacy_model = None
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Data class
|
| 43 |
+
|
| 44 |
+
@dataclass
class Conflict:
    """One contradictory sentence pair found between two documents."""
    sentence_a: str             # claim from the first document
    sentence_b: str             # matching claim from the second document
    conflict_type: str          # e.g. "Date Mismatch", "Factual Conflict"
    severity: str               # "HIGH" | "MEDIUM" | "LOW"
    confidence: float           # cosine similarity of the pair
    contradiction_score: float  # NLI contradiction probability
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# ConflictDetector
|
| 55 |
+
|
| 56 |
+
class ConflictDetector:
    """Finds contradictory claim pairs between two documents.

    Pipeline: sentence splitting -> semantic pairing (MiniLM embeddings) ->
    NLI contradiction scoring (DeBERTa cross-encoder) -> NER-based conflict
    typing (spaCy) -> severity ranking.
    """

    def __init__(self, strictness: float = 0.7):
        # strictness in [0, 1]: higher values lower both the similarity and
        # contradiction thresholds, i.e. flag more pairs.
        if not (0.0 <= strictness <= 1.0):
            raise ValueError(f"strictness must be between 0.0 and 1.0, got {strictness}")

        self.strictness = strictness

        print("[ConflictDetector] Loading semantic similarity model...")
        try:
            self.similarity_model = SentenceTransformer("all-MiniLM-L6-v2")
        except Exception as e:
            raise RuntimeError(f"[ConflictDetector] Failed to load similarity model: {e}") from e

        print("[ConflictDetector] Loading NLI contradiction detection model...")
        try:
            _nli_model_name = "cross-encoder/nli-deberta-v3-base"
            self.nli_tokenizer = AutoTokenizer.from_pretrained(_nli_model_name)
            self.nli_model = AutoModelForSequenceClassification.from_pretrained(_nli_model_name)
            self.nli_model.eval()
        except Exception as e:
            raise RuntimeError(f"[ConflictDetector] Failed to load NLI model: {e}") from e

        print("[ConflictDetector] Loading NER model...")
        # May be None when the module-level spaCy load failed; handled downstream.
        self.nlp = _spacy_model

        # Boilerplate/metadata patterns that must never count as claims.
        self.ignore_patterns = [
            r"\b(published|updated|posted|written by|author|reporter|editor)\b",
            r"\b\d{1,2}:\d{2}\s?(am|pm|AM|PM)\b",
            r"\bfollow us\b|\bsubscribe\b|\bclick here\b",
            r"\bcopyright\b|\ball rights reserved\b",
        ]

        print("[ConflictDetector] All models loaded.\n")

    def split_into_claims(self, text: str) -> List[str]:
        """Split *text* into checkable sentences (>= 6 words, no boilerplate)."""
        if not isinstance(text, str) or not text.strip():
            return []

        sentences = re.split(r"(?<=[.!?])\s+", text.strip())

        def _is_claim(candidate: str) -> bool:
            if len(candidate.split()) < 6:
                return False
            return not any(re.search(p, candidate, re.IGNORECASE) for p in self.ignore_patterns)

        return [s for s in (sent.strip() for sent in sentences) if _is_claim(s)]

    def find_similar_pairs(self, claims_a, claims_b):
        """Return (claim_a, claim_b, cosine) triples above the similarity
        threshold, most-similar first."""
        if not claims_a or not claims_b:
            return []

        # Higher strictness accepts less-similar pairs for NLI inspection.
        similarity_threshold = 0.75 - (self.strictness * 0.25)

        try:
            embeddings_a = self.similarity_model.encode(claims_a, batch_size=24, convert_to_tensor=True)
            embeddings_b = self.similarity_model.encode(claims_b, batch_size=24, convert_to_tensor=True)
        except Exception as e:
            print(f"[ConflictDetector] Encoding failed during similarity search: {e}")
            return []

        cosine_scores = util.cos_sim(embeddings_a, embeddings_b)

        matched = [
            (sent_a, sent_b, score)
            for i, sent_a in enumerate(claims_a)
            for j, sent_b in enumerate(claims_b)
            if (score := cosine_scores[i][j].item()) >= similarity_threshold
        ]
        matched.sort(key=lambda p: p[2], reverse=True)
        return matched

    def check_contradiction(self, sentence_a: str, sentence_b: str) -> float:
        """Return the NLI contradiction probability for the ordered pair; 0.0 on failure."""
        try:
            inputs = self.nli_tokenizer(
                sentence_a, sentence_b,
                return_tensors="pt", truncation=True, max_length=512,
            )
            with torch.no_grad():
                outputs = self.nli_model(**inputs)
            probs = torch.softmax(outputs.logits, dim=-1)
            # Index 0 is the contradiction class for this cross-encoder.
            return probs[0][0].item()
        except Exception as e:
            print(f"[ConflictDetector] NLI check failed for pair: {e}")
            return 0.0

    def classify_conflict_type(self, sentence_a: str, sentence_b: str) -> str:
        """Label the conflict by comparing named entities; generic fallback otherwise."""
        try:
            doc_a = self.nlp(sentence_a)
            doc_b = self.nlp(sentence_b)
        except Exception as e:
            # Also reached when self.nlp is None (model never loaded).
            print(f"[ConflictDetector] NER classification failed: {e}")
            return "Factual Conflict"

        # NOTE: a plain dict build keeps only the LAST entity per label.
        entities_a = {ent.label_: ent.text for ent in doc_a.ents}
        entities_b = {ent.label_: ent.text for ent in doc_b.ents}

        entity_type_map = {
            "PERSON": "Name Mismatch",
            "ORG": "Organization Mismatch",
            "GPE": "Location Mismatch",
            "LOC": "Location Mismatch",
            "DATE": "Date Mismatch",
            "TIME": "Time Mismatch",
            "CARDINAL": "Number Mismatch",
            "ORDINAL": "Order/Rank Mismatch",
            "MONEY": "Financial Mismatch",
            "PERCENT": "Statistics Mismatch",
            "EVENT": "Event Mismatch",
        }

        conflicts_found = [
            conflict_name
            for entity_label, conflict_name in entity_type_map.items()
            if (val_a := entities_a.get(entity_label))
            and (val_b := entities_b.get(entity_label))
            and val_a.lower() != val_b.lower()
        ]

        return " & ".join(set(conflicts_found)) if conflicts_found else "Factual Conflict"

    def get_severity(self, contradiction_score: float, conflict_type: str) -> str:
        """Map (contradiction score, conflict type) to HIGH / MEDIUM / LOW."""
        high_priority_types = [
            "Date Mismatch", "Location Mismatch", "Number Mismatch",
            "Event Mismatch", "Factual Conflict",
        ]
        is_high_priority = any(t in conflict_type for t in high_priority_types)

        if contradiction_score >= 0.85:
            return "HIGH"
        if contradiction_score >= 0.65:
            return "HIGH" if is_high_priority else "MEDIUM"
        return "MEDIUM" if is_high_priority else "LOW"

    def detect_conflicts(self, doc_a: str, doc_b: str) -> List[Conflict]:
        """Run the full pipeline over two documents; return ranked Conflict objects."""
        contradiction_threshold = 0.85 - (self.strictness * 0.35)
        print(f"[ConflictDetector] Strictness: {self.strictness} | Contradiction threshold: {contradiction_threshold:.2f}")

        claims_a = self.split_into_claims(doc_a)
        claims_b = self.split_into_claims(doc_b)
        print(f"[ConflictDetector] Doc A: {len(claims_a)} claims | Doc B: {len(claims_b)} claims")

        if not claims_a or not claims_b:
            print("[ConflictDetector] One or both documents produced no claims. Skipping.")
            return []

        similar_pairs = self.find_similar_pairs(claims_a, claims_b)
        print(f"[ConflictDetector] Similar pairs found: {len(similar_pairs)}")

        conflicts = []
        seen_pairs: set = set()

        for sent_a, sent_b, sim_score in similar_pairs:
            # Deduplicate near-identical pairs by their 50-char prefixes.
            pair_key = (sent_a[:50], sent_b[:50])
            if pair_key in seen_pairs:
                continue
            seen_pairs.add(pair_key)

            contradiction_score = self.check_contradiction(sent_a, sent_b)
            if contradiction_score < contradiction_threshold:
                continue

            conflict_type = self.classify_conflict_type(sent_a, sent_b)
            conflicts.append(Conflict(
                sentence_a=sent_a,
                sentence_b=sent_b,
                conflict_type=conflict_type,
                severity=self.get_severity(contradiction_score, conflict_type),
                confidence=round(sim_score, 3),
                contradiction_score=round(contradiction_score, 3),
            ))

        # HIGH first, then by descending contradiction score.
        severity_order = {"HIGH": 0, "MEDIUM": 1, "LOW": 2}
        conflicts.sort(key=lambda c: (severity_order[c.severity], -c.contradiction_score))
        return conflicts

    def report(self, doc_a: str, doc_b: str, external_source: str = "unknown") -> dict:
        """Run conflict detection and return a structured dict; always returns a dict.

        When doc_a yields no extractable claims the pipeline has no basis for
        a "no conflicts" verdict, so a distinct INSUFFICIENT_CONTENT status is
        returned instead of a misleading NO_CONFLICTS.
        """
        claims_a = self.split_into_claims(doc_a)
        if not claims_a:
            return {
                "status": "INSUFFICIENT_CONTENT",
                "error": (
                    "The input text could not be broken into verifiable claims. "
                    "It may be too short (under 6 words per sentence) or contain "
                    "only boilerplate/metadata. Provide a paragraph or more of "
                    "substantive text for meaningful conflict analysis."
                ),
                "total": 0,
                "conflicts": {},
            }

        try:
            conflicts = self.detect_conflicts(doc_a, doc_b)
        except Exception as e:
            print(f"[ConflictDetector] detect_conflicts raised unexpectedly: {e}")
            return {
                "status": "ERROR",
                "error": f"Detection pipeline failed: {type(e).__name__}: {e}",
                "total": 0,
                "conflicts": {},
            }

        if not conflicts:
            return {"status": "NO_CONFLICTS", "total": 0, "conflicts": {}}

        high = [c for c in conflicts if c.severity == "HIGH"]
        medium = [c for c in conflicts if c.severity == "MEDIUM"]
        low = [c for c in conflicts if c.severity == "LOW"]

        if len(high) >= 3:
            verdict = "BIG_MISMATCH"
        elif len(high) >= 1:
            verdict = "MISMATCH_DETECTED"
        elif len(medium) >= 2:
            verdict = "MINOR_MISMATCH"
        else:
            verdict = "MOSTLY_CONSISTENT"

        return {
            "status": verdict,
            "total": len(conflicts),
            "high": len(high),
            "medium": len(medium),
            "low": len(low),
            "conflicts": {
                f"conflict_{i}": {
                    "conflict_type": conflict.conflict_type,
                    "severity": conflict.severity.lower(),
                    "contradiction_score": conflict.contradiction_score,
                    "similarity_score": conflict.confidence,
                    "user_claim": conflict.sentence_a,
                    external_source: conflict.sentence_b,
                }
                for i, conflict in enumerate(conflicts, 1)
            },
        }
|
dataretrieval.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
All the process related to data collection and filtering
|
| 3 |
+
will happen here.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import trafilatura, json, tldextract
|
| 7 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 8 |
+
from similarity import ModelFunctions
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# ββ Standardised response builders ββββββββββββββββββββββββββββββββββββββββ #
|
| 12 |
+
|
| 13 |
+
def _ok(data: dict) -> str:
|
| 14 |
+
"""Successful result with matched articles."""
|
| 15 |
+
return {"status": "success", "results": data}
|
| 16 |
+
|
| 17 |
+
def _no_match(fetched: int, scored: int, threshold: float) -> str:
|
| 18 |
+
"""Articles were fetched and scored, but none crossed the threshold."""
|
| 19 |
+
return {
|
| 20 |
+
"status": "no_match",
|
| 21 |
+
"reason": (
|
| 22 |
+
f"Fetched {fetched} article(s) and scored {scored}, "
|
| 23 |
+
f"but none had a similarity score >= {threshold}. "
|
| 24 |
+
"The available content may not be directly related to the input topic."
|
| 25 |
+
)
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
def _no_content(total_links: int, failed: int) -> str:
|
| 29 |
+
"""Every URL either failed to fetch or returned no extractable text."""
|
| 30 |
+
return {
|
| 31 |
+
"status": "error",
|
| 32 |
+
"error": (
|
| 33 |
+
f"Could not retrieve content from any of the {total_links} link(s). "
|
| 34 |
+
f"{failed} link(s) failed during fetch/extraction. "
|
| 35 |
+
"Possible causes: paywalls, network timeouts, bot-blocking, or invalid URLs."
|
| 36 |
+
)
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
def _bad_input(detail: str) -> str:
|
| 40 |
+
"""Caller passed invalid arguments."""
|
| 41 |
+
return {"status": "error", "error": f"Invalid input β {detail}"}
|
| 42 |
+
|
| 43 |
+
def _internal_error(context: str, exc: Exception) -> str:
|
| 44 |
+
"""Unexpected exception in a named context."""
|
| 45 |
+
return {
|
| 46 |
+
"status": "error",
|
| 47 |
+
"error": f"Unexpected failure in [{context}]: {type(exc).__name__}: {exc}"
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# Similarity threshold
|
| 52 |
+
SIMILARITY_THRESHOLD = 0.4
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class DataCollector():
    """Fetches candidate articles in parallel and keeps those similar enough
    to the original content.

    FIX: ``retriever`` was annotated ``-> str`` while every path returns a
    dict; the annotation now says ``-> dict``. Mojibake arrows in the log
    strings were also repaired.
    """

    def __init__(self, ModelFunctionsObj):
        # Similarity backend (similarity.ModelFunctions instance).
        self.object = ModelFunctionsObj

    def _fetch_one(self, link: str) -> tuple:
        """Fetch one URL.

        Returns:
            (link, text, error_reason): error_reason is None on success,
            otherwise a short failure description; text is None on failure.
        """
        try:
            html = trafilatura.fetch_url(link)
            if not html:
                return link, None, "no HTML returned (possible bot-block or empty page)"
            text = trafilatura.extract(html)
            if not text:
                return link, None, "HTML fetched but no text could be extracted"
            return link, text, None
        except Exception as e:
            return link, None, f"{type(e).__name__}: {e}"

    def retriever(self, OriginalContent: str, links: list) -> dict:
        """Parallel-fetch *links*, score them against *OriginalContent*, and
        return a status-wrapped dict of matches above SIMILARITY_THRESHOLD."""
        if not isinstance(OriginalContent, str) or not OriginalContent.strip():
            return _bad_input("OriginalContent must be a non-empty string.")
        if not isinstance(links, list) or not links:
            return _bad_input("links must be a non-empty list of URL strings.")

        try:
            # Step 1: fetch all links concurrently (I/O-bound -> threads).
            fetched = {}         # link -> raw text
            fetch_failures = []  # human-readable failure notes for logging

            with ThreadPoolExecutor(max_workers=20) as executor:
                futures = {executor.submit(self._fetch_one, link): link for link in links}
                for future in as_completed(futures):
                    link, text, reason = future.result()
                    if text:
                        fetched[link] = text
                    else:
                        fetch_failures.append(f"{link} -> {reason}")

            if fetch_failures:
                print(f"[DataRetrieval] {len(fetch_failures)}/{len(links)} link(s) failed:")
                for failure in fetch_failures:
                    print(f"  - {failure}")

            # Zero articles retrieved -- no point going further.
            if not fetched:
                return _no_content(len(links), len(fetch_failures))

            # Step 2: derive a cheap "title" (first sentence) per article.
            valid_links = []
            valid_titles = []
            valid_texts = []
            for link, text in fetched.items():
                try:
                    title = text.strip().split(".")[0].lower()
                except (AttributeError, IndexError):
                    title = ""  # empty string still gets scored, just poorly
                valid_links.append(link)
                valid_titles.append(title)
                valid_texts.append(text)

            # Step 3: one batched similarity pass over all titles.
            try:
                scores = self.object.BatchSimilarityScores(OriginalContent, valid_titles)
            except Exception as e:
                return _internal_error("BatchSimilarityScores", e)

            # Step 4: keep only articles at or above the threshold.
            data = {}
            for link, text, score in zip(valid_links, valid_texts, scores):
                if score >= SIMILARITY_THRESHOLD:
                    try:
                        data[f"searchresult{len(data) + 1}"] = {
                            "organization": tldextract.extract(link).domain,
                            "score": score,
                            "article": text
                        }
                    except Exception as e:
                        print(f"[DataRetrieval] Could not save result for {link}: {e} - skipping.")
                        continue

            # Step 5: explicit status either way.
            if not data:
                return _no_match(
                    fetched=len(fetched),
                    scored=len(valid_titles),
                    threshold=SIMILARITY_THRESHOLD
                )

            return _ok(data)

        except Exception as e:
            return _internal_error("retriever main block", e)

    def top_results(self, data, num_of_articals: int = 2):
        """Return the *num_of_articals* highest-scoring entries renumbered
        searchresult1..N.

        Accepts a plain dict, a JSON string, or the status-wrapped format
        produced by retriever(). Returns None on invalid/empty input.
        """
        try:
            if isinstance(data, str):
                try:
                    data = json.loads(data)
                except json.JSONDecodeError as e:
                    print(f"[top_results] Failed to parse JSON input: {e}")
                    return None

            # Unwrap the status-wrapped response format if present.
            if isinstance(data, dict) and "results" in data:
                data = data["results"]

            if not isinstance(data, dict) or not data:
                print("[top_results] Invalid or empty data - nothing to sort.")
                return None

            sorted_items = sorted(
                data.items(),
                key=lambda item: item[1]["score"],
                reverse=True
            )

            num_of_articals = min(num_of_articals, len(sorted_items))
            top_n = sorted_items[:num_of_articals]

            result = {}
            for i, (_, value) in enumerate(top_n, start=1):
                result[f"searchresult{i}"] = value

            return result

        except Exception as e:
            print(f"[top_results] Unexpected error: {e}")
            return None
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# ββ Standalone helper: fetch and parse a single user-supplied article βββββββ #
|
| 202 |
+
|
| 203 |
+
def get_user_article(user_link: str) -> dict:
    """Fetch and parse a single user-supplied article URL.

    Returns a dict: {"status": "success", "organization", "title", "article"}
    on success, or {"status": "error", "error": <message>} otherwise.
    Never raises.
    """
    # Guard: reject non-string or blank input before touching the network.
    if not isinstance(user_link, str) or not user_link.strip():
        return {"status": "error", "error": "Invalid or empty URL provided."}

    try:
        try:
            page_html = trafilatura.fetch_url(user_link)
            extracted = trafilatura.extract(page_html) if page_html else None
        except Exception as e:
            msg = f"Network or extraction failure: {type(e).__name__}: {e}"
            print(f"[get_user_article] {msg}")
            return {"status": "error", "error": msg}

        if not extracted:
            return {
                "status": "error",
                "error": (
                    "Could not extract readable text from the provided URL. "
                    "The page may be paywalled, JavaScript-rendered, or block scrapers."
                )
            }

        # Cheap title heuristic: lowercase first sentence of the article.
        try:
            first_sentence = extracted.strip().split(".")[0].lower()
        except (AttributeError, IndexError, TypeError, ValueError):
            first_sentence = None

        # Publisher = registrable domain of the link.
        try:
            source_domain = tldextract.extract(user_link).domain
        except (AttributeError, TypeError, ValueError):
            source_domain = None

        return {
            "status": "success",
            "organization": source_domain,
            "title": first_sentence,
            "article": extracted
        }

    except Exception as e:
        return {
            "status": "error",
            "error": f"Unexpected failure in get_user_article: {type(e).__name__}: {e}"
        }
|
main.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor

from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel, field_validator

from prototype import Prototype

# ── Logging ──────────────────────────────────────────────────────────────────
# Plain stdout logging; Spaces/Docker captures stdout, so no file handler needed.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)

# ── Constants ────────────────────────────────────────────────────────────────
# Pipeline is I/O-heavy (scraping + LLM calls). 150 s is generous but not infinite.
# If it hasn't returned by then, tell the user to retry rather than hanging forever.
PIPELINE_TIMEOUT_SECONDS = 150

# ThreadPoolExecutor lets us run the blocking Prototype.run() without freezing
# FastAPI's async event loop. max_workers=4 handles 4 simultaneous requests.
THREAD_POOL = ThreadPoolExecutor(max_workers=4)

# ── App ──────────────────────────────────────────────────────────────────────
app = FastAPI(
    title="FactCheck AI",
    description="AI-powered news fact-checking and trust analysis",
    version="1.1.0",
)

# Static assets (CSS) and the Jinja2 template that serves the single-page UI.
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# ββ Request / Response Models βββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 41 |
+
class ChatRequest(BaseModel):
    """Payload for POST /chat: the user's text plus the credentials to use."""

    text: str
    inputType: str        # "query" | "article_link"
    searchApiKey: str     # user's search API key
    searchProvider: str   # "serper.dev" | "gnews" | "publicapi.dev"
    llmApiKey: str        # user's LLM API key
    llmProvider: str      # "anthropic" | "openai" | "google" | "groq"

    @field_validator("text")
    @classmethod
    def text_not_empty(cls, v: str) -> str:
        """Strip whitespace; reject empty or over-long input."""
        stripped = v.strip()
        if not stripped:
            raise ValueError("text must not be empty")
        if len(stripped) > 5000:
            raise ValueError("text exceeds 5 000 characters")
        return stripped

    @field_validator("inputType")
    @classmethod
    def valid_input_type(cls, v: str) -> str:
        """Only the two modes the pipeline understands are accepted."""
        allowed = {"query", "article_link"}
        if v in allowed:
            return v
        raise ValueError(f"inputType must be one of {allowed}")

    @field_validator("llmProvider")
    @classmethod
    def valid_llm_provider(cls, v: str) -> str:
        """Restrict to the LLM backends Prototype.get_response supports."""
        allowed = {"anthropic", "openai", "google", "groq"}
        if v in allowed:
            return v
        raise ValueError(f"llmProvider must be one of {allowed}")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class ChatResponse(BaseModel):
    """Response for POST /chat."""
    # Plain-text result: the LLM explanation or a user-facing error message.
    response: str
    # Echo of the request's inputType ("query" | "article_link").
    inputType: str
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# ββ Routes ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 82 |
+
@app.get("/", response_class=HTMLResponse)
|
| 83 |
+
async def homepage(request: Request):
|
| 84 |
+
return templates.TemplateResponse("index.html", {"request": request})
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@app.post("/chat", response_model=ChatResponse)
|
| 88 |
+
async def chat(payload: ChatRequest):
|
| 89 |
+
"""
|
| 90 |
+
Runs the Prototype pipeline end-to-end and streams the LLM explanation
|
| 91 |
+
back to the frontend as a plain string inside ChatResponse.
|
| 92 |
+
|
| 93 |
+
Flow:
|
| 94 |
+
1. Build Prototype with the user-supplied search API key.
|
| 95 |
+
key_provider is always "superdev" β that is the internal name used by
|
| 96 |
+
Apicaller regardless of which search provider the user picked in the UI.
|
| 97 |
+
2. Run the blocking pipeline in a thread-pool so FastAPI stays responsive.
|
| 98 |
+
3. Enforce a 150-second timeout. If it fires, return a user-friendly
|
| 99 |
+
"please try again" message instead of hanging the request forever.
|
| 100 |
+
4. Catch any unexpected exception and return a readable error message.
|
| 101 |
+
The pipeline itself already converts most internal errors into LLM
|
| 102 |
+
explanations, so this outer catch is only a safety net.
|
| 103 |
+
"""
|
| 104 |
+
logger.info(
|
| 105 |
+
"Chat request | type=%s | llm=%s | search=%s | text=%.120s",
|
| 106 |
+
payload.inputType,
|
| 107 |
+
payload.llmProvider,
|
| 108 |
+
payload.searchProvider,
|
| 109 |
+
payload.text,
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
# Prototype's internal search provider name is always "superdev".
|
| 113 |
+
# The UI label ("serper.dev", "gnews", etc.) is only for display;
|
| 114 |
+
# the actual API call routing is done inside Apicaller.
|
| 115 |
+
KEY_PROVIDER = "superdev"
|
| 116 |
+
|
| 117 |
+
pipeline = Prototype(payload.searchApiKey, KEY_PROVIDER)
|
| 118 |
+
|
| 119 |
+
def _run_pipeline() -> str:
|
| 120 |
+
"""Blocking call β executed in the thread pool."""
|
| 121 |
+
return pipeline.run(
|
| 122 |
+
payload.text,
|
| 123 |
+
payload.inputType,
|
| 124 |
+
llm_api_key=payload.llmApiKey,
|
| 125 |
+
llm_provider=payload.llmProvider,
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
loop = asyncio.get_event_loop()
|
| 129 |
+
|
| 130 |
+
try:
|
| 131 |
+
result: str = await asyncio.wait_for(
|
| 132 |
+
loop.run_in_executor(THREAD_POOL, _run_pipeline),
|
| 133 |
+
timeout=PIPELINE_TIMEOUT_SECONDS,
|
| 134 |
+
)
|
| 135 |
+
logger.info("Pipeline finished successfully | type=%s", payload.inputType)
|
| 136 |
+
return ChatResponse(response=result, inputType=payload.inputType)
|
| 137 |
+
|
| 138 |
+
except asyncio.TimeoutError:
|
| 139 |
+
logger.warning(
|
| 140 |
+
"Pipeline timed out after %ds | type=%s | text=%.80s",
|
| 141 |
+
PIPELINE_TIMEOUT_SECONDS,
|
| 142 |
+
payload.inputType,
|
| 143 |
+
payload.text,
|
| 144 |
+
)
|
| 145 |
+
return ChatResponse(
|
| 146 |
+
response=(
|
| 147 |
+
"β±οΈ Analysis timed out.\n\n"
|
| 148 |
+
"The pipeline ran for over 2.5 minutes without finishing. "
|
| 149 |
+
"This usually happens when sources are slow to load or the claim is very broad.\n\n"
|
| 150 |
+
"Please try again with a shorter or more specific query."
|
| 151 |
+
),
|
| 152 |
+
inputType=payload.inputType,
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
except Exception as exc:
|
| 156 |
+
# This fires only if something completely unexpected blows up β
|
| 157 |
+
# the pipeline's own error handling converts most failures into
|
| 158 |
+
# user-readable LLM explanations before reaching this point.
|
| 159 |
+
logger.exception("Unhandled exception in pipeline: %s", exc)
|
| 160 |
+
return ChatResponse(
|
| 161 |
+
response=(
|
| 162 |
+
"β οΈ Something unexpected went wrong.\n\n"
|
| 163 |
+
"The analysis could not complete. Please check that your API keys "
|
| 164 |
+
"are correct and try again.\n\n"
|
| 165 |
+
f"Technical detail: {str(exc)[:200]}"
|
| 166 |
+
),
|
| 167 |
+
inputType=payload.inputType,
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# ββ Health / Info βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 172 |
+
@app.get("/health")
|
| 173 |
+
async def health():
|
| 174 |
+
return {"status": "ok", "service": "FactCheck AI", "version": "1.1.0"}
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@app.get("/about")
|
| 178 |
+
async def about():
|
| 179 |
+
return {
|
| 180 |
+
"name": "FactCheck AI",
|
| 181 |
+
"description": "AI-powered news verification and trust analysis",
|
| 182 |
+
"version": "1.1.0",
|
| 183 |
+
"modes": ["query verification", "article URL analysis"],
|
| 184 |
+
}
|
prototype.py
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import requests
|
| 3 |
+
from querygeneration import KeywordExtractor, KeywordExtractionError
|
| 4 |
+
from conflictdetection import ConflictDetector
|
| 5 |
+
from dataretrieval import DataCollector, get_user_article
|
| 6 |
+
from apisetup import Apicaller
|
| 7 |
+
from similarity import ModelFunctions
|
| 8 |
+
|
| 9 |
+
_SUPPORTED_PROVIDERS = ("superdev",)
|
| 10 |
+
_SUPPORTED_LLM_PROVIDERS = ("anthropic", "openai", "google", "groq")
|
| 11 |
+
|
| 12 |
+
_FACT_CHECK_SYSTEM_PROMPT = """
|
| 13 |
+
You are an AI assistant that explains fact-check results to normal users.
|
| 14 |
+
Your job is to convert structured analysis data into a simple explanation that anyone can understand.
|
| 15 |
+
Important rules:
|
| 16 |
+
1. NEVER mention internal system terms such as: pipeline, retrieval, similarity score, model, API, system status, JSON, or technical errors.
|
| 17 |
+
2. If the system could not find reliable information, explain it in simple language like: "We could not find reliable news reports about this claim."
|
| 18 |
+
3. Use very simple and clear English. Avoid technical or academic words.
|
| 19 |
+
4. Only use the information provided in the input data.
|
| 20 |
+
5. Do NOT invent facts or add outside knowledge.
|
| 21 |
+
6. Keep the explanation short and easy to read.
|
| 22 |
+
7. Maximum length: 100 words.
|
| 23 |
+
|
| 24 |
+
Output format:
|
| 25 |
+
Verdict: <True / False / Partially True / Inconclusive>
|
| 26 |
+
|
| 27 |
+
Explanation:
|
| 28 |
+
Explain the situation in plain language so an average person can understand what was found.
|
| 29 |
+
|
| 30 |
+
Evidence Summary:
|
| 31 |
+
* Briefly mention what different news sources reported.
|
| 32 |
+
|
| 33 |
+
Notes:
|
| 34 |
+
Mention uncertainty, conflicting reports, or lack of reliable information in simple words.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class Prototype:
    """End-to-end fact-checking pipeline: keywords → search → retrieval → conflict detection → LLM report."""

    def __init__(self, api_key: str, key_provider: str):
        # api_key: user-supplied search API key; key_provider routes it
        # (only "superdev" is currently supported — see _SUPPORTED_PROVIDERS).
        self.key_provider = key_provider
        self.keyword_extractor = KeywordExtractor()
        self.api_caller = Apicaller(api_key)
        self.model_functions = ModelFunctions()
        # DataCollector shares the similarity models loaded above.
        self.data_collector = DataCollector(self.model_functions)
        self.conflict_detector = ConflictDetector(strictness=0.7)
|
| 46 |
+
|
| 47 |
+
def extract_keywords(self, input_type: str, data: str) -> list[str]:
|
| 48 |
+
try:
|
| 49 |
+
return self.keyword_extractor.extract(data)
|
| 50 |
+
except KeywordExtractionError as e:
|
| 51 |
+
print(f"[Pipeline] Keyword extraction failed, using fallback: {e}")
|
| 52 |
+
if input_type == "article_link":
|
| 53 |
+
return [data.split(".")[0]]
|
| 54 |
+
return [data]
|
| 55 |
+
|
| 56 |
+
def collect_links(self, keywords: list[str]) -> list[str]:
|
| 57 |
+
try:
|
| 58 |
+
if self.key_provider == "superdev":
|
| 59 |
+
return self.api_caller.superdev(keywords)
|
| 60 |
+
return []
|
| 61 |
+
except Exception as e:
|
| 62 |
+
print(f"[Pipeline] Link collection failed: {e}")
|
| 63 |
+
return []
|
| 64 |
+
|
| 65 |
+
def retrieve_online_data(self, article: str, links: list[str]) -> dict:
|
| 66 |
+
raw_data = self.data_collector.retriever(article, links)
|
| 67 |
+
if not isinstance(raw_data, dict) or raw_data.get("status") != "success":
|
| 68 |
+
return raw_data
|
| 69 |
+
top_data = self.data_collector.top_results(raw_data)
|
| 70 |
+
if top_data is None:
|
| 71 |
+
return {
|
| 72 |
+
"status": "error",
|
| 73 |
+
"error": "top_results() returned no data after a successful fetch. Internal scoring failure."
|
| 74 |
+
}
|
| 75 |
+
return {"status": "success", "results": top_data}
|
| 76 |
+
|
| 77 |
+
def detect_conflicts(self, original_data: dict | str, collected_data: dict) -> str:
|
| 78 |
+
if isinstance(original_data, dict):
|
| 79 |
+
original_article = original_data["article"]
|
| 80 |
+
organization = original_data.get("organization", "unknown")
|
| 81 |
+
else:
|
| 82 |
+
original_article = original_data
|
| 83 |
+
organization = "unknown"
|
| 84 |
+
|
| 85 |
+
results = {}
|
| 86 |
+
for result_name, result in collected_data.items():
|
| 87 |
+
try:
|
| 88 |
+
result["conflict"] = self.conflict_detector.report(
|
| 89 |
+
original_article, result["article"], organization,
|
| 90 |
+
)
|
| 91 |
+
results[result_name] = result
|
| 92 |
+
except Exception as e:
|
| 93 |
+
print(f"[Pipeline] Conflict detection skipped for '{result_name}': {e}")
|
| 94 |
+
|
| 95 |
+
return json.dumps(results, indent=4)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# structured summary for the AI bot
|
| 99 |
+
|
| 100 |
+
    def final_explanation(self, userTypedquery: str, userinputType: str, raw_aggregated: str | dict, article_text: str | None = None) -> dict:
        """
        Converts raw pipeline output into a structured dict for the AI bot.

        Preserves every distinct status from dataretrieval.py:
          "success"              → pipeline ran fully, analysis list included
          "no_match"             → articles fetched but none passed similarity threshold
                                   (has "reason" key, not "error")
          "error"                → fetch/scraping/scoring failure
                                   (has "error" key)
          "INSUFFICIENT_CONTENT" → input too short to extract claims
          empty dict {}          → conflict loop ran but all articles threw individually

        The AI bot receives the exact reason string from each case — not a
        generic "error" label — so it can give the user a specific explanation.
        """
        input_key = "user_article" if userinputType == "article_link" else "user_query"

        # For article_link: use the full scraped text if fetch succeeded, otherwise
        # fall back to the URL (fetch failed → no text available).
        input_value = article_text if (userinputType == "article_link" and article_text) else userTypedquery
        final: dict = {
            input_key: input_value,
        }

        # -- Parse raw_aggregated ----------------------------------------
        if isinstance(raw_aggregated, dict):
            data = raw_aggregated

        elif isinstance(raw_aggregated, str):
            try:
                data = json.loads(raw_aggregated)
            except (json.JSONDecodeError, ValueError) as e:
                final["pipeline_status"] = "error"
                final["problem"] = {
                    "status": "error",
                    "error": f"Pipeline produced unparseable output: {e}",
                }
                return final
        else:
            final["pipeline_status"] = "error"
            final["problem"] = {
                "status": "error",
                "error": (
                    f"Unexpected type for raw_aggregated: {type(raw_aggregated).__name__}. "
                    "Expected a JSON string or dict."
                ),
            }
            return final

        # JSON may parse to a list/scalar — only dicts carry a classifiable shape.
        if not isinstance(data, dict):
            final["pipeline_status"] = "error"
            final["problem"] = {
                "status": "error",
                "error": "Parsed pipeline output is not a dict — cannot interpret results.",
            }
            return final

        # -- Identify output shape ----------------------------------------
        # detect_conflicts() success → keys like "searchresult1", "searchresult2"
        # dataretrieval / pipeline error → top-level "status" key
        # Empty dict → conflict loop ran, all articles skipped
        has_search_results = any(k.startswith("searchresult") for k in data)
        top_level_status = data.get("status")  # None if not present

        if has_search_results and top_level_status is None:
            # ── FULL SUCCESS ─────────────────────────────────────────────
            final["pipeline_status"] = "success"
            final["retrieved_articles"] = len(data)
            final["analysis"] = list(data.values())

        elif top_level_status == "no_match":
            # ── NO_MATCH from _no_match() ────────────────────────────────
            # dataretrieval fetched articles and scored them, but none crossed
            # the similarity threshold. pipeline_status is "error" because the
            # pipeline could not produce an analysis — "reason" key inside
            # problem preserves the specific cause for the AI bot.
            final["pipeline_status"] = "error"
            final["problem"] = data  # preserves "status" and "reason" as-is

        elif top_level_status == "error":
            # ── HARD ERROR from _no_content / _bad_input / _internal_error ──
            # or from any pipeline stage (provider check, fetch fail, etc.)
            # Carries an "error" key with the specific reason string.
            final["pipeline_status"] = "error"
            final["problem"] = data  # preserves "status" and "error" as-is

        elif top_level_status == "INSUFFICIENT_CONTENT":
            # ── SHORT INPUT from conflictdetection.report() ──────────────
            # Input text could not be broken into claims of >= 6 words.
            # Distinct from an error — the pipeline ran fine, input was just too short.
            final["pipeline_status"] = "insufficient_input"
            final["problem"] = data

        elif not data:
            # ── EMPTY DICT ───────────────────────────────────────────────
            # detect_conflicts() ran but every article was individually skipped.
            final["pipeline_status"] = "error"
            final["problem"] = {
                "status": "error",
                "error": (
                    "Conflict detection completed but produced no results. "
                    "Every retrieved article was individually skipped — "
                    "possible causes: model errors, empty article text, or encoding failures."
                ),
            }

        else:
            # ── UNKNOWN SHAPE ────────────────────────────────────────────
            # Pipeline produced a dict we don't recognise. Pass it through
            # rather than silently discarding it — the AI can still read it.
            final["pipeline_status"] = "error"
            final["problem"] = {
                "status": "error",
                "error": (
                    f"Pipeline returned an unrecognised output shape "
                    f"(top-level keys: {list(data.keys())}). Cannot classify result."
                ),
            }

        return final
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
# Send the structured summary to any supported LLM
|
| 224 |
+
|
| 225 |
+
    def get_response(
        self,
        final_aggregatedStructure: dict,
        user_any_llm_api_key: str,
        llm_provider: str = "anthropic",
    ) -> str:
        """Send the structured summary to the chosen LLM and return its text.

        On any failure (bad key, unsupported provider, network error, bad HTTP
        status, malformed body) returns a JSON error string instead of raising,
        so callers always receive a printable string.
        """
        # -- Input validation --------------------------------------------
        if not isinstance(user_any_llm_api_key, str) or not user_any_llm_api_key.strip():
            return json.dumps({
                "status": "error",
                "error": "LLM API key is missing or empty.",
            }, indent=4)

        llm_provider = llm_provider.strip().lower()
        if llm_provider not in _SUPPORTED_LLM_PROVIDERS:
            return json.dumps({
                "status": "error",
                "error": (
                    f"Unsupported LLM provider: '{llm_provider}'. "
                    f"Supported providers are: {', '.join(_SUPPORTED_LLM_PROVIDERS)}."
                ),
            }, indent=4)

        user_message_text = (
            "Here is the fact-checking pipeline output. "
            "Generate the fact-checking report according to your instructions.\n\n"
            f"{json.dumps(final_aggregatedStructure, indent=2)}"
        )

        # -- Build provider-specific request -----------------------------
        # After the validation above, llm_provider is always one of the four
        # branches below, so url/headers/body are always assigned.
        try:
            if llm_provider == "anthropic":
                url = "https://api.anthropic.com/v1/messages"
                headers = {
                    "x-api-key": user_any_llm_api_key,
                    "anthropic-version": "2023-06-01",
                    "content-type": "application/json",
                }
                body = {
                    "model": "claude-sonnet-4-20250514",
                    "max_tokens": 1000,
                    "system": _FACT_CHECK_SYSTEM_PROMPT,
                    "messages": [{"role": "user", "content": user_message_text}],
                }

            elif llm_provider == "openai":
                url = "https://api.openai.com/v1/chat/completions"
                headers = {
                    "Authorization": f"Bearer {user_any_llm_api_key}",
                    "content-type": "application/json",
                }
                body = {
                    "model": "gpt-4o",
                    "messages": [
                        {"role": "system", "content": _FACT_CHECK_SYSTEM_PROMPT},
                        {"role": "user", "content": user_message_text},
                    ],
                }

            elif llm_provider == "google":
                # Gemini takes the API key as a query parameter, not a header.
                url = (
                    "https://generativelanguage.googleapis.com/v1beta/models/"
                    f"gemini-2.0-flash-lite:generateContent?key={user_any_llm_api_key}"
                )
                headers = {"content-type": "application/json"}
                body = {
                    "systemInstruction": {
                        "parts": [{"text": _FACT_CHECK_SYSTEM_PROMPT}]
                    },
                    "contents": [
                        {"parts": [{"text": user_message_text}]}
                    ],
                }

            elif llm_provider == "groq":
                # Groq exposes an OpenAI-compatible endpoint.
                url = "https://api.groq.com/openai/v1/chat/completions"
                headers = {
                    "Authorization": f"Bearer {user_any_llm_api_key}",
                    "content-type": "application/json",
                }
                body = {
                    "model": "llama-3.3-70b-versatile",
                    "messages": [
                        {"role": "system", "content": _FACT_CHECK_SYSTEM_PROMPT},
                        {"role": "user", "content": user_message_text},
                    ],
                }

        except Exception as e:
            return json.dumps({
                "status": "error",
                "error": f"Failed to build request for provider '{llm_provider}': {e}",
            }, indent=4)

        # -- Make the HTTP call ------------------------------------------
        try:
            response = requests.post(url, headers=headers, json=body, timeout=30)

        except requests.exceptions.Timeout:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Request timed out after 30 seconds.",
            }, indent=4)

        except requests.exceptions.ConnectionError as e:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Could not connect. Check your network: {e}",
            }, indent=4)

        except requests.exceptions.RequestException as e:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Request failed: {type(e).__name__}: {e}",
            }, indent=4)

        # -- Handle non-200 HTTP responses --------------------------------
        if response.status_code == 401:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] API key is invalid or unauthorized (HTTP 401).",
            }, indent=4)

        if response.status_code == 429:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Rate limit exceeded (HTTP 429). Wait and retry.",
            }, indent=4)

        if response.status_code != 200:
            body_preview = response.text[:300] if response.text else "(empty body)"
            return json.dumps({
                "status": "error",
                "error": (
                    f"[{llm_provider}] HTTP {response.status_code}. "
                    f"Body preview: {body_preview}"
                ),
            }, indent=4)

        # -- Parse the response body -------------------------------------
        try:
            response_data = response.json()
        except (ValueError, json.JSONDecodeError) as e:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Returned a non-JSON body: {e}",
            }, indent=4)

        # -- Extract text using provider-specific response shape ---------
        try:
            if llm_provider == "anthropic":
                # {"content": [{"type": "text", "text": "..."}]}
                content_blocks = response_data.get("content", [])
                explanation = "\n".join(
                    block["text"]
                    for block in content_blocks
                    if isinstance(block, dict) and block.get("type") == "text"
                ).strip()

            elif llm_provider == "openai":
                # {"choices": [{"message": {"role": "assistant", "content": "..."}}]}
                choices = response_data.get("choices", [])
                if not choices:
                    return json.dumps({
                        "status": "error",
                        "error": "[openai] Response contained no choices.",
                    }, indent=4)
                explanation = choices[0].get("message", {}).get("content", "").strip()

            elif llm_provider == "google":
                # {"candidates": [{"content": {"parts": [{"text": "..."}]}}]}
                candidates = response_data.get("candidates", [])
                if not candidates:
                    return json.dumps({
                        "status": "error",
                        "error": "[google] Response contained no candidates.",
                    }, indent=4)
                parts = candidates[0].get("content", {}).get("parts", [])
                explanation = "\n".join(
                    p["text"] for p in parts if isinstance(p, dict) and "text" in p
                ).strip()

            elif llm_provider == "groq":
                # {"choices": [{"message": {"role": "assistant", "content": "..."}}]}
                choices = response_data.get("choices", [])
                if not choices:
                    return json.dumps({
                        "status": "error",
                        "error": "[groq] Response contained no choices.",
                    }, indent=4)
                explanation = choices[0].get("message", {}).get("content", "").strip()

        except (KeyError, IndexError, TypeError) as e:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Unexpected response structure: {e}",
            }, indent=4)

        if not explanation:
            return json.dumps({
                "status": "error",
                "error": f"[{llm_provider}] Response contained no text.",
            }, indent=4)

        return explanation
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
# Full pipeline β single entry point
|
| 434 |
+
|
| 435 |
+
def run(
    self,
    user_input: str,
    input_type: str,
    llm_api_key: str = None,
    llm_provider: str = "anthropic",
) -> str:
    """Single entry point for the full fact-checking pipeline.

    Steps: validate the search-key provider, fetch/validate the input,
    extract keywords, collect search links, retrieve and rank articles,
    detect conflicts, build an AI-readable summary, and (optionally)
    ask an LLM for a final explanation.

    Note the control flow: when a step fails, an error JSON string is
    stored in ``aggregated`` and later pipeline steps are skipped — but
    the summary/explanation steps (7 and 8) still run, so callers always
    receive a uniformly shaped response.
    """
    aggregated: str | None = None    # error JSON, or conflict-detection output
    article_data = None              # fetched article dict (link mode) or raw text
    collected_data = None            # ranked retrieval results
    text_for_keywords = None         # text fed to the keyword extractor

    # -- Step 1: the configured search-key provider must be supported ---
    if self.key_provider not in _SUPPORTED_PROVIDERS:
        aggregated = json.dumps({
            "status": "error",
            "error": (
                f"Unsupported key provider: '{self.key_provider}'. "
                f"Supported providers are: {', '.join(_SUPPORTED_PROVIDERS)}."
            ),
        }, indent=4)

    # -- Step 2: fetch the article (link mode) or validate raw text -----
    if aggregated is None:
        if input_type == "article_link":
            article_data = get_user_article(user_input)
            print(f"[Pipeline] Article fetch status: {article_data.get('status')}")
            if article_data.get("status") != "success":
                aggregated = json.dumps(article_data, indent=4)
            else:
                text_for_keywords = article_data["article"]
        else:
            # Raw-text mode: make sure at least one claim is extractable.
            preview = self.conflict_detector.split_into_claims(user_input)
            if not preview:
                aggregated = json.dumps({
                    "status": "error",
                    "error": (
                        "Input text is too short for conflict detection. "
                        "Provide at least one complete sentence (6+ words) so the "
                        "system can extract claims to compare against retrieved articles."
                    ),
                }, indent=4)
            else:
                article_data = user_input
                text_for_keywords = user_input

    # -- Step 3: extract search keywords --------------------------------
    if aggregated is None:
        keywords = self.extract_keywords(input_type, text_for_keywords)
        print(f"[Pipeline] Keywords: {keywords}")

    # -- Step 4: collect candidate links via the search API -------------
    if aggregated is None:
        links = self.collect_links(keywords)
        print(f"[Pipeline] Collected {len(links)} links")
        if not links:
            aggregated = json.dumps({
                "status": "error",
                "error": (
                    "No search result links were collected. "
                    "Possible causes: API key is invalid or rate-limited, "
                    "all extracted keywords were too short, or a network failure occurred."
                ),
            }, indent=4)

    # -- Step 5: retrieve article bodies and rank them ------------------
    if aggregated is None:
        retrieval_result = self.retrieve_online_data(text_for_keywords, links)
        print(f"[Pipeline] Retrieval status: {retrieval_result.get('status')}")
        if retrieval_result.get("status") != "success":
            aggregated = json.dumps(retrieval_result, indent=4)
        else:
            collected_data = retrieval_result["results"]
            print(f"[Pipeline] Retrieved {len(collected_data)} results")

    # -- Step 6: claim-level conflict detection -------------------------
    if aggregated is None:
        aggregated = self.detect_conflicts(article_data, collected_data)

    # -- Step 7: build the AI-readable summary (always runs) ------------
    fetched_article_text = (
        article_data.get("article")
        if isinstance(article_data, dict) and input_type == "article_link"
        else None
    )
    final_structure = self.final_explanation(user_input, input_type, aggregated, fetched_article_text)

    # -- Step 8: optional LLM explanation -------------------------------
    if llm_api_key:
        return self.get_response(final_structure, llm_api_key, llm_provider)

    return json.dumps(final_structure, indent=4)
|
querygeneration.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from keybert import KeyBERT
|
| 2 |
+
from transformers import logging
|
| 3 |
+
|
| 4 |
+
logging.set_verbosity_error()
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class KeywordExtractionError(Exception):
    """Raised when keyword extraction fails completely and no fallback exists."""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class KeywordExtractor:
    """Thin wrapper around KeyBERT for pulling keyphrases out of free text."""

    def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
        """Load the underlying KeyBERT model.

        Raises:
            KeywordExtractionError: if the model cannot be loaded.
        """
        try:
            self.kw_model = KeyBERT(model=model_name)
        except Exception as e:
            raise KeywordExtractionError(
                f"Failed to load KeyBERT model '{model_name}': {e}"
            ) from e

    def extract(
        self,
        text: str,
        num_keywords: int = 3,
        ngram_range: tuple = (1, 2),
    ) -> list[str]:
        """Extract up to ``num_keywords`` keyphrases from ``text``.

        Returns:
            A list of keyword strings.

        Raises:
            ValueError: if ``text`` is not a non-empty string.
            KeywordExtractionError: if extraction fails completely.
        """
        if not (isinstance(text, str) and text.strip()):
            raise ValueError("Input text must be a non-empty string.")

        try:
            # extract_keywords returns (phrase, score) pairs; keep the phrases.
            scored_phrases = self.kw_model.extract_keywords(
                text,
                keyphrase_ngram_range=ngram_range,
                stop_words="english",
                top_n=num_keywords,
            )
            phrases = [pair[0] for pair in scored_phrases if pair]

            if not phrases:
                raise KeywordExtractionError("Model returned no keywords for the given text.")

            return phrases

        except KeywordExtractionError:
            # Already the right exception type — propagate untouched.
            raise

        except Exception as e:
            raise KeywordExtractionError(f"Unexpected error during keyword extraction: {e}") from e
|
requirements.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
numpy
|
| 2 |
+
newspaper3k
|
| 3 |
+
trafilatura
|
| 4 |
+
torch
|
| 5 |
+
transformers
|
| 6 |
+
sentence-transformers
|
| 7 |
+
scikit-learn
|
| 8 |
+
keybert
|
| 9 |
+
tldextract
|
| 10 |
+
spacy
|
| 11 |
+
langdetect
|
| 12 |
+
fastapi
|
| 13 |
+
uvicorn
|
| 14 |
+
Jinja2
|
| 15 |
+
pydantic
|
| 16 |
+
html_clean
|
| 17 |
+
lxml_html_clean
|
| 18 |
+
aiohttp
|
| 19 |
+
google-generativeai
|
| 20 |
+
groq
|
similarity.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from sentence_transformers import SentenceTransformer
|
| 4 |
+
from transformers import logging
|
| 5 |
+
|
| 6 |
+
logging.set_verbosity_error()
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class SimilarityModelError(Exception):
    """Raised when the sentence-similarity model fails to load or execute."""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ModelFunctions:
    """Sentence-similarity helpers backed by a SentenceTransformer encoder.

    Both public methods are deliberately defensive: on bad input or a
    runtime failure they log a diagnostic and return 0.0 scores instead
    of raising, so they are safe to call inside retrieval/ranking loops.
    """

    def __init__(self, model_name: str = "all-mpnet-base-v2"):
        """Load the embedding model.

        Raises:
            SimilarityModelError: if the model cannot be loaded.
        """
        try:
            self.sbert_model = SentenceTransformer(model_name)
        except Exception as e:
            raise SimilarityModelError(
                f"Failed to load SentenceTransformer model '{model_name}': {e}"
            ) from e

    # ------------------------------------------------------------------
    # Single pair comparison
    # ------------------------------------------------------------------
    def SimilarityScore(self, sent1: str, sent2: str) -> float:
        """
        Returns the cosine similarity between two sentences, rounded to
        4 decimals. Cosine similarity ranges from -1.0 to 1.0 (for
        natural-language embeddings it is typically near 0.0 to 1.0).
        Returns 0.0 on failure instead of crashing — safe to use in loops.
        """
        if not isinstance(sent1, str) or not isinstance(sent2, str):
            print("[ModelFunctions] SimilarityScore: both inputs must be strings.")
            return 0.0

        if not sent1.strip() or not sent2.strip():
            print("[ModelFunctions] SimilarityScore: received empty string input.")
            return 0.0

        try:
            # Encode both sentences in one pass (a fixed batch of two, so
            # no explicit batch_size is needed).
            emb1, emb2 = self.sbert_model.encode(
                [sent1, sent2],
                convert_to_tensor=True,
            )
            score = F.cosine_similarity(emb1, emb2, dim=0)
            return round(score.item(), 4)

        except Exception as e:
            print(f"[ModelFunctions] SimilarityScore failed: {e}")
            return 0.0

    # ------------------------------------------------------------------
    # Batch comparison — one forward pass for all candidates
    # ------------------------------------------------------------------
    def BatchSimilarityScores(self, original: str, candidates: list[str]) -> list[float]:
        """
        Compares `original` against every string in `candidates` in a
        single encoding pass. Returns a list of scores in the same order,
        with 0.0 at positions whose candidate was invalid (non-string or
        empty).

        Why this is faster than calling SimilarityScore() N times:
        SentenceTransformer.encode() has per-call overhead (tokenization,
        GPU dispatch). One call with N+1 sentences amortises that cost once.
        """
        if not isinstance(original, str) or not original.strip():
            print("[ModelFunctions] BatchSimilarityScores: original must be a non-empty string.")
            return [0.0] * len(candidates)

        if not candidates:
            return []

        # Filter out any non-string or empty entries, track their positions.
        valid_candidates = []
        index_map = []  # maps valid index → original index

        for i, candidate in enumerate(candidates):
            if isinstance(candidate, str) and candidate.strip():
                valid_candidates.append(candidate)
                index_map.append(i)
            else:
                print(f"[ModelFunctions] Skipping invalid candidate at index {i}.")

        if not valid_candidates:
            print("[ModelFunctions] No valid candidates to compare against.")
            return [0.0] * len(candidates)

        try:
            all_texts = [original] + valid_candidates

            embeddings = self.sbert_model.encode(
                all_texts,
                convert_to_tensor=True,
                batch_size=64,
                show_progress_bar=False,
            )

            orig_emb = embeddings[0]   # shape: (hidden,)
            cand_embs = embeddings[1:]  # shape: (N, hidden)

            # cosine_similarity broadcasts the (1, hidden) query against
            # every candidate row — no need to expand/copy it N times.
            scores = F.cosine_similarity(orig_emb.unsqueeze(0), cand_embs, dim=1)

            # Rebuild the full result list with 0.0 for skipped entries.
            result = [0.0] * len(candidates)
            for valid_idx, original_idx in enumerate(index_map):
                result[original_idx] = round(scores[valid_idx].item(), 4)

            return result

        except Exception as e:
            print(f"[ModelFunctions] BatchSimilarityScores failed: {e}")
            return [0.0] * len(candidates)
|
static/looks.css
ADDED
|
@@ -0,0 +1,1075 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* =========================================
|
| 2 |
+
FactCheck AI β Design System
|
| 3 |
+
Aesthetic: Dark Editorial / Intelligence Briefing
|
| 4 |
+
========================================= */
|
| 5 |
+
|
| 6 |
+
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
| 7 |
+
|
| 8 |
+
:root {
|
| 9 |
+
--bg-deep: #0b0d0f;
|
| 10 |
+
--bg-surface: #111418;
|
| 11 |
+
--bg-elevated: #181d23;
|
| 12 |
+
--bg-hover: #1e242c;
|
| 13 |
+
--border: rgba(255,255,255,0.07);
|
| 14 |
+
--border-bright: rgba(255,255,255,0.14);
|
| 15 |
+
|
| 16 |
+
--text-primary: #e8e6e1;
|
| 17 |
+
--text-secondary: #7a8898;
|
| 18 |
+
--text-muted: #4a5568;
|
| 19 |
+
|
| 20 |
+
--accent: #e8c547;
|
| 21 |
+
--accent-dim: rgba(232,197,71,0.12);
|
| 22 |
+
--accent-glow: rgba(232,197,71,0.25);
|
| 23 |
+
|
| 24 |
+
--green: #3ecf8e;
|
| 25 |
+
--red: #e85d4a;
|
| 26 |
+
--blue: #5b9cf6;
|
| 27 |
+
|
| 28 |
+
--font-display: 'Playfair Display', Georgia, serif;
|
| 29 |
+
--font-mono: 'IBM Plex Mono', 'Courier New', monospace;
|
| 30 |
+
--font-body: 'IBM Plex Sans', system-ui, sans-serif;
|
| 31 |
+
|
| 32 |
+
--radius-sm: 4px;
|
| 33 |
+
--radius-md: 8px;
|
| 34 |
+
--radius-lg: 14px;
|
| 35 |
+
|
| 36 |
+
--sidebar-w: 230px;
|
| 37 |
+
--transition: 0.18s ease;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
html, body {
|
| 41 |
+
height: 100%;
|
| 42 |
+
background: var(--bg-deep);
|
| 43 |
+
color: var(--text-primary);
|
| 44 |
+
font-family: var(--font-body);
|
| 45 |
+
font-size: 15px;
|
| 46 |
+
line-height: 1.6;
|
| 47 |
+
-webkit-font-smoothing: antialiased;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
/* ββ Background ββ */
|
| 51 |
+
.bg-grid {
|
| 52 |
+
position: fixed; inset: 0; z-index: -1; pointer-events: none;
|
| 53 |
+
background-image:
|
| 54 |
+
linear-gradient(rgba(255,255,255,0.025) 1px, transparent 1px),
|
| 55 |
+
linear-gradient(90deg, rgba(255,255,255,0.025) 1px, transparent 1px);
|
| 56 |
+
background-size: 48px 48px;
|
| 57 |
+
mask-image: radial-gradient(ellipse 80% 80% at 50% 0%, black 40%, transparent 100%);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
.bg-noise {
|
| 61 |
+
position: fixed; inset: 0; z-index: -1; pointer-events: none; opacity: 0.025;
|
| 62 |
+
background-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='noise'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.9' numOctaves='4' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23noise)'/%3E%3C/svg%3E");
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
/* ββ Layout ββ */
|
| 66 |
+
.app-wrapper {
|
| 67 |
+
position: relative;
|
| 68 |
+
display: flex;
|
| 69 |
+
height: 100dvh;
|
| 70 |
+
max-width: 1400px;
|
| 71 |
+
margin: 0 auto;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
/* =========================================
|
| 75 |
+
SIDEBAR
|
| 76 |
+
========================================= */
|
| 77 |
+
.sidebar {
|
| 78 |
+
width: var(--sidebar-w);
|
| 79 |
+
flex-shrink: 0;
|
| 80 |
+
background: var(--bg-surface);
|
| 81 |
+
border-right: 1px solid var(--border);
|
| 82 |
+
display: flex;
|
| 83 |
+
flex-direction: column;
|
| 84 |
+
padding: 24px 16px;
|
| 85 |
+
gap: 0;
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
.sidebar-brand {
|
| 89 |
+
display: flex;
|
| 90 |
+
align-items: center;
|
| 91 |
+
gap: 10px;
|
| 92 |
+
padding: 0 6px 20px;
|
| 93 |
+
border-bottom: 1px solid var(--border);
|
| 94 |
+
margin-bottom: 20px;
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
.brand-mark {
|
| 98 |
+
font-size: 22px;
|
| 99 |
+
color: var(--accent);
|
| 100 |
+
line-height: 1;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
.brand-name {
|
| 104 |
+
font-family: var(--font-mono);
|
| 105 |
+
font-size: 13px;
|
| 106 |
+
font-weight: 600;
|
| 107 |
+
letter-spacing: 0.03em;
|
| 108 |
+
color: var(--text-primary);
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
.brand-name em { font-style: normal; color: var(--accent); }
|
| 112 |
+
|
| 113 |
+
.sidebar-info {
|
| 114 |
+
margin-bottom: 24px;
|
| 115 |
+
padding: 0 4px;
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
.sidebar-desc {
|
| 119 |
+
font-size: 13px;
|
| 120 |
+
color: var(--text-secondary);
|
| 121 |
+
line-height: 1.65;
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
/* How it works tips */
|
| 125 |
+
.sidebar-tips {
|
| 126 |
+
display: flex;
|
| 127 |
+
flex-direction: column;
|
| 128 |
+
gap: 12px;
|
| 129 |
+
padding: 16px 12px;
|
| 130 |
+
background: var(--bg-elevated);
|
| 131 |
+
border: 1px solid var(--border);
|
| 132 |
+
border-radius: var(--radius-md);
|
| 133 |
+
margin-bottom: 20px;
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
.tips-label {
|
| 137 |
+
font-family: var(--font-mono);
|
| 138 |
+
font-size: 10px;
|
| 139 |
+
letter-spacing: 0.12em;
|
| 140 |
+
text-transform: uppercase;
|
| 141 |
+
color: var(--text-muted);
|
| 142 |
+
display: block;
|
| 143 |
+
margin-bottom: 2px;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
.tip-item {
|
| 147 |
+
display: flex;
|
| 148 |
+
gap: 10px;
|
| 149 |
+
align-items: flex-start;
|
| 150 |
+
font-size: 12.5px;
|
| 151 |
+
color: var(--text-secondary);
|
| 152 |
+
line-height: 1.5;
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
.tip-icon { font-size: 13px; flex-shrink: 0; margin-top: 1px; }
|
| 156 |
+
.tip-item strong { color: var(--text-primary); font-weight: 500; }
|
| 157 |
+
|
| 158 |
+
/* Bottom */
|
| 159 |
+
.sidebar-status {
|
| 160 |
+
display: flex;
|
| 161 |
+
align-items: center;
|
| 162 |
+
gap: 8px;
|
| 163 |
+
padding: 10px 12px;
|
| 164 |
+
margin-top: auto;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
.status-dot {
|
| 168 |
+
width: 7px; height: 7px;
|
| 169 |
+
border-radius: 50%;
|
| 170 |
+
background: var(--red);
|
| 171 |
+
box-shadow: 0 0 8px var(--red);
|
| 172 |
+
animation: pulse-dot 2.5s ease-in-out infinite;
|
| 173 |
+
transition: background 0.3s ease, box-shadow 0.3s ease;
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
@keyframes pulse-dot {
|
| 177 |
+
0%, 100% { opacity: 1; }
|
| 178 |
+
50% { opacity: 0.4; }
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
.status-text {
|
| 182 |
+
font-family: var(--font-mono);
|
| 183 |
+
font-size: 11px;
|
| 184 |
+
color: var(--text-muted);
|
| 185 |
+
letter-spacing: 0.04em;
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
.sidebar-link {
|
| 189 |
+
display: flex;
|
| 190 |
+
align-items: center;
|
| 191 |
+
gap: 8px;
|
| 192 |
+
padding: 8px 12px;
|
| 193 |
+
border-radius: var(--radius-md);
|
| 194 |
+
color: var(--text-muted);
|
| 195 |
+
text-decoration: none;
|
| 196 |
+
font-size: 12px;
|
| 197 |
+
font-family: var(--font-mono);
|
| 198 |
+
transition: color var(--transition);
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
.sidebar-link:hover { color: var(--text-secondary); }
|
| 202 |
+
|
| 203 |
+
/* =========================================
|
| 204 |
+
CHAT AREA
|
| 205 |
+
========================================= */
|
| 206 |
+
.chat-area {
|
| 207 |
+
flex: 1;
|
| 208 |
+
display: flex;
|
| 209 |
+
flex-direction: column;
|
| 210 |
+
overflow: hidden;
|
| 211 |
+
background: var(--bg-deep);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
.chat-header {
|
| 215 |
+
display: flex;
|
| 216 |
+
align-items: center;
|
| 217 |
+
justify-content: space-between;
|
| 218 |
+
padding: 16px 28px;
|
| 219 |
+
border-bottom: 1px solid var(--border);
|
| 220 |
+
background: rgba(11,13,15,0.8);
|
| 221 |
+
backdrop-filter: blur(12px);
|
| 222 |
+
flex-shrink: 0;
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
.header-left { display: flex; align-items: baseline; gap: 14px; }
|
| 226 |
+
|
| 227 |
+
.header-title {
|
| 228 |
+
font-family: var(--font-display);
|
| 229 |
+
font-size: 20px;
|
| 230 |
+
font-weight: 700;
|
| 231 |
+
color: var(--text-primary);
|
| 232 |
+
letter-spacing: -0.01em;
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
.header-session {
|
| 236 |
+
font-family: var(--font-mono);
|
| 237 |
+
font-size: 11px;
|
| 238 |
+
color: var(--text-muted);
|
| 239 |
+
letter-spacing: 0.06em;
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
.clear-btn {
|
| 243 |
+
display: flex;
|
| 244 |
+
align-items: center;
|
| 245 |
+
gap: 6px;
|
| 246 |
+
padding: 6px 12px;
|
| 247 |
+
border: 1px solid var(--border);
|
| 248 |
+
border-radius: var(--radius-sm);
|
| 249 |
+
background: transparent;
|
| 250 |
+
color: var(--text-muted);
|
| 251 |
+
font-size: 12px;
|
| 252 |
+
font-family: var(--font-mono);
|
| 253 |
+
cursor: pointer;
|
| 254 |
+
transition: all var(--transition);
|
| 255 |
+
letter-spacing: 0.04em;
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
.clear-btn:hover {
|
| 259 |
+
border-color: var(--red);
|
| 260 |
+
color: var(--red);
|
| 261 |
+
background: rgba(232,93,74,0.08);
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
/* ββ Messages ββ */
|
| 265 |
+
.messages-container {
|
| 266 |
+
flex: 1;
|
| 267 |
+
overflow-y: auto;
|
| 268 |
+
padding: 32px 28px;
|
| 269 |
+
display: flex;
|
| 270 |
+
flex-direction: column;
|
| 271 |
+
gap: 24px;
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
.messages-container::-webkit-scrollbar { width: 4px; }
|
| 275 |
+
.messages-container::-webkit-scrollbar-track { background: transparent; }
|
| 276 |
+
.messages-container::-webkit-scrollbar-thumb { background: var(--border-bright); border-radius: 2px; }
|
| 277 |
+
|
| 278 |
+
.message {
|
| 279 |
+
max-width: 760px;
|
| 280 |
+
opacity: 0;
|
| 281 |
+
transform: translateY(8px);
|
| 282 |
+
transition: opacity 0.25s ease, transform 0.25s ease;
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
.message.visible,
|
| 286 |
+
.intro-message { opacity: 1; transform: translateY(0); }
|
| 287 |
+
|
| 288 |
+
.fade-out {
|
| 289 |
+
opacity: 0 !important;
|
| 290 |
+
transform: translateY(-6px) !important;
|
| 291 |
+
transition: opacity 0.25s ease, transform 0.25s ease !important;
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
.message-meta {
|
| 295 |
+
display: flex;
|
| 296 |
+
align-items: center;
|
| 297 |
+
gap: 10px;
|
| 298 |
+
margin-bottom: 6px;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
.msg-sender {
|
| 302 |
+
font-family: var(--font-mono);
|
| 303 |
+
font-size: 11px;
|
| 304 |
+
font-weight: 600;
|
| 305 |
+
letter-spacing: 0.08em;
|
| 306 |
+
text-transform: uppercase;
|
| 307 |
+
}
|
| 308 |
+
|
| 309 |
+
.msg-time {
|
| 310 |
+
font-family: var(--font-mono);
|
| 311 |
+
font-size: 10px;
|
| 312 |
+
color: var(--text-muted);
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
.message-body {
|
| 316 |
+
padding: 16px 20px;
|
| 317 |
+
border-radius: var(--radius-lg);
|
| 318 |
+
border: 1px solid var(--border);
|
| 319 |
+
line-height: 1.7;
|
| 320 |
+
font-size: 14.5px;
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
.message-body p + p { margin-top: 10px; }
|
| 324 |
+
|
| 325 |
+
.bot-message { align-self: flex-start; }
|
| 326 |
+
.bot-message .msg-sender { color: var(--accent); }
|
| 327 |
+
.bot-message .message-body {
|
| 328 |
+
background: var(--bg-elevated);
|
| 329 |
+
border-color: var(--border-bright);
|
| 330 |
+
color: var(--text-primary);
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
.user-message { align-self: flex-end; }
|
| 334 |
+
.user-message .msg-sender { color: var(--blue); }
|
| 335 |
+
.user-message .message-body {
|
| 336 |
+
background: rgba(91,156,246,0.08);
|
| 337 |
+
border-color: rgba(91,156,246,0.2);
|
| 338 |
+
color: var(--text-primary);
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
.error-message .message-body {
|
| 342 |
+
background: rgba(232,93,74,0.07);
|
| 343 |
+
border-color: rgba(232,93,74,0.25);
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
/* No-key message β amber tint to signal action needed */
|
| 347 |
+
.no-key-message .message-body {
|
| 348 |
+
background: var(--accent-dim);
|
| 349 |
+
border-color: rgba(232,197,71,0.22);
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
/* ββ "Enter API Key" inline button ββ */
|
| 353 |
+
.enter-key-btn {
|
| 354 |
+
display: inline-flex;
|
| 355 |
+
align-items: center;
|
| 356 |
+
gap: 7px;
|
| 357 |
+
margin-top: 14px;
|
| 358 |
+
padding: 8px 16px;
|
| 359 |
+
background: var(--accent);
|
| 360 |
+
color: var(--bg-deep);
|
| 361 |
+
border: none;
|
| 362 |
+
border-radius: var(--radius-md);
|
| 363 |
+
font-family: var(--font-mono);
|
| 364 |
+
font-size: 12px;
|
| 365 |
+
font-weight: 600;
|
| 366 |
+
letter-spacing: 0.04em;
|
| 367 |
+
cursor: pointer;
|
| 368 |
+
transition: all var(--transition);
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
.enter-key-btn:hover {
|
| 372 |
+
background: #f0d060;
|
| 373 |
+
transform: translateY(-1px);
|
| 374 |
+
box-shadow: 0 4px 14px var(--accent-glow);
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
.enter-key-btn:active { transform: translateY(0); }
|
| 378 |
+
|
| 379 |
+
/* Loading */
|
| 380 |
+
.loading-message .message-body {
|
| 381 |
+
display: flex;
|
| 382 |
+
align-items: center;
|
| 383 |
+
gap: 12px;
|
| 384 |
+
padding: 14px 20px;
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
.typing-indicator { display: flex; align-items: center; gap: 5px; }
|
| 388 |
+
|
| 389 |
+
.typing-indicator span {
|
| 390 |
+
display: block;
|
| 391 |
+
width: 6px; height: 6px;
|
| 392 |
+
border-radius: 50%;
|
| 393 |
+
background: var(--accent);
|
| 394 |
+
animation: bounce-dot 1.2s ease-in-out infinite;
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
.typing-indicator span:nth-child(2) { animation-delay: 0.15s; }
|
| 398 |
+
.typing-indicator span:nth-child(3) { animation-delay: 0.3s; }
|
| 399 |
+
|
| 400 |
+
@keyframes bounce-dot {
|
| 401 |
+
0%, 80%, 100% { transform: scale(0.7); opacity: 0.4; }
|
| 402 |
+
40% { transform: scale(1); opacity: 1; }
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
.loading-text {
|
| 406 |
+
font-family: var(--font-mono);
|
| 407 |
+
font-size: 12px;
|
| 408 |
+
color: var(--text-muted);
|
| 409 |
+
letter-spacing: 0.06em;
|
| 410 |
+
animation: flicker 1.5s ease-in-out infinite;
|
| 411 |
+
}
|
| 412 |
+
|
| 413 |
+
@keyframes flicker {
|
| 414 |
+
0%, 100% { opacity: 1; }
|
| 415 |
+
50% { opacity: 0.4; }
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
/* =========================================
|
| 419 |
+
INPUT AREA
|
| 420 |
+
========================================= */
|
| 421 |
+
.input-area {
|
| 422 |
+
padding: 16px 28px 20px;
|
| 423 |
+
border-top: 1px solid var(--border);
|
| 424 |
+
background: rgba(11,13,15,0.88);
|
| 425 |
+
backdrop-filter: blur(12px);
|
| 426 |
+
flex-shrink: 0;
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
/* ββ The main input box ββ */
|
| 430 |
+
.input-form {
|
| 431 |
+
display: flex;
|
| 432 |
+
align-items: center;
|
| 433 |
+
gap: 0;
|
| 434 |
+
background: var(--bg-elevated);
|
| 435 |
+
border: 1px solid var(--border-bright);
|
| 436 |
+
border-radius: var(--radius-lg);
|
| 437 |
+
padding: 8px 10px 8px 10px;
|
| 438 |
+
transition: border-color var(--transition), box-shadow var(--transition);
|
| 439 |
+
}
|
| 440 |
+
|
| 441 |
+
.input-form:focus-within {
|
| 442 |
+
border-color: rgba(232,197,71,0.35);
|
| 443 |
+
box-shadow: 0 0 0 3px var(--accent-glow);
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
/* ββ Mode pill toggle (inside input box) ββ */
|
| 447 |
+
.mode-pill {
|
| 448 |
+
display: flex;
|
| 449 |
+
align-items: center;
|
| 450 |
+
gap: 2px;
|
| 451 |
+
background: var(--bg-hover);
|
| 452 |
+
border: 1px solid var(--border);
|
| 453 |
+
border-radius: 8px;
|
| 454 |
+
padding: 3px;
|
| 455 |
+
flex-shrink: 0;
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
.pill-btn {
|
| 459 |
+
display: flex;
|
| 460 |
+
align-items: center;
|
| 461 |
+
gap: 5px;
|
| 462 |
+
padding: 5px 10px;
|
| 463 |
+
border: none;
|
| 464 |
+
border-radius: 6px;
|
| 465 |
+
background: transparent;
|
| 466 |
+
color: var(--text-muted);
|
| 467 |
+
font-family: var(--font-mono);
|
| 468 |
+
font-size: 11px;
|
| 469 |
+
font-weight: 500;
|
| 470 |
+
letter-spacing: 0.04em;
|
| 471 |
+
cursor: pointer;
|
| 472 |
+
transition: all var(--transition);
|
| 473 |
+
white-space: nowrap;
|
| 474 |
+
}
|
| 475 |
+
|
| 476 |
+
.pill-btn:hover {
|
| 477 |
+
color: var(--text-secondary);
|
| 478 |
+
background: rgba(255,255,255,0.05);
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
.pill-btn.active {
|
| 482 |
+
background: var(--accent);
|
| 483 |
+
color: var(--bg-deep);
|
| 484 |
+
}
|
| 485 |
+
|
| 486 |
+
.pill-btn.active svg { opacity: 1; }
|
| 487 |
+
.pill-btn svg { opacity: 0.6; }
|
| 488 |
+
|
| 489 |
+
/* ββ Vertical divider between pill and textarea ββ */
|
| 490 |
+
.input-divider {
|
| 491 |
+
width: 1px;
|
| 492 |
+
height: 28px;
|
| 493 |
+
background: var(--border-bright);
|
| 494 |
+
margin: 0 12px;
|
| 495 |
+
flex-shrink: 0;
|
| 496 |
+
}
|
| 497 |
+
|
| 498 |
+
.textarea-wrap { flex: 1; }
|
| 499 |
+
|
| 500 |
+
textarea#user-input {
|
| 501 |
+
width: 100%;
|
| 502 |
+
background: transparent;
|
| 503 |
+
border: none;
|
| 504 |
+
outline: none;
|
| 505 |
+
resize: none;
|
| 506 |
+
color: var(--text-primary);
|
| 507 |
+
font-family: var(--font-body);
|
| 508 |
+
font-size: 14.5px;
|
| 509 |
+
line-height: 1.6;
|
| 510 |
+
min-height: 26px;
|
| 511 |
+
max-height: 160px;
|
| 512 |
+
overflow-y: auto;
|
| 513 |
+
caret-color: var(--accent);
|
| 514 |
+
padding: 2px 0;
|
| 515 |
+
}
|
| 516 |
+
|
| 517 |
+
textarea#user-input::placeholder { color: var(--text-muted); }
|
| 518 |
+
|
| 519 |
+
/* ββ Send button ββ */
|
| 520 |
+
#send-btn {
|
| 521 |
+
flex-shrink: 0;
|
| 522 |
+
width: 36px; height: 36px;
|
| 523 |
+
border-radius: var(--radius-md);
|
| 524 |
+
border: none;
|
| 525 |
+
background: var(--accent);
|
| 526 |
+
color: var(--bg-deep);
|
| 527 |
+
display: flex;
|
| 528 |
+
align-items: center;
|
| 529 |
+
justify-content: center;
|
| 530 |
+
cursor: pointer;
|
| 531 |
+
margin-left: 10px;
|
| 532 |
+
transition: all var(--transition);
|
| 533 |
+
}
|
| 534 |
+
|
| 535 |
+
#send-btn:hover {
|
| 536 |
+
background: #f0d060;
|
| 537 |
+
transform: scale(1.06);
|
| 538 |
+
box-shadow: 0 4px 16px var(--accent-glow);
|
| 539 |
+
}
|
| 540 |
+
|
| 541 |
+
#send-btn:active { transform: scale(0.96); }
|
| 542 |
+
|
| 543 |
+
.input-hint {
|
| 544 |
+
margin-top: 8px;
|
| 545 |
+
font-size: 11px;
|
| 546 |
+
font-family: var(--font-mono);
|
| 547 |
+
color: var(--text-muted);
|
| 548 |
+
letter-spacing: 0.03em;
|
| 549 |
+
}
|
| 550 |
+
|
| 551 |
+
kbd {
|
| 552 |
+
display: inline-block;
|
| 553 |
+
padding: 1px 5px;
|
| 554 |
+
background: var(--bg-hover);
|
| 555 |
+
border: 1px solid var(--border-bright);
|
| 556 |
+
border-radius: 3px;
|
| 557 |
+
font-family: var(--font-mono);
|
| 558 |
+
font-size: 10px;
|
| 559 |
+
color: var(--text-secondary);
|
| 560 |
+
}
|
| 561 |
+
|
| 562 |
+
/* =========================================
|
| 563 |
+
API KEY MODAL
|
| 564 |
+
========================================= */
|
| 565 |
+
.modal-overlay {
|
| 566 |
+
position: fixed;
|
| 567 |
+
inset: 0;
|
| 568 |
+
z-index: 200;
|
| 569 |
+
background: rgba(11, 13, 15, 0.82);
|
| 570 |
+
backdrop-filter: blur(10px);
|
| 571 |
+
-webkit-backdrop-filter: blur(10px);
|
| 572 |
+
display: flex;
|
| 573 |
+
align-items: center;
|
| 574 |
+
justify-content: center;
|
| 575 |
+
padding: 24px;
|
| 576 |
+
opacity: 0;
|
| 577 |
+
transition: opacity 0.25s ease;
|
| 578 |
+
}
|
| 579 |
+
|
| 580 |
+
.modal-overlay.visible { opacity: 1; }
|
| 581 |
+
.modal-overlay.hidden { display: none; }
|
| 582 |
+
|
| 583 |
+
.modal-card {
|
| 584 |
+
position: relative;
|
| 585 |
+
background: var(--bg-elevated);
|
| 586 |
+
border: 1px solid var(--border-bright);
|
| 587 |
+
border-radius: var(--radius-lg);
|
| 588 |
+
padding: 32px 30px 28px;
|
| 589 |
+
width: 100%;
|
| 590 |
+
max-width: 500px;
|
| 591 |
+
box-shadow: 0 32px 80px rgba(0,0,0,0.7), 0 0 0 1px rgba(232,197,71,0.06);
|
| 592 |
+
animation: modal-rise 0.28s ease forwards;
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
@keyframes modal-rise {
|
| 596 |
+
from { transform: translateY(12px); opacity: 0; }
|
| 597 |
+
to { transform: translateY(0); opacity: 1; }
|
| 598 |
+
}
|
| 599 |
+
|
| 600 |
+
/* Close button */
|
| 601 |
+
.modal-close {
|
| 602 |
+
position: absolute;
|
| 603 |
+
top: 16px;
|
| 604 |
+
right: 16px;
|
| 605 |
+
width: 28px;
|
| 606 |
+
height: 28px;
|
| 607 |
+
background: var(--bg-hover);
|
| 608 |
+
border: 1px solid var(--border);
|
| 609 |
+
border-radius: var(--radius-sm);
|
| 610 |
+
color: var(--text-muted);
|
| 611 |
+
display: flex;
|
| 612 |
+
align-items: center;
|
| 613 |
+
justify-content: center;
|
| 614 |
+
cursor: pointer;
|
| 615 |
+
transition: all var(--transition);
|
| 616 |
+
}
|
| 617 |
+
|
| 618 |
+
.modal-close:hover {
|
| 619 |
+
border-color: var(--red);
|
| 620 |
+
color: var(--red);
|
| 621 |
+
background: rgba(232,93,74,0.08);
|
| 622 |
+
}
|
| 623 |
+
|
| 624 |
+
/* Header */
|
| 625 |
+
.modal-header {
|
| 626 |
+
display: flex;
|
| 627 |
+
align-items: flex-start;
|
| 628 |
+
gap: 14px;
|
| 629 |
+
margin-bottom: 20px;
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
.modal-icon {
|
| 633 |
+
font-size: 26px;
|
| 634 |
+
line-height: 1;
|
| 635 |
+
margin-top: 2px;
|
| 636 |
+
flex-shrink: 0;
|
| 637 |
+
}
|
| 638 |
+
|
| 639 |
+
.modal-title {
|
| 640 |
+
font-family: var(--font-display);
|
| 641 |
+
font-size: 20px;
|
| 642 |
+
font-weight: 700;
|
| 643 |
+
color: var(--text-primary);
|
| 644 |
+
letter-spacing: -0.01em;
|
| 645 |
+
line-height: 1.2;
|
| 646 |
+
}
|
| 647 |
+
|
| 648 |
+
.modal-subtitle {
|
| 649 |
+
font-family: var(--font-mono);
|
| 650 |
+
font-size: 11px;
|
| 651 |
+
color: var(--text-muted);
|
| 652 |
+
margin-top: 4px;
|
| 653 |
+
letter-spacing: 0.04em;
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
/* Divider */
|
| 657 |
+
.modal-divider {
|
| 658 |
+
height: 1px;
|
| 659 |
+
background: var(--border);
|
| 660 |
+
margin-bottom: 20px;
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
/* Description text */
|
| 664 |
+
.modal-desc {
|
| 665 |
+
font-size: 13.5px;
|
| 666 |
+
color: var(--text-secondary);
|
| 667 |
+
line-height: 1.65;
|
| 668 |
+
margin-bottom: 18px;
|
| 669 |
+
}
|
| 670 |
+
|
| 671 |
+
.modal-desc strong { color: var(--text-primary); font-weight: 500; }
|
| 672 |
+
|
| 673 |
+
.modal-link {
|
| 674 |
+
color: var(--accent);
|
| 675 |
+
text-decoration: none;
|
| 676 |
+
border-bottom: 1px solid rgba(232,197,71,0.3);
|
| 677 |
+
transition: border-color var(--transition);
|
| 678 |
+
}
|
| 679 |
+
|
| 680 |
+
.modal-link:hover { border-color: var(--accent); }
|
| 681 |
+
|
| 682 |
+
/* Input wrapper */
|
| 683 |
+
.modal-input-wrap {
|
| 684 |
+
position: relative;
|
| 685 |
+
margin-bottom: 10px;
|
| 686 |
+
}
|
| 687 |
+
|
| 688 |
+
.modal-input {
|
| 689 |
+
width: 100%;
|
| 690 |
+
background: var(--bg-deep);
|
| 691 |
+
border: 1px solid var(--border-bright);
|
| 692 |
+
border-radius: var(--radius-md);
|
| 693 |
+
padding: 11px 44px 11px 14px;
|
| 694 |
+
color: var(--text-primary);
|
| 695 |
+
font-family: var(--font-mono);
|
| 696 |
+
font-size: 13px;
|
| 697 |
+
letter-spacing: 0.04em;
|
| 698 |
+
outline: none;
|
| 699 |
+
transition: border-color var(--transition), box-shadow var(--transition);
|
| 700 |
+
caret-color: var(--accent);
|
| 701 |
+
}
|
| 702 |
+
|
| 703 |
+
.modal-input:focus {
|
| 704 |
+
border-color: rgba(232,197,71,0.4);
|
| 705 |
+
box-shadow: 0 0 0 3px var(--accent-glow);
|
| 706 |
+
}
|
| 707 |
+
|
| 708 |
+
.modal-input::placeholder { color: var(--text-muted); }
|
| 709 |
+
|
| 710 |
+
/* Toggle visibility button */
|
| 711 |
+
.modal-eye {
|
| 712 |
+
position: absolute;
|
| 713 |
+
right: 12px;
|
| 714 |
+
top: 50%;
|
| 715 |
+
transform: translateY(-50%);
|
| 716 |
+
background: none;
|
| 717 |
+
border: none;
|
| 718 |
+
color: var(--text-muted);
|
| 719 |
+
cursor: pointer;
|
| 720 |
+
padding: 4px;
|
| 721 |
+
display: flex;
|
| 722 |
+
align-items: center;
|
| 723 |
+
transition: color var(--transition);
|
| 724 |
+
}
|
| 725 |
+
|
| 726 |
+
.modal-eye:hover { color: var(--text-secondary); }
|
| 727 |
+
|
| 728 |
+
/* Status message */
|
| 729 |
+
.modal-status {
|
| 730 |
+
font-family: var(--font-mono);
|
| 731 |
+
font-size: 12px;
|
| 732 |
+
letter-spacing: 0.03em;
|
| 733 |
+
line-height: 1.5;
|
| 734 |
+
min-height: 18px;
|
| 735 |
+
margin-bottom: 14px;
|
| 736 |
+
opacity: 0;
|
| 737 |
+
transition: opacity 0.2s ease;
|
| 738 |
+
}
|
| 739 |
+
|
| 740 |
+
.modal-status--visible { opacity: 1; }
|
| 741 |
+
.modal-status--error { color: var(--red); }
|
| 742 |
+
.modal-status--success { color: var(--green); }
|
| 743 |
+
|
| 744 |
+
/* Privacy note */
|
| 745 |
+
.modal-privacy {
|
| 746 |
+
display: flex;
|
| 747 |
+
align-items: flex-start;
|
| 748 |
+
gap: 8px;
|
| 749 |
+
padding: 10px 12px;
|
| 750 |
+
background: var(--bg-deep);
|
| 751 |
+
border: 1px solid var(--border);
|
| 752 |
+
border-radius: var(--radius-sm);
|
| 753 |
+
margin-bottom: 20px;
|
| 754 |
+
color: var(--text-muted);
|
| 755 |
+
font-size: 12px;
|
| 756 |
+
font-family: var(--font-mono);
|
| 757 |
+
line-height: 1.5;
|
| 758 |
+
letter-spacing: 0.02em;
|
| 759 |
+
}
|
| 760 |
+
|
| 761 |
+
.modal-privacy svg { flex-shrink: 0; margin-top: 2px; color: var(--accent); opacity: 0.7; }
|
| 762 |
+
|
| 763 |
+
/* Actions row */
|
| 764 |
+
.modal-actions {
|
| 765 |
+
display: flex;
|
| 766 |
+
align-items: center;
|
| 767 |
+
gap: 10px;
|
| 768 |
+
}
|
| 769 |
+
|
| 770 |
+
.modal-btn-skip {
|
| 771 |
+
flex: 0 0 auto;
|
| 772 |
+
padding: 9px 18px;
|
| 773 |
+
background: transparent;
|
| 774 |
+
border: 1px solid var(--border-bright);
|
| 775 |
+
border-radius: var(--radius-md);
|
| 776 |
+
color: var(--text-muted);
|
| 777 |
+
font-family: var(--font-mono);
|
| 778 |
+
font-size: 12px;
|
| 779 |
+
cursor: pointer;
|
| 780 |
+
transition: all var(--transition);
|
| 781 |
+
letter-spacing: 0.04em;
|
| 782 |
+
}
|
| 783 |
+
|
| 784 |
+
.modal-btn-skip:hover {
|
| 785 |
+
color: var(--text-secondary);
|
| 786 |
+
border-color: rgba(255,255,255,0.2);
|
| 787 |
+
}
|
| 788 |
+
|
| 789 |
+
.modal-btn-submit {
|
| 790 |
+
flex: 1;
|
| 791 |
+
padding: 10px 20px;
|
| 792 |
+
background: var(--accent);
|
| 793 |
+
border: none;
|
| 794 |
+
border-radius: var(--radius-md);
|
| 795 |
+
color: var(--bg-deep);
|
| 796 |
+
font-family: var(--font-mono);
|
| 797 |
+
font-size: 12px;
|
| 798 |
+
font-weight: 600;
|
| 799 |
+
letter-spacing: 0.05em;
|
| 800 |
+
cursor: pointer;
|
| 801 |
+
transition: all var(--transition);
|
| 802 |
+
display: flex;
|
| 803 |
+
align-items: center;
|
| 804 |
+
justify-content: center;
|
| 805 |
+
gap: 8px;
|
| 806 |
+
min-height: 40px;
|
| 807 |
+
}
|
| 808 |
+
|
| 809 |
+
.modal-btn-submit:hover:not(:disabled) {
|
| 810 |
+
background: #f0d060;
|
| 811 |
+
box-shadow: 0 4px 18px var(--accent-glow);
|
| 812 |
+
transform: translateY(-1px);
|
| 813 |
+
}
|
| 814 |
+
|
| 815 |
+
.modal-btn-submit:disabled {
|
| 816 |
+
opacity: 0.65;
|
| 817 |
+
cursor: not-allowed;
|
| 818 |
+
transform: none;
|
| 819 |
+
}
|
| 820 |
+
|
| 821 |
+
/* Spinner (reuse typing-indicator style) */
|
| 822 |
+
.modal-spinner {
|
| 823 |
+
display: none;
|
| 824 |
+
align-items: center;
|
| 825 |
+
gap: 4px;
|
| 826 |
+
}
|
| 827 |
+
|
| 828 |
+
.modal-spinner span {
|
| 829 |
+
display: block;
|
| 830 |
+
width: 5px; height: 5px;
|
| 831 |
+
border-radius: 50%;
|
| 832 |
+
background: var(--bg-deep);
|
| 833 |
+
animation: bounce-dot 1.2s ease-in-out infinite;
|
| 834 |
+
}
|
| 835 |
+
|
| 836 |
+
.modal-spinner span:nth-child(2) { animation-delay: 0.15s; }
|
| 837 |
+
.modal-spinner span:nth-child(3) { animation-delay: 0.3s; }
|
| 838 |
+
|
| 839 |
+
/* =========================================
|
| 840 |
+
TASK 1 β MODAL ADDITIONS (two-section)
|
| 841 |
+
========================================= */
|
| 842 |
+
|
| 843 |
+
/* Increase card width slightly for two sections */
|
| 844 |
+
.modal-card {
|
| 845 |
+
max-width: 500px;
|
| 846 |
+
max-height: 92vh;
|
| 847 |
+
overflow-y: auto;
|
| 848 |
+
scrollbar-width: thin;
|
| 849 |
+
scrollbar-color: var(--border-bright) transparent;
|
| 850 |
+
}
|
| 851 |
+
|
| 852 |
+
.modal-card::-webkit-scrollbar { width: 4px; }
|
| 853 |
+
.modal-card::-webkit-scrollbar-track { background: transparent; }
|
| 854 |
+
.modal-card::-webkit-scrollbar-thumb { background: var(--border-bright); border-radius: 2px; }
|
| 855 |
+
|
| 856 |
+
/* Section wrapper */
|
| 857 |
+
.modal-section {
|
| 858 |
+
margin-bottom: 4px;
|
| 859 |
+
}
|
| 860 |
+
|
| 861 |
+
.modal-section-head {
|
| 862 |
+
display: flex;
|
| 863 |
+
align-items: center;
|
| 864 |
+
gap: 8px;
|
| 865 |
+
margin-bottom: 6px;
|
| 866 |
+
}
|
| 867 |
+
|
| 868 |
+
.modal-section-label {
|
| 869 |
+
font-family: var(--font-mono);
|
| 870 |
+
font-size: 11px;
|
| 871 |
+
font-weight: 600;
|
| 872 |
+
letter-spacing: 0.1em;
|
| 873 |
+
text-transform: uppercase;
|
| 874 |
+
color: var(--text-primary);
|
| 875 |
+
}
|
| 876 |
+
|
| 877 |
+
/* Badges */
|
| 878 |
+
.modal-badge {
|
| 879 |
+
font-family: var(--font-mono);
|
| 880 |
+
font-size: 9px;
|
| 881 |
+
padding: 2px 7px;
|
| 882 |
+
border-radius: 3px;
|
| 883 |
+
letter-spacing: 0.06em;
|
| 884 |
+
text-transform: uppercase;
|
| 885 |
+
font-weight: 600;
|
| 886 |
+
}
|
| 887 |
+
|
| 888 |
+
.modal-badge--required {
|
| 889 |
+
background: rgba(232, 93, 74, 0.12);
|
| 890 |
+
color: var(--red);
|
| 891 |
+
border: 1px solid rgba(232, 93, 74, 0.25);
|
| 892 |
+
}
|
| 893 |
+
|
| 894 |
+
.modal-badge--indimodel {
|
| 895 |
+
background: rgba(91, 156, 246, 0.1);
|
| 896 |
+
color: var(--blue);
|
| 897 |
+
border: 1px solid rgba(91, 156, 246, 0.2);
|
| 898 |
+
}
|
| 899 |
+
|
| 900 |
+
/* Provider selector row */
|
| 901 |
+
.provider-row {
|
| 902 |
+
display: flex;
|
| 903 |
+
flex-wrap: wrap;
|
| 904 |
+
gap: 6px;
|
| 905 |
+
margin-bottom: 10px;
|
| 906 |
+
}
|
| 907 |
+
|
| 908 |
+
.provider-btn {
|
| 909 |
+
display: inline-flex;
|
| 910 |
+
align-items: center;
|
| 911 |
+
gap: 5px;
|
| 912 |
+
padding: 5px 11px;
|
| 913 |
+
border: 1px solid var(--border-bright);
|
| 914 |
+
border-radius: var(--radius-sm);
|
| 915 |
+
background: var(--bg-deep);
|
| 916 |
+
color: var(--text-muted);
|
| 917 |
+
font-family: var(--font-mono);
|
| 918 |
+
font-size: 11px;
|
| 919 |
+
letter-spacing: 0.03em;
|
| 920 |
+
cursor: pointer;
|
| 921 |
+
transition: all var(--transition);
|
| 922 |
+
white-space: nowrap;
|
| 923 |
+
}
|
| 924 |
+
|
| 925 |
+
.provider-btn:hover {
|
| 926 |
+
color: var(--text-secondary);
|
| 927 |
+
border-color: rgba(255, 255, 255, 0.2);
|
| 928 |
+
background: var(--bg-hover);
|
| 929 |
+
}
|
| 930 |
+
|
| 931 |
+
.provider-btn.active {
|
| 932 |
+
background: var(--accent-dim);
|
| 933 |
+
border-color: rgba(232, 197, 71, 0.4);
|
| 934 |
+
color: var(--accent);
|
| 935 |
+
}
|
| 936 |
+
|
| 937 |
+
.rec-tag {
|
| 938 |
+
font-size: 9px;
|
| 939 |
+
padding: 1px 5px;
|
| 940 |
+
background: rgba(232, 197, 71, 0.14);
|
| 941 |
+
border: 1px solid rgba(232, 197, 71, 0.2);
|
| 942 |
+
border-radius: 3px;
|
| 943 |
+
color: var(--accent);
|
| 944 |
+
letter-spacing: 0.04em;
|
| 945 |
+
font-weight: 600;
|
| 946 |
+
}
|
| 947 |
+
|
| 948 |
+
/* =========================================
|
| 949 |
+
TASK 2 β MODEL SELECTOR
|
| 950 |
+
========================================= */
|
| 951 |
+
.model-selector {
|
| 952 |
+
display: flex;
|
| 953 |
+
align-items: center;
|
| 954 |
+
gap: 7px;
|
| 955 |
+
}
|
| 956 |
+
|
| 957 |
+
.model-selector-label {
|
| 958 |
+
font-family: var(--font-mono);
|
| 959 |
+
font-size: 11px;
|
| 960 |
+
color: var(--text-muted);
|
| 961 |
+
letter-spacing: 0.06em;
|
| 962 |
+
text-transform: uppercase;
|
| 963 |
+
}
|
| 964 |
+
|
| 965 |
+
.model-select {
|
| 966 |
+
font-family: var(--font-mono);
|
| 967 |
+
font-size: 11px;
|
| 968 |
+
background: var(--bg-elevated);
|
| 969 |
+
border: 1px solid var(--border-bright);
|
| 970 |
+
border-radius: var(--radius-sm);
|
| 971 |
+
color: var(--text-secondary);
|
| 972 |
+
padding: 4px 10px 4px 8px;
|
| 973 |
+
cursor: pointer;
|
| 974 |
+
outline: none;
|
| 975 |
+
letter-spacing: 0.04em;
|
| 976 |
+
transition: border-color var(--transition), box-shadow var(--transition);
|
| 977 |
+
appearance: auto;
|
| 978 |
+
}
|
| 979 |
+
|
| 980 |
+
.model-select:hover {
|
| 981 |
+
border-color: rgba(255, 255, 255, 0.2);
|
| 982 |
+
}
|
| 983 |
+
|
| 984 |
+
.model-select:focus {
|
| 985 |
+
border-color: rgba(232, 197, 71, 0.4);
|
| 986 |
+
box-shadow: 0 0 0 2px var(--accent-glow);
|
| 987 |
+
}
|
| 988 |
+
|
| 989 |
+
/* =========================================
|
| 990 |
+
RESPONSIVE
|
| 991 |
+
========================================= */
|
| 992 |
+
@media (max-width: 700px) {
|
| 993 |
+
/* ββ Sidebar: hidden by default on mobile ββ */
|
| 994 |
+
.sidebar {
|
| 995 |
+
display: none;
|
| 996 |
+
}
|
| 997 |
+
|
| 998 |
+
/* ββ Task 3: sidebar slides in when .mobile-open is added ββ */
|
| 999 |
+
.sidebar.mobile-open {
|
| 1000 |
+
display: flex;
|
| 1001 |
+
position: fixed;
|
| 1002 |
+
top: 0;
|
| 1003 |
+
left: 0;
|
| 1004 |
+
bottom: 0;
|
| 1005 |
+
width: var(--sidebar-w);
|
| 1006 |
+
z-index: 9999;
|
| 1007 |
+
box-shadow: 6px 0 32px rgba(0, 0, 0, 0.6);
|
| 1008 |
+
animation: sidebar-slide-in 0.22s ease forwards;
|
| 1009 |
+
}
|
| 1010 |
+
|
| 1011 |
+
.sidebar-backdrop.active {
|
| 1012 |
+
display: block;
|
| 1013 |
+
animation: backdrop-fade-in 0.22s ease forwards;
|
| 1014 |
+
}
|
| 1015 |
+
|
| 1016 |
+
/* ββ Task 3: make title look tappable ββ */
|
| 1017 |
+
.header-title {
|
| 1018 |
+
cursor: pointer;
|
| 1019 |
+
user-select: none;
|
| 1020 |
+
-webkit-user-select: none;
|
| 1021 |
+
}
|
| 1022 |
+
|
| 1023 |
+
.header-title::after {
|
| 1024 |
+
content: ' \2630';
|
| 1025 |
+
font-size: 13px;
|
| 1026 |
+
opacity: 0.45;
|
| 1027 |
+
margin-left: 4px;
|
| 1028 |
+
}
|
| 1029 |
+
|
| 1030 |
+
/* ββ General mobile tweaks ββ */
|
| 1031 |
+
.chat-header { padding: 12px 16px; }
|
| 1032 |
+
.messages-container { padding: 20px 16px; }
|
| 1033 |
+
.input-area { padding: 12px 16px 16px; }
|
| 1034 |
+
.pill-btn { padding: 5px 8px; font-size: 10px; }
|
| 1035 |
+
|
| 1036 |
+
.modal-card { padding: 20px 18px 18px; max-width: 100%; }
|
| 1037 |
+
.modal-title { font-size: 18px; }
|
| 1038 |
+
|
| 1039 |
+
.provider-row { gap: 5px; }
|
| 1040 |
+
.provider-btn { font-size: 10px; padding: 4px 8px; }
|
| 1041 |
+
.rec-tag { display: none; }
|
| 1042 |
+
}
|
| 1043 |
+
|
| 1044 |
+
/* Desktop: hide backdrop always */
|
| 1045 |
+
@media (min-width: 701px) {
|
| 1046 |
+
.sidebar-backdrop { display: none !important; }
|
| 1047 |
+
.header-title { cursor: default; }
|
| 1048 |
+
}
|
| 1049 |
+
|
| 1050 |
+
/* ββ Sidebar slide animation (global β outside media query for browser compat) ββ */
|
| 1051 |
+
@keyframes sidebar-slide-in {
|
| 1052 |
+
from { transform: translateX(-100%); opacity: 0.6; }
|
| 1053 |
+
to { transform: translateX(0); opacity: 1; }
|
| 1054 |
+
}
|
| 1055 |
+
|
| 1056 |
+
@keyframes backdrop-fade-in {
|
| 1057 |
+
from { opacity: 0; }
|
| 1058 |
+
to { opacity: 1; }
|
| 1059 |
+
}
|
| 1060 |
+
|
| 1061 |
+
/* Backdrop base β hidden on all screens by default, activated via JS class */
|
| 1062 |
+
.sidebar-backdrop {
|
| 1063 |
+
display: none;
|
| 1064 |
+
position: fixed;
|
| 1065 |
+
inset: 0;
|
| 1066 |
+
z-index: 9998;
|
| 1067 |
+
background: rgba(0, 0, 0, 0.55);
|
| 1068 |
+
backdrop-filter: blur(3px);
|
| 1069 |
+
-webkit-backdrop-filter: blur(3px);
|
| 1070 |
+
}
|
| 1071 |
+
|
| 1072 |
+
.sidebar-backdrop.active {
|
| 1073 |
+
display: block;
|
| 1074 |
+
animation: backdrop-fade-in 0.22s ease forwards;
|
| 1075 |
+
}
|
templates/index.html
ADDED
|
@@ -0,0 +1,771 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>FactCheck AI β News Verification System</title>
|
| 7 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 8 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
| 9 |
+
<link href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;500;600&family=Playfair+Display:wght@700;900&family=IBM+Plex+Sans:wght@300;400;500&display=swap" rel="stylesheet">
|
| 10 |
+
<link rel="stylesheet" href="{{ url_for('static', path='/looks.css') }}">
|
| 11 |
+
</head>
|
| 12 |
+
<body>
|
| 13 |
+
|
| 14 |
+
<div class="bg-grid" aria-hidden="true"></div>
|
| 15 |
+
<div class="bg-noise" aria-hidden="true"></div>
|
| 16 |
+
|
| 17 |
+
<!-- ββββββββββββββββββββββββββββββββββββββββ
|
| 18 |
+
API KEY MODAL (Task 1 β two sections)
|
| 19 |
+
ββββββββββββββββββββββββββββββββββββββββ -->
|
| 20 |
+
<div class="modal-overlay" id="api-modal" role="dialog" aria-modal="true" aria-labelledby="modal-title">
|
| 21 |
+
<div class="modal-card">
|
| 22 |
+
|
| 23 |
+
<!-- Close / skip -->
|
| 24 |
+
<button class="modal-close" id="modal-skip" aria-label="Skip for now">
|
| 25 |
+
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"><line x1="18" y1="6" x2="6" y2="18"/><line x1="6" y1="6" x2="18" y2="18"/></svg>
|
| 26 |
+
</button>
|
| 27 |
+
|
| 28 |
+
<!-- Header -->
|
| 29 |
+
<div class="modal-header">
|
| 30 |
+
<span class="modal-icon">🔑</span>
|
| 31 |
+
<div>
|
| 32 |
+
<h2 class="modal-title" id="modal-title">Configure API Keys</h2>
|
| 33 |
+
<p class="modal-subtitle">Connect your Search and LLM APIs to enable verification</p>
|
| 34 |
+
</div>
|
| 35 |
+
</div>
|
| 36 |
+
|
| 37 |
+
<div class="modal-divider"></div>
|
| 38 |
+
|
| 39 |
+
<!-- ββ SECTION 1: Search API ββ -->
|
| 40 |
+
<div class="modal-section">
|
| 41 |
+
<div class="modal-section-head">
|
| 42 |
+
<span class="modal-section-label">Search API</span>
|
| 43 |
+
<span class="modal-badge modal-badge--required">Required</span>
|
| 44 |
+
</div>
|
| 45 |
+
<p class="modal-desc">
|
| 46 |
+
Used to retrieve live sources for claim verification.
|
| 47 |
+
Recommended: <a href="https://serper.dev" target="_blank" rel="noopener" class="modal-link">serper.dev</a> (free tier available).
|
| 48 |
+
</p>
|
| 49 |
+
|
| 50 |
+
<!-- Provider selector buttons -->
|
| 51 |
+
<div class="provider-row" id="search-provider-row">
|
| 52 |
+
<button type="button" class="provider-btn active" data-provider="serper.dev">
|
| 53 |
+
serper.dev<span class="rec-tag">Recommended</span>
|
| 54 |
+
</button>
|
| 55 |
+
<button type="button" class="provider-btn" data-provider="gnews">gnews</button>
|
| 56 |
+
<button type="button" class="provider-btn" data-provider="publicapi.dev">publicapi.dev</button>
|
| 57 |
+
</div>
|
| 58 |
+
|
| 59 |
+
<!-- Key input -->
|
| 60 |
+
<div class="modal-input-wrap">
|
| 61 |
+
<input
|
| 62 |
+
type="password"
|
| 63 |
+
id="search-key-input"
|
| 64 |
+
class="modal-input"
|
| 65 |
+
placeholder="Enter your Search API key…"
|
| 66 |
+
autocomplete="off"
|
| 67 |
+
spellcheck="false"
|
| 68 |
+
/>
|
| 69 |
+
<button class="modal-eye" id="toggle-search-vis" aria-label="Toggle key visibility" type="button">
|
| 70 |
+
<svg id="search-eye-icon" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
| 71 |
+
<path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/>
|
| 72 |
+
</svg>
|
| 73 |
+
</button>
|
| 74 |
+
</div>
|
| 75 |
+
<div class="modal-status" id="search-modal-status" aria-live="polite"></div>
|
| 76 |
+
</div>
|
| 77 |
+
|
| 78 |
+
<div class="modal-divider"></div>
|
| 79 |
+
|
| 80 |
+
<!-- ββ SECTION 2: LLM API ββ -->
|
| 81 |
+
<div class="modal-section">
|
| 82 |
+
<div class="modal-section-head">
|
| 83 |
+
<span class="modal-section-label">LLM API</span>
|
| 84 |
+
<span class="modal-badge modal-badge--indimodel">Required</span>
|
| 85 |
+
</div>
|
| 86 |
+
<p class="modal-desc">
|
| 87 |
+
Powers IndiModel's analysis and trust-scoring.
|
| 88 |
+
Recommended: <a href="https://console.groq.com" target="_blank" rel="noopener" class="modal-link">groq</a> (fast inference, free tier).
|
| 89 |
+
</p>
|
| 90 |
+
|
| 91 |
+
<!-- Provider selector buttons -->
|
| 92 |
+
<div class="provider-row" id="llm-provider-row">
|
| 93 |
+
<button type="button" class="provider-btn active" data-provider="groq">
|
| 94 |
+
groq<span class="rec-tag">Recommended</span>
|
| 95 |
+
</button>
|
| 96 |
+
<button type="button" class="provider-btn" data-provider="anthropic">anthropic</button>
|
| 97 |
+
<button type="button" class="provider-btn" data-provider="openai">openai</button>
|
| 98 |
+
<button type="button" class="provider-btn" data-provider="google">google</button>
|
| 99 |
+
</div>
|
| 100 |
+
|
| 101 |
+
<!-- Key input -->
|
| 102 |
+
<div class="modal-input-wrap">
|
| 103 |
+
<input
|
| 104 |
+
type="password"
|
| 105 |
+
id="llm-key-input"
|
| 106 |
+
class="modal-input"
|
| 107 |
+
placeholder="Enter your LLM API key…"
|
| 108 |
+
autocomplete="off"
|
| 109 |
+
spellcheck="false"
|
| 110 |
+
/>
|
| 111 |
+
<button class="modal-eye" id="toggle-llm-vis" aria-label="Toggle key visibility" type="button">
|
| 112 |
+
<svg id="llm-eye-icon" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
| 113 |
+
<path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/>
|
| 114 |
+
</svg>
|
| 115 |
+
</button>
|
| 116 |
+
</div>
|
| 117 |
+
<div class="modal-status" id="llm-modal-status" aria-live="polite"></div>
|
| 118 |
+
</div>
|
| 119 |
+
|
| 120 |
+
<!-- Privacy note -->
|
| 121 |
+
<div class="modal-privacy">
|
| 122 |
+
<svg width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><rect x="3" y="11" width="18" height="11" rx="2" ry="2"/><path d="M7 11V7a5 5 0 0 1 10 0v4"/></svg>
|
| 123 |
+
<span>All keys stored in session memory only — cleared when you close or refresh this page</span>
|
| 124 |
+
</div>
|
| 125 |
+
|
| 126 |
+
<!-- Actions -->
|
| 127 |
+
<div class="modal-actions">
|
| 128 |
+
<button class="modal-btn-skip" id="modal-skip-2">Skip for now</button>
|
| 129 |
+
<button class="modal-btn-submit" id="modal-submit" type="button">
|
| 130 |
+
<span id="submit-label">Validate & Connect</span>
|
| 131 |
+
<div class="modal-spinner" id="modal-spinner" aria-hidden="true">
|
| 132 |
+
<span></span><span></span><span></span>
|
| 133 |
+
</div>
|
| 134 |
+
</button>
|
| 135 |
+
</div>
|
| 136 |
+
|
| 137 |
+
</div>
|
| 138 |
+
</div>
|
| 139 |
+
<!-- ββββ END MODAL ββββ -->
|
| 140 |
+
|
| 141 |
+
<!-- Task 3 β Mobile sidebar backdrop overlay -->
|
| 142 |
+
<div class="sidebar-backdrop" id="sidebar-backdrop"></div>
|
| 143 |
+
|
| 144 |
+
<div class="app-wrapper">
|
| 145 |
+
|
| 146 |
+
<!-- ββ Sidebar ββ -->
|
| 147 |
+
<aside class="sidebar" id="sidebar">
|
| 148 |
+
<div class="sidebar-brand">
|
| 149 |
+
<span class="brand-mark">⬡</span>
|
| 150 |
+
<span class="brand-name">FactCheck<em>AI</em></span>
|
| 151 |
+
</div>
|
| 152 |
+
|
| 153 |
+
<div class="sidebar-info">
|
| 154 |
+
<p class="sidebar-desc">Submit a claim or paste an article URL. The AI will analyze the content and tell you whether you should trust it.</p>
|
| 155 |
+
</div>
|
| 156 |
+
|
| 157 |
+
<div class="sidebar-tips">
|
| 158 |
+
<span class="tips-label">How it works</span>
|
| 159 |
+
<div class="tip-item">
|
| 160 |
+
<span class="tip-icon">🔍</span>
|
| 161 |
+
<span>Use <strong>query</strong> for claims, headlines, or facts you want verified</span>
|
| 162 |
+
</div>
|
| 163 |
+
<div class="tip-item">
|
| 164 |
+
<span class="tip-icon">🔗</span>
|
| 165 |
+
<span>Use <strong>Article</strong> and paste a URL to analyze a full news article</span>
|
| 166 |
+
</div>
|
| 167 |
+
<div class="tip-item">
|
| 168 |
+
<span class="tip-icon">✅</span>
|
| 169 |
+
<span>Get a trust verdict with reasoning and source context</span>
|
| 170 |
+
</div>
|
| 171 |
+
</div>
|
| 172 |
+
|
| 173 |
+
<div class="sidebar-status">
|
| 174 |
+
<span class="status-dot" id="api-status-dot"></span>
|
| 175 |
+
<span class="status-text" id="api-status-text">Awaiting API Key</span>
|
| 176 |
+
</div>
|
| 177 |
+
|
| 178 |
+
<a class="sidebar-link" href="https://github.com/vishwjeet71" target="_blank" rel="noopener noreferrer">
|
| 179 |
+
<svg width="14" height="14" viewBox="0 0 24 24" fill="currentColor"><path d="M12 0C5.37 0 0 5.37 0 12c0 5.31 3.435 9.795 8.205 11.385.6.105.825-.255.825-.57 0-.285-.015-1.23-.015-2.235-3.015.555-3.795-.735-4.035-1.41-.135-.345-.72-1.41-1.23-1.695-.42-.225-1.02-.78-.015-.795.945-.015 1.62.87 1.845 1.23 1.08 1.815 2.805 1.305 3.495.99.105-.78.42-1.305.765-1.605-2.67-.3-5.46-1.335-5.46-5.925 0-1.305.465-2.385 1.23-3.225-.12-.3-.54-1.53.12-3.18 0 0 1.005-.315 3.3 1.23.96-.27 1.98-.405 3-.405s2.04.135 3 .405c2.295-1.56 3.3-1.23 3.3-1.23.66 1.65.24 2.88.12 3.18.765.84 1.23 1.905 1.23 3.225 0 4.605-2.805 5.625-5.475 5.925.435.375.81 1.095.81 2.22 0 1.605-.015 2.895-.015 3.3 0 .315.225.69.825.57A12.02 12.02 0 0 0 24 12c0-6.63-5.37-12-12-12z"/></svg>
|
| 180 |
+
Documentation
|
| 181 |
+
</a>
|
| 182 |
+
</aside>
|
| 183 |
+
|
| 184 |
+
<!-- ββ Main chat ββ -->
|
| 185 |
+
<main class="chat-area">
|
| 186 |
+
|
| 187 |
+
<header class="chat-header">
|
| 188 |
+
<div class="header-left">
|
| 189 |
+
<!-- Task 3: clicking title on mobile opens sidebar -->
|
| 190 |
+
<h1 class="header-title" id="header-title">News Verification</h1>
|
| 191 |
+
<!-- Task 2: model dropdown replaces session ID -->
|
| 192 |
+
<div class="model-selector">
|
| 193 |
+
<span class="model-selector-label">Model</span>
|
| 194 |
+
<select id="model-select" class="model-select" aria-label="Select model">
|
| 195 |
+
<option value="prototype">Prototype</option>
|
| 196 |
+
<option value="indimodel">IndiModel</option>
|
| 197 |
+
</select>
|
| 198 |
+
</div>
|
| 199 |
+
</div>
|
| 200 |
+
<button class="clear-btn" id="clear-chat" title="Clear conversation">
|
| 201 |
+
<svg width="13" height="13" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><polyline points="3 6 5 6 21 6"/><path d="M19 6l-1 14H6L5 6"/><path d="M10 11v6M14 11v6"/><path d="M9 6V4h6v2"/></svg>
|
| 202 |
+
Clear
|
| 203 |
+
</button>
|
| 204 |
+
</header>
|
| 205 |
+
|
| 206 |
+
<div class="messages-container" id="chat-messages" role="log" aria-live="polite">
|
| 207 |
+
<div class="message bot-message intro-message">
|
| 208 |
+
<div class="message-meta">
|
| 209 |
+
<span class="msg-sender">FactCheck AI</span>
|
| 210 |
+
<span class="msg-time" data-time="now"></span>
|
| 211 |
+
</div>
|
| 212 |
+
<div class="message-body">
|
| 213 |
+
<p>Welcome. Use the <strong>query / Article</strong> toggle in the input box below to choose your mode, then submit your content. I'll analyze it and give you a clear trust verdict.</p>
|
| 214 |
+
</div>
|
| 215 |
+
</div>
|
| 216 |
+
</div>
|
| 217 |
+
|
| 218 |
+
<!-- ββ Input Footer ββ -->
|
| 219 |
+
<footer class="input-area">
|
| 220 |
+
<form id="chat-form" class="input-form" autocomplete="off">
|
| 221 |
+
|
| 222 |
+
<div class="mode-pill">
|
| 223 |
+
<button type="button" class="pill-btn active" id="btn-query">
|
| 224 |
+
<svg width="11" height="11" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"><circle cx="11" cy="11" r="8"/><path d="m21 21-4.35-4.35"/></svg>
|
| 225 |
+
query
|
| 226 |
+
</button>
|
| 227 |
+
<button type="button" class="pill-btn" id="btn-article">
|
| 228 |
+
<svg width="11" height="11" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"><path d="M10 13a5 5 0 0 0 7.54.54l3-3a5 5 0 0 0-7.07-7.07l-1.72 1.71"/><path d="M14 11a5 5 0 0 0-7.54-.54l-3 3a5 5 0 0 0 7.07 7.07l1.71-1.71"/></svg>
|
| 229 |
+
Article
|
| 230 |
+
</button>
|
| 231 |
+
</div>
|
| 232 |
+
|
| 233 |
+
<div class="input-divider" aria-hidden="true"></div>
|
| 234 |
+
|
| 235 |
+
<div class="textarea-wrap">
|
| 236 |
+
<textarea
|
| 237 |
+
id="user-input"
|
| 238 |
+
placeholder="Enter a claim or headline to verify..."
|
| 239 |
+
rows="1"
|
| 240 |
+
aria-label="Message input"
|
| 241 |
+
required
|
| 242 |
+
></textarea>
|
| 243 |
+
</div>
|
| 244 |
+
|
| 245 |
+
<button type="submit" id="send-btn" aria-label="Send">
|
| 246 |
+
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor" width="17" height="17">
|
| 247 |
+
<path d="M2.01 21L23 12 2.01 3 2 10l15 2-15 2z"/>
|
| 248 |
+
</svg>
|
| 249 |
+
</button>
|
| 250 |
+
</form>
|
| 251 |
+
<p class="input-hint"><kbd>Enter</kbd> to send · <kbd>Shift+Enter</kbd> for new line</p>
|
| 252 |
+
</footer>
|
| 253 |
+
|
| 254 |
+
</main>
|
| 255 |
+
</div>
|
| 256 |
+
|
| 257 |
+
<script>
|
| 258 |
+
document.addEventListener('DOMContentLoaded', () => {
|
| 259 |
+
|
| 260 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 261 |
+
// STATE (all session memory β clears on refresh)
|
| 262 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 263 |
+
// ── Session-scoped state (never persisted; lost on refresh/close) ──
let searchApiKey = null;
let llmApiKey = null;
let searchApiProvider = 'serper.dev';
let llmApiProvider = 'groq';

// ── DOM references ──
// Modal shell and footer actions
const apiModal = document.getElementById('api-modal');
const modalSubmit = document.getElementById('modal-submit');
const modalSkip = document.getElementById('modal-skip');
const modalSkip2 = document.getElementById('modal-skip-2');
const modalSpinner = document.getElementById('modal-spinner');
const submitLabel = document.getElementById('submit-label');

// Key inputs, per-section status lines, visibility toggles
const searchKeyInput = document.getElementById('search-key-input');
const llmKeyInput = document.getElementById('llm-key-input');
const searchModalStatus = document.getElementById('search-modal-status');
const llmModalStatus = document.getElementById('llm-modal-status');
const toggleSearchVis = document.getElementById('toggle-search-vis');
const toggleLLMVis = document.getElementById('toggle-llm-vis');
const searchEyeIcon = document.getElementById('search-eye-icon');
const llmEyeIcon = document.getElementById('llm-eye-icon');

// Sidebar connectivity indicator
const apiStatusDot = document.getElementById('api-status-dot');
const apiStatusText = document.getElementById('api-status-text');

// Chat surface and composer
const chatMessages = document.getElementById('chat-messages');
const chatForm = document.getElementById('chat-form');
const userInput = document.getElementById('user-input');
const btnquery = document.getElementById('btn-query');
const btnArticle = document.getElementById('btn-article');
const clearBtn = document.getElementById('clear-chat');
const modelSelect = document.getElementById('model-select');

// Mobile sidebar (Task 3)
const sidebar = document.getElementById('sidebar');
const sidebarBackdrop = document.getElementById('sidebar-backdrop');
const headerTitle = document.getElementById('header-title');

// Stamp the intro message with the page-load time.
document.querySelectorAll('[data-time="now"]').forEach(el => { el.textContent = getTime(); });
|
| 305 |
+
|
| 306 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 307 |
+
// TASK 3 β MOBILE SIDEBAR TOGGLE
|
| 308 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 309 |
+
// True when the viewport is narrow enough for the mobile layout.
function isMobile() {
  return window.innerWidth <= 700;
}
|
| 310 |
+
|
| 311 |
+
// Slide the sidebar in on mobile and dim the content behind it.
function openSidebar() {
  sidebarBackdrop.classList.add('active');
  sidebar.classList.add('mobile-open');
}
|
| 315 |
+
|
| 316 |
+
// Dismiss the mobile sidebar and its backdrop overlay.
function closeSidebar() {
  sidebarBackdrop.classList.remove('active');
  sidebar.classList.remove('mobile-open');
}
|
| 320 |
+
|
| 321 |
+
// Tapping the header title toggles the sidebar — mobile layout only.
headerTitle.addEventListener('click', () => {
  if (!isMobile()) return;
  if (sidebar.classList.contains('mobile-open')) {
    closeSidebar();
  } else {
    openSidebar();
  }
});

// Tapping the dimmed backdrop always dismisses the sidebar.
sidebarBackdrop.addEventListener('click', closeSidebar);

// Leaving the mobile breakpoint cleans up any open overlay state.
window.addEventListener('resize', () => {
  if (!isMobile()) closeSidebar();
});
|
| 331 |
+
|
| 332 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 333 |
+
// MODAL β Open / Close
|
| 334 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 335 |
+
// Reveal the API-key modal with its entry transition, focus the first
// key field, and wipe any stale status messages.
function openModal() {
  apiModal.classList.remove('hidden');
  // Two-step class change so the CSS opacity transition actually runs.
  requestAnimationFrame(() => apiModal.classList.add('visible'));
  searchKeyInput.focus();
  clearAllModalStatus();
}
|
| 341 |
+
|
| 342 |
+
// Play the exit transition, then fully hide the modal once it ends
// (250ms matches the CSS transition duration).
function closeModal() {
  apiModal.classList.remove('visible');
  setTimeout(() => apiModal.classList.add('hidden'), 250);
}
|
| 346 |
+
|
| 347 |
+
// The key modal is shown immediately on page load.
openModal();

// Skipping discards any entered keys and downgrades the sidebar status.
function handleSkip() {
  searchApiKey = null;
  llmApiKey = null;
  updateSidebarStatus(false, false);
  closeModal();
}

modalSkip.addEventListener('click', handleSkip);
modalSkip2.addEventListener('click', handleSkip);

// Clicking the dimmed area outside the dialog counts as a skip.
apiModal.addEventListener('click', (e) => {
  if (e.target === apiModal) handleSkip();
});
|
| 362 |
+
|
| 363 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 364 |
+
// TASK 1 β PROVIDER BUTTONS
|
| 365 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 366 |
+
// Provider picker rows: exactly one button active per row; switching
// providers also clears that section's now-stale status message.
function wireProviderRow(rowSelector, onPick) {
  const buttons = document.querySelectorAll(rowSelector + ' .provider-btn');
  buttons.forEach(btn => {
    btn.addEventListener('click', () => {
      buttons.forEach(b => b.classList.remove('active'));
      btn.classList.add('active');
      onPick(btn.dataset.provider);
    });
  });
}

wireProviderRow('#search-provider-row', provider => {
  searchApiProvider = provider;
  clearSectionStatus('search');
});

wireProviderRow('#llm-provider-row', provider => {
  llmApiProvider = provider;
  clearSectionStatus('llm');
});

// Eye icons flip their key input between masked and visible.
toggleSearchVis.addEventListener('click', () => toggleEye(searchKeyInput, searchEyeIcon));
toggleLLMVis.addEventListener('click', () => toggleEye(llmKeyInput, llmEyeIcon));
|
| 389 |
+
|
| 390 |
+
// Swap a key input between masked and plain-text display, updating the
// accompanying eye icon (open eye = masked, slashed eye = revealed).
function toggleEye(input, icon) {
  const revealing = input.type === 'password';
  if (revealing) {
    input.type = 'text';
    icon.innerHTML = '<path d="M17.94 17.94A10.07 10.07 0 0 1 12 20c-7 0-11-8-11-8a18.45 18.45 0 0 1 5.06-5.94M9.9 4.24A9.12 9.12 0 0 1 12 4c7 0 11 8 11 8a18.5 18.5 0 0 1-2.16 3.19m-6.72-1.07a3 3 0 1 1-4.24-4.24"/><line x1="1" y1="1" x2="23" y2="23"/>';
  } else {
    input.type = 'password';
    icon.innerHTML = '<path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"/><circle cx="12" cy="12" r="3"/>';
  }
}
|
| 397 |
+
|
| 398 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 399 |
+
// TASK 1 β VALIDATE & SAVE
|
| 400 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 401 |
+
// Validation triggers: the submit button, or Enter in either key field.
modalSubmit.addEventListener('click', validateAndSave);
[searchKeyInput, llmKeyInput].forEach(input => {
  input.addEventListener('keydown', e => {
    if (e.key === 'Enter') validateAndSave();
  });
});
|
| 404 |
+
|
| 405 |
+
// Validate whatever keys were entered and, on success, keep them in
// session state, refresh the sidebar indicator, and dismiss the modal.
// The Search key is mandatory here; the LLM key is optional (only
// IndiModel requires it at chat time).
async function validateAndSave() {
  const sKey = searchKeyInput.value.trim();
  const lKey = llmKeyInput.value.trim();

  // Cheap local sanity checks before any network round-trip.
  if (!sKey) {
    setSectionStatus('search', 'error', '⚠️ Please enter a Search API key.');
    return;
  }
  if (sKey.length < 20) {
    setSectionStatus('search', 'error', '⚠️ That key looks too short β check it and try again.');
    return;
  }
  if (lKey && lKey.length < 20) {
    setSectionStatus('llm', 'error', '⚠️ That LLM key looks too short β check it and try again.');
    return;
  }

  setLoading(true);
  clearAllModalStatus();

  const searchOk = await validateSearchKey(sKey, searchApiProvider);
  if (!searchOk) {
    setLoading(false);
    return;
  }

  if (lKey) {
    const llmOk = await validateLLMKey(lKey, llmApiProvider);
    if (!llmOk) {
      setLoading(false);
      return;
    }
  }

  setLoading(false);
  updateSidebarStatus(true, Boolean(llmApiKey));
  // Leave the success messages visible for a beat before closing.
  setTimeout(closeModal, 900);
}
|
| 437 |
+
|
| 438 |
+
// Probe the selected search provider with the supplied key.
// On success β or when the browser blocks the probe (CORS/network) β
// the key is stored in session state; returns true/false so the caller
// can stop on failure.
async function validateSearchKey(key, provider) {
  try {
    let res;
    switch (provider) {
      case 'serper.dev':
        res = await fetch('https://google.serper.dev/search', {
          method: 'POST',
          headers: { 'X-API-KEY': key, 'Content-Type': 'application/json' },
          body: JSON.stringify({ q: 'test', num: 1 })
        });
        break;
      case 'gnews':
        res = await fetch(
          'https://gnews.io/api/v4/top-headlines?token=' + encodeURIComponent(key) + '&max=1&lang=en'
        );
        break;
      case 'publicapi.dev':
        // No CORS-friendly validation endpoint for this provider, so
        // the key is accepted on format alone.
        searchApiKey = key;
        setSectionStatus('search', 'success', '✅ Key saved for publicapi.dev.');
        return true;
    }

    if (res.ok) {
      searchApiKey = key;
      setSectionStatus('search', 'success', '✅ Search API key validated.');
      return true;
    }
    if (res.status === 401 || res.status === 403) {
      setSectionStatus('search', 'error', '❌ Invalid key β access denied. Double-check and retry.');
    } else if (res.status === 429) {
      setSectionStatus('search', 'error', '⚠️ Rate limit hit on this key. Wait a moment then retry.');
    } else {
      setSectionStatus('search', 'error', '❌ Unexpected error (HTTP ' + res.status + ').');
    }
    return false;
  } catch (err) {
    // CORS or network failure: keep the key and defer real validation
    // to the first chat request.
    searchApiKey = key;
    setSectionStatus('search', 'success', '✅ Key saved. Remote validation blocked by browser security β will be tested on first request.');
    return true;
  }
}
|
| 479 |
+
|
| 480 |
+
// Probe the selected LLM provider with the supplied key.
// groq/openai expose a CORS-reachable /models endpoint; anthropic and
// google are checked on key-prefix format only. Network/CORS failures
// save the key optimistically (tested on first real request).
async function validateLLMKey(key, provider) {
  try {
    let res;
    if (provider === 'groq' || provider === 'openai') {
      const endpoint = provider === 'groq'
        ? 'https://api.groq.com/openai/v1/models'
        : 'https://api.openai.com/v1/models';
      res = await fetch(endpoint, {
        headers: { 'Authorization': 'Bearer ' + key }
      });
    } else if (provider === 'anthropic') {
      if (!key.startsWith('sk-ant-')) {
        setSectionStatus('llm', 'error', '⚠️ Anthropic keys must start with "sk-ant-". Check your key.');
        return false;
      }
      llmApiKey = key;
      setSectionStatus('llm', 'success', '✅ Anthropic key format valid and saved.');
      return true;
    } else if (provider === 'google') {
      if (!key.startsWith('AIza')) {
        setSectionStatus('llm', 'error', '⚠️ Google API keys must start with "AIza". Check your key.');
        return false;
      }
      llmApiKey = key;
      setSectionStatus('llm', 'success', '✅ Google key format valid and saved.');
      return true;
    }

    if (res.ok) {
      llmApiKey = key;
      setSectionStatus('llm', 'success', '✅ LLM API key validated.');
      return true;
    }
    if (res.status === 401 || res.status === 403) {
      setSectionStatus('llm', 'error', '❌ Invalid LLM key β access denied. Double-check and retry.');
    } else {
      setSectionStatus('llm', 'error', '❌ Unexpected error (HTTP ' + res.status + ').');
    }
    return false;
  } catch (err) {
    // CORS or network failure β save and verify on first use.
    llmApiKey = key;
    setSectionStatus('llm', 'success', '✅ Key saved. Remote validation blocked by browser security β will be tested on first request.');
    return true;
  }
}
|
| 526 |
+
|
| 527 |
+
// Toggle the modal's busy state: swap the label for the spinner and
// disable the submit button plus both key inputs.
function setLoading(on) {
  submitLabel.style.display = on ? 'none' : 'inline';
  modalSpinner.style.display = on ? 'flex' : 'none';
  [modalSubmit, searchKeyInput, llmKeyInput].forEach(el => { el.disabled = on; });
}
|
| 534 |
+
|
| 535 |
+
// Show a status line under one modal section ('search' or 'llm');
// `type` ('success'/'error') picks the colour modifier class.
function setSectionStatus(section, type, msg) {
  const target = section === 'search' ? searchModalStatus : llmModalStatus;
  target.innerHTML = msg;
  target.className = `modal-status modal-status--${type} modal-status--visible`;
}
|
| 540 |
+
|
| 541 |
+
// Hide and empty the status line of one modal section.
function clearSectionStatus(section) {
  const target = section === 'search' ? searchModalStatus : llmModalStatus;
  target.className = 'modal-status';
  target.innerHTML = '';
}
|
| 546 |
+
|
| 547 |
+
// Reset both modal status lines at once.
function clearAllModalStatus() {
  clearSectionStatus('search');
  clearSectionStatus('llm');
}
|
| 551 |
+
|
| 552 |
+
// Reflect current key connectivity in the sidebar indicator:
// green dot when the (mandatory) Search key is connected, red otherwise;
// the label additionally notes whether the LLM key is connected.
function updateSidebarStatus(searchConnected, llmConnected) {
  let colour, label;
  if (searchConnected) {
    colour = 'var(--green)';
    label = llmConnected ? 'Search + LLM Connected' : 'Search Connected';
  } else {
    colour = 'var(--red)';
    label = 'No API Key';
  }
  apiStatusDot.style.background = colour;
  apiStatusDot.style.boxShadow = '0 0 8px ' + colour;
  apiStatusText.textContent = label;
}
|
| 567 |
+
|
| 568 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 569 |
+
// TASK 2 β MODEL SELECTOR
|
| 570 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 571 |
+
// Switching to IndiModel without both keys re-opens the key modal.
modelSelect.addEventListener('change', () => {
  const missingKeys = !searchApiKey || !llmApiKey;
  if (modelSelect.value === 'indimodel' && missingKeys) {
    clearAllModalStatus();
    openModal();
  }
});
|
| 577 |
+
|
| 578 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 579 |
+
// MODE TOGGLE (query / Article)
|
| 580 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 581 |
+
// Active input mode: 'query' (claim text) or 'article_link' (URL).
let inputType = 'query';

// Switch composer mode, syncing pill highlight, placeholder, and focus.
function setMode(type) {
  inputType = type;
  const queryMode = type === 'query';
  btnquery.classList.toggle('active', queryMode);
  btnArticle.classList.toggle('active', !queryMode);
  userInput.placeholder = queryMode
    ? 'Enter a claim or headline to verify...'
    : 'Paste an article URL here...';
  userInput.focus();
}

btnquery.addEventListener('click', () => setMode('query'));
btnArticle.addEventListener('click', () => setMode('article_link'));
|
| 596 |
+
|
| 597 |
+
// Clear the conversation (the intro message stays): fade each bubble
// out, then remove it from the DOM once the 280ms animation finishes.
clearBtn.addEventListener('click', () => {
  const bubbles = chatMessages.querySelectorAll('.message:not(.intro-message)');
  bubbles.forEach(bubble => {
    bubble.classList.add('fade-out');
    setTimeout(() => bubble.remove(), 280);
  });
});
|
| 603 |
+
|
| 604 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 605 |
+
// FORM SUBMIT
|
| 606 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 607 |
+
// Main submit pipeline: echo the user's message, enforce per-model key
// requirements, then either show the IndiModel teaser or POST to the
// Prototype backend at /chat.
chatForm.addEventListener('submit', async (e) => {
  e.preventDefault();

  const text = userInput.value.trim();
  if (!text) return;
  const model = modelSelect.value;

  appendMessage(text, 'user');
  userInput.value = '';
  userInput.style.height = 'auto';

  // Both models need a Search key; IndiModel additionally needs an LLM key.
  if (!searchApiKey) {
    appendNoKeyMessage('search');
    return;
  }
  if (model === 'indimodel' && !llmApiKey) {
    appendNoKeyMessage('llm');
    return;
  }

  if (model === 'indimodel') {
    // IndiModel is not implemented yet β show a short "coming soon" note.
    const lid = appendLoading();
    setTimeout(() => {
      replaceLoading(lid,
        'IndiModel β Coming Very Soon!\n\n' +
        'This model is currently in active development. It will use your connected LLM API (' + llmApiProvider + ') ' +
        'together with the Search API (' + searchApiProvider + ') to perform deep claim analysis, ' +
        'cross-reference sources, detect manipulation patterns, and deliver verified trust verdicts.\n\n' +
        'Your API keys are saved and ready. Switch to Prototype to use the current pipeline while IndiModel is being built.'
      );
    }, 700);
    return;
  }

  // Prototype β the current backend pipeline.
  const loadingId = appendLoading();
  try {
    const payload = {
      text,
      inputType,
      searchApiKey,
      searchProvider: searchApiProvider,
      llmApiKey,
      llmProvider: llmApiProvider
    };
    const res = await fetch('/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    });
    if (!res.ok) throw new Error('HTTP ' + res.status);
    const data = await res.json();
    replaceLoading(loadingId, data.response);
  } catch (err) {
    console.error(err);
    replaceLoading(loadingId, 'Connection error. Please check your network and try again.', true);
  }
});
|
| 661 |
+
|
| 662 |
+
// Textarea auto-resize
|
| 663 |
+
// Grow the textarea with its content, capped at 160px.
userInput.addEventListener('input', () => {
  userInput.style.height = 'auto';
  const next = Math.min(userInput.scrollHeight, 160);
  userInput.style.height = next + 'px';
});

// Enter sends the form; Shift+Enter inserts a newline.
userInput.addEventListener('keydown', (e) => {
  if (e.key !== 'Enter' || e.shiftKey) return;
  e.preventDefault();
  chatForm.dispatchEvent(new Event('submit'));
});
|
| 673 |
+
|
| 674 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 675 |
+
// HELPERS
|
| 676 |
+
// ββββββββββββββββββββββββββββββββββββββββββ
|
| 677 |
+
// Current wall-clock time (locale hh:mm) for message metadata stamps.
function getTime() {
  const opts = { hour: '2-digit', minute: '2-digit' };
  return new Date().toLocaleTimeString([], opts);
}
|
| 680 |
+
|
| 681 |
+
// Append a chat bubble for `type` ('user' or 'bot') containing the
// HTML-escaped text, then animate it in and scroll it into view.
function appendMessage(text, type) {
  const sender = type === 'user' ? 'You' : 'FactCheck AI';
  const div = document.createElement('div');
  div.classList.add('message', type + '-message');
  div.innerHTML =
    '<div class="message-meta">' +
      '<span class="msg-sender">' + sender + '</span>' +
      '<span class="msg-time">' + getTime() + '</span>' +
    '</div>' +
    '<div class="message-body"><p>' + escapeHtml(text) + '</p></div>';
  chatMessages.appendChild(div);
  scrollToBottom();
  requestAnimationFrame(() => div.classList.add('visible'));
}
|
| 694 |
+
|
| 695 |
+
// Post a bot bubble explaining which API key ('search' or 'llm') is
// missing, with an inline button that clears the inputs and re-opens
// the key modal.
function appendNoKeyMessage(keyType) {
  const isLLM = keyType === 'llm';
  const keyLabel = isLLM ? 'LLM' : 'Search';
  const plural = isLLM ? 's' : '';
  const reason = isLLM
    ? 'IndiModel requires a connected LLM API key to function.'
    : 'Verification requires live search access β connect a Search API key to continue.';

  const div = document.createElement('div');
  div.classList.add('message', 'bot-message', 'no-key-message');
  div.innerHTML =
    '<div class="message-meta">' +
      '<span class="msg-sender">FactCheck AI</span>' +
      '<span class="msg-time">' + getTime() + '</span>' +
    '</div>' +
    '<div class="message-body">' +
      '<p>🔑 <strong>No ' + keyLabel + ' API key detected.</strong> ' + reason + '</p>' +
      '<p style="margin-top:10px">Please enter your API key' + plural + ' to continue.</p>' +
      '<button class="enter-key-btn reopen-modal-btn">' +
        '<svg width="13" height="13" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"><rect x="3" y="11" width="18" height="11" rx="2" ry="2"/><path d="M7 11V7a5 5 0 0 1 10 0v4"/></svg>' +
        'Enter API Key' + plural +
      '</button>' +
    '</div>';

  chatMessages.appendChild(div);
  scrollToBottom();
  requestAnimationFrame(() => div.classList.add('visible'));

  div.querySelector('.reopen-modal-btn').addEventListener('click', () => {
    searchKeyInput.value = '';
    llmKeyInput.value = '';
    clearAllModalStatus();
    openModal();
  });
}
|
| 726 |
+
|
| 727 |
+
// Insert a temporary "Analyzing…" bubble and return its element id so
// replaceLoading() can later swap in the real response.
function appendLoading() {
  const id = 'load-' + Date.now();
  const div = document.createElement('div');
  div.id = id;
  div.classList.add('message', 'bot-message', 'loading-message');
  div.innerHTML =
    '<div class="message-meta">' +
      '<span class="msg-sender">FactCheck AI</span>' +
      '<span class="msg-time">' + getTime() + '</span>' +
    '</div>' +
    '<div class="message-body">' +
      '<div class="typing-indicator"><span></span><span></span><span></span></div>' +
      '<span class="loading-text">Analyzing…</span>' +
    '</div>';
  chatMessages.appendChild(div);
  requestAnimationFrame(() => div.classList.add('visible'));
  scrollToBottom();
  return id;
}
|
| 746 |
+
|
| 747 |
+
// Replace a loading bubble's body with the final response text,
// splitting on blank lines into escaped <p> paragraphs. `isError`
// additionally applies the error styling.
function replaceLoading(id, text, isError) {
  const el = document.getElementById(id);
  if (!el) return;
  el.classList.remove('loading-message');
  if (isError) el.classList.add('error-message');
  const html = text
    .split('\n\n')
    .map(p => '<p>' + escapeHtml(p.trim()) + '</p>')
    .join('');
  el.querySelector('.message-body').innerHTML = html;
  scrollToBottom();
}
|
| 756 |
+
|
| 757 |
+
// Escape HTML-significant characters so user/backend text cannot
// inject markup when inserted via innerHTML.
// BUG FIX: as previously written each .replace() mapped a character to
// itself (the entity names had been decoded to literals), making the
// function a no-op and leaving chat output open to HTML injection.
function escapeHtml(s) {
  return s
    .replace(/&/g, '&amp;')   // must run first so later entities aren't double-escaped
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;');
}
|
| 764 |
+
|
| 765 |
+
// Smooth-scroll the chat log so its newest message is in view.
function scrollToBottom() {
  const top = chatMessages.scrollHeight;
  chatMessages.scrollTo({ top, behavior: 'smooth' });
}
|
| 768 |
+
});
|
| 769 |
+
</script>
|
| 770 |
+
</body>
|
| 771 |
+
</html>
|