File size: 2,888 Bytes
208266a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
from src.db.vector_store import NewsVectorStore
from src.models.test_inference import BiasPredictor
from src.analysis.source_bias import get_source_bias, get_source_record
from collections import defaultdict


class NewsAnalysisPipeline:
    """End-to-end NewsLens pipeline.

    Retrieves articles for a topic from the vector store, classifies each
    article's text as Biased / Not Biased, and aggregates label counts per
    news source.
    """

    def __init__(self):
        # Loading the store and the model can take a while, so report progress.
        print("Initializing NewsLens pipeline...")
        self.vector_store = NewsVectorStore()
        self.bias_predictor = BiasPredictor()
        print("Pipeline ready.")

    def analyze(self, topic: str, top_k: int = 10) -> dict:
        """Analyze up to *top_k* articles matching *topic*.

        Returns a dict with the topic, one result entry per retrieved
        article (source metadata, bias prediction, probabilities,
        similarity score), and a per-source summary of label counts.
        An empty-but-well-formed payload is returned when nothing matches.
        """
        docs = self.vector_store.query(topic, top_k=top_k)
        if not docs:
            return {"topic": topic, "results": [], "summary": {}}

        # Classify all article texts in a single batch call.
        preds = self.bias_predictor.predict_batch([d["text"] for d in docs])

        results = []
        for doc, pred in zip(docs, preds):
            record = get_source_record(doc["source"])
            # Model emits [P(Not Biased), P(Biased)].
            p_clean, p_biased = pred["probabilities"][0], pred["probabilities"][1]
            results.append({
                "source": doc["source"],
                "source_bias": record["bias"],
                "source_bias_provenance": record["provenance"],
                "url": doc["url"],
                "title": doc.get("title", ""),
                "description": doc.get("description", ""),
                "publishedAt": doc.get("publishedAt", ""),
                "text": doc["text"],
                "text_label": pred["label"],
                "confidence": pred["confidence"],
                "probabilities": {
                    "Not Biased": round(p_clean, 4),
                    "Biased": round(p_biased, 4),
                },
                "similarity_score": doc["similarity_score"],
            })

        # Tally label counts per source; the last-seen source_bias wins,
        # which is stable since it is constant for a given source.
        tally: dict = {}
        for entry in results:
            bucket = tally.setdefault(entry["source"], {
                "source_bias": "Unknown",
                "Biased": 0,
                "Not Biased": 0,
                "total": 0,
            })
            bucket["source_bias"] = entry["source_bias"]
            bucket[entry["text_label"]] += 1
            bucket["total"] += 1

        return {"topic": topic, "results": results, "summary": tally}


if __name__ == "__main__":
    pipeline = NewsAnalysisPipeline()
    output = pipeline.analyze("climate change", top_k=10)

    print(f"\n=== Results for: '{output['topic']}' ===")
    for r in output["results"]:
        # Fix: separate the source lean from the source name — previously the
        # two values ran together with no delimiter (e.g. "Left-CenterCNN:").
        print(
            f"[{r['text_label']}] ({r['confidence']:.2f}) | "
            f"Source lean: {r['source_bias']} | {r['source']}: {r['text'][:80]}..."
        )

    print("\n=== Source Summary ===")
    for source, counts in output["summary"].items():
        print(
            f"{source} ({counts['source_bias']}): "
            f"Biased={counts['Biased']}, Not Biased={counts['Not Biased']}, "
            f"Total={counts['total']}"
        )