ifieryarrows committed
Commit aa191f1 · verified · 1 parent: 5e59c7d

Sync from GitHub (tests passed)

app/ai_engine.py CHANGED
@@ -30,7 +30,16 @@ import xgboost as xgb
30
  from sklearn.metrics import mean_absolute_error, mean_squared_error
31
 
32
  from app.db import SessionLocal, init_db
33
- from app.models import NewsArticle, NewsSentiment, DailySentiment, PriceBar
34
  from app.settings import get_settings
35
  from app.features import build_feature_matrix, get_feature_descriptions
36
  from app.lock import pipeline_lock
@@ -52,6 +61,116 @@ HYBRID_FALLBACK_PARSE_MODEL_NAME = "hybrid_fallback_parse"
52
  LLM_LABELS = {"BULLISH", "BEARISH", "NEUTRAL"}
53
  LLM_SCORING_MAX_TOKENS_PRIMARY = 2000
54
  LLM_SCORING_MAX_TOKENS_RETRY = 6000
55
 
56
 
57
  # =============================================================================
@@ -692,6 +811,837 @@ def score_batch_with_finbert(articles: list) -> list[dict]:
692
  return results
693
 
694
695
  def score_unscored_articles(
696
  session: Session,
697
  chunk_size: int = 12
@@ -1374,18 +2324,31 @@ def run_full_pipeline(
1374
  Returns:
1375
  Dict with results from each stage
1376
  """
 
1377
  results = {
1378
  "scored_articles": 0,
 
1379
  "aggregated_days": 0,
 
1380
  "model_result": None,
1381
  "timestamp": datetime.now(timezone.utc).isoformat(),
1382
  }
1383
 
1384
  with SessionLocal() as session:
1385
  if score_sentiment:
1386
- results["scored_articles"] = score_unscored_articles(session)
1387
 
1388
  if aggregate_sentiment:
1389
  results["aggregated_days"] = aggregate_daily_sentiment(session)
1390
 
1391
  if train_model:
@@ -1426,6 +2389,18 @@ def main():
1426
  action="store_true",
1427
  help="Run sentiment scoring + daily aggregation (no training)"
1428
  )
1429
  parser.add_argument(
1430
  "--target-symbol",
1431
  type=str,
@@ -1452,8 +2427,9 @@ def main():
1452
  score = args.run_all or args.score_only or args.refresh_sentiment
1453
  aggregate = args.run_all or args.aggregate_only or args.refresh_sentiment
1454
  train = args.run_all or args.train_only
 
1455
 
1456
- if not (score or aggregate or train):
1457
  parser.print_help()
1458
  return
1459
 
@@ -1463,6 +2439,26 @@ def main():
1463
 
1464
  # Run pipeline
1465
  def do_run():
1466
  return run_full_pipeline(
1467
  target_symbol=args.target_symbol,
1468
  score_sentiment=score,
@@ -1487,9 +2483,21 @@ def main():
1487
 
1488
  if score:
1489
  print(f"\nSentiment Scoring: {results['scored_articles']} articles")
 
 
 
 
 
 
1490
 
1491
  if aggregate:
1492
  print(f"Daily Aggregation: {results['aggregated_days']} days")
 
 
 
 
 
 
1493
 
1494
  if train and results.get("model_result"):
1495
  mr = results["model_result"]
 
30
  from sklearn.metrics import mean_absolute_error, mean_squared_error
31
 
32
  from app.db import SessionLocal, init_db
33
+ from app.models import (
34
+ NewsArticle,
35
+ NewsSentiment,
36
+ DailySentiment,
37
+ PriceBar,
38
+ NewsProcessed,
39
+ NewsRaw,
40
+ NewsSentimentV2,
41
+ DailySentimentV2,
42
+ )
43
  from app.settings import get_settings
44
  from app.features import build_feature_matrix, get_feature_descriptions
45
  from app.lock import pipeline_lock
 
61
  LLM_LABELS = {"BULLISH", "BEARISH", "NEUTRAL"}
62
  LLM_SCORING_MAX_TOKENS_PRIMARY = 2000
63
  LLM_SCORING_MAX_TOKENS_RETRY = 6000
64
+ LLM_V2_LABEL_THRESHOLD = 0.15
65
+ LLM_V2_EVENT_TYPES = {
66
+ "supply_disruption",
67
+ "supply_expansion",
68
+ "demand_increase",
69
+ "demand_decrease",
70
+ "inventory_draw",
71
+ "inventory_build",
72
+ "policy_support",
73
+ "policy_drag",
74
+ "macro_usd_up",
75
+ "macro_usd_down",
76
+ "cost_push",
77
+ "mixed_unclear",
78
+ "non_copper",
79
+ }
80
+ LLM_V2_EVENT_SIGN = {
81
+ "supply_disruption": 1,
82
+ "inventory_draw": 1,
83
+ "demand_increase": 1,
84
+ "policy_support": 1,
85
+ "macro_usd_down": 1,
86
+ "cost_push": 1,
87
+ "supply_expansion": -1,
88
+ "inventory_build": -1,
89
+ "demand_decrease": -1,
90
+ "policy_drag": -1,
91
+ "macro_usd_up": -1,
92
+ "mixed_unclear": 0,
93
+ "non_copper": 0,
94
+ }
95
+ LLM_V2_EVENT_STRENGTH = {
96
+ "supply_disruption": 1.0,
97
+ "inventory_draw": 0.9,
98
+ "demand_increase": 0.95,
99
+ "policy_support": 0.8,
100
+ "macro_usd_down": 0.7,
101
+ "cost_push": 0.75,
102
+ "supply_expansion": 1.0,
103
+ "inventory_build": 0.9,
104
+ "demand_decrease": 0.95,
105
+ "policy_drag": 0.8,
106
+ "macro_usd_up": 0.7,
107
+ "mixed_unclear": 0.25,
108
+ "non_copper": 0.0,
109
+ }
110
+ LLM_V2_SYSTEM_PROMPT = """You are a Senior Copper Futures Analyst focused on COMEX HG=F front-month contract.
111
+ Your job is to estimate 1-5 trading day copper price impact from each article.
112
+
113
+ Core principle:
114
+ Classify by expected HG=F price reaction, NOT by whether the news is "good" or "bad" for the economy/company.
115
+
116
+ Output requirements:
117
+ Return ONLY a JSON array. One object per input id.
118
+ Each object must contain exactly:
119
+ - id (integer)
120
+ - label ("BULLISH" | "BEARISH" | "NEUTRAL")
121
+ - impact_score (number, -1.00 to 1.00, two decimals)
122
+ - confidence (number, 0.00 to 1.00, two decimals)
123
+ - relevance (number, 0.00 to 1.00, two decimals)
124
+ - event_type (one of: supply_disruption, supply_expansion, demand_increase, demand_decrease, inventory_draw, inventory_build, policy_support, policy_drag, macro_usd_up, macro_usd_down, cost_push, mixed_unclear, non_copper)
125
+ - reasoning (single line, <= 160 chars)
126
+
127
+ Copper-specific reasoning rules:
128
+ 1) Supply tightening is typically BULLISH for copper price.
129
+ 2) Supply expansion is typically BEARISH.
130
+ 3) Demand increase is typically BULLISH.
131
+ 4) Demand decrease is typically BEARISH.
132
+ 5) USD stronger is usually BEARISH for dollar-denominated copper; USD weaker is usually BULLISH.
133
+ 6) If article is not materially related to copper supply/demand/pricing, use non_copper + NEUTRAL with low relevance/confidence.
134
+ 7) Use NEUTRAL only when net effect is truly mixed/unclear within 1-5 day horizon.
135
+
136
+ Label mapping:
137
+ - impact_score >= 0.15 => BULLISH
138
+ - impact_score <= -0.15 => BEARISH
139
+ - otherwise => NEUTRAL
140
+ """
141
+ LLM_SCORING_RESPONSE_FORMAT_V2 = {
142
+ "type": "json_schema",
143
+ "json_schema": {
144
+ "name": "news_sentiment_scores_v2",
145
+ "strict": True,
146
+ "schema": {
147
+ "type": "array",
148
+ "items": {
149
+ "type": "object",
150
+ "properties": {
151
+ "id": {"type": "integer"},
152
+ "label": {"type": "string", "enum": ["BULLISH", "BEARISH", "NEUTRAL"]},
153
+ "impact_score": {"type": "number", "minimum": -1, "maximum": 1},
154
+ "confidence": {"type": "number", "minimum": 0, "maximum": 1},
155
+ "relevance": {"type": "number", "minimum": 0, "maximum": 1},
156
+ "event_type": {"type": "string", "enum": sorted(LLM_V2_EVENT_TYPES)},
157
+ "reasoning": {"type": "string"},
158
+ },
159
+ "required": [
160
+ "id",
161
+ "label",
162
+ "impact_score",
163
+ "confidence",
164
+ "relevance",
165
+ "event_type",
166
+ "reasoning",
167
+ ],
168
+ "additionalProperties": False,
169
+ },
170
+ },
171
+ },
172
+ }
173
+ SCORING_V2_VERSION = "commodity_v2"
174
 
175
 
176
  # =============================================================================
 
811
  return results
812
 
813
 
814
+ def _clip(value: float, lower: float, upper: float) -> float:
815
+ """Clamp numeric value."""
816
+ return max(lower, min(upper, float(value)))
817
+
818
+
819
+ def _label_from_impact_score(impact_score: float) -> str:
820
+ """Map impact score to discrete label."""
821
+ if impact_score >= LLM_V2_LABEL_THRESHOLD:
822
+ return "BULLISH"
823
+ if impact_score <= -LLM_V2_LABEL_THRESHOLD:
824
+ return "BEARISH"
825
+ return "NEUTRAL"
826
+
827
+
828
+ def _sign(value: float, eps: float = 1e-9) -> int:
829
+ """Return numeric sign with epsilon deadzone."""
830
+ if value > eps:
831
+ return 1
832
+ if value < -eps:
833
+ return -1
834
+ return 0
835
+
836
+
837
+ def _normalize_event_type(value: Any) -> str:
838
+ """Normalize event type to allowed vocabulary."""
839
+ normalized = str(value or "").strip().lower()
840
+ if normalized in LLM_V2_EVENT_TYPES:
841
+ return normalized
842
+ return "mixed_unclear"
843
+
844
+
845
+ def _infer_event_type_from_text(text: str) -> str:
846
+ """Heuristic event inference used only for deterministic fallback."""
847
+ lower = (text or "").lower()
848
+ if not lower or "copper" not in lower:
849
+ return "non_copper"
850
+
851
+ supply_disruption_keywords = [
852
+ "outage",
853
+ "strike",
854
+ "disruption",
855
+ "shutdown",
856
+ "halt",
857
+ "sanction",
858
+ "inventory draw",
859
+ "stocks fell",
860
+ "warehouse draw",
861
+ ]
862
+ supply_expansion_keywords = [
863
+ "ramp-up",
864
+ "ramp up",
865
+ "increase output",
866
+ "production increase",
867
+ "new mine",
868
+ "inventory build",
869
+ "stocks rose",
870
+ ]
871
+ demand_increase_keywords = [
872
+ "stimulus",
873
+ "grid investment",
874
+ "ev demand",
875
+ "demand rise",
876
+ "stockpile purchase",
877
+ "import growth",
878
+ ]
879
+ demand_decrease_keywords = [
880
+ "slowdown",
881
+ "weak demand",
882
+ "demand decline",
883
+ "construction slump",
884
+ "pmi contraction",
885
+ "import decline",
886
+ ]
887
+ if any(token in lower for token in supply_disruption_keywords):
888
+ return "supply_disruption"
889
+ if any(token in lower for token in supply_expansion_keywords):
890
+ return "supply_expansion"
891
+ if any(token in lower for token in demand_increase_keywords):
892
+ return "demand_increase"
893
+ if any(token in lower for token in demand_decrease_keywords):
894
+ return "demand_decrease"
895
+ if "dollar strengthens" in lower or "usd stronger" in lower:
896
+ return "macro_usd_up"
897
+ if "dollar weakens" in lower or "usd weaker" in lower:
898
+ return "macro_usd_down"
899
+ return "mixed_unclear"
900
+
901
+
902
+ def _build_llm_v2_user_prompt(articles: list[dict], horizon_days: int) -> str:
903
+ """Build compact JSON prompt for batch scoring."""
904
+ normalized_articles = [
905
+ {
906
+ "id": int(article["id"]),
907
+ "title": str(article.get("title") or "")[:500],
908
+ "description": str(article.get("description") or "")[:800],
909
+ }
910
+ for article in articles
911
+ ]
912
+ return (
913
+ f"Classify each article for {horizon_days}-day HG=F copper futures impact.\n"
914
+ "Return one object per id.\n\n"
915
+ f"Input articles JSON:\n{json.dumps(normalized_articles, ensure_ascii=True)}"
916
+ )
917
+
918
+
919
+ def _parse_llm_v2_items(
920
+ *,
921
+ raw_results: Any,
922
+ expected_ids: list[int],
923
+ model_name: str,
924
+ ) -> tuple[dict[int, dict], list[int]]:
925
+ """
926
+ Parse/validate V2 LLM outputs.
927
+
928
+ Returns:
929
+ (valid_results_by_id, failed_ids)
930
+ """
931
+ if not isinstance(raw_results, list):
932
+ raise ValueError(f"Structured result must be a list, got {type(raw_results).__name__}")
933
+
934
+ expected = set(expected_ids)
935
+ valid: dict[int, dict] = {}
936
+ failed_ids: set[int] = set()
937
+
938
+ for item in raw_results:
939
+ if not isinstance(item, dict):
940
+ continue
941
+ if "id" not in item:
942
+ continue
943
+
944
+ try:
945
+ article_id = int(item["id"])
946
+ except (TypeError, ValueError):
947
+ continue
948
+
949
+ if article_id not in expected:
950
+ continue
951
+ if article_id in valid:
952
+ failed_ids.add(article_id)
953
+ continue
954
+
955
+ raw_label = item.get("label", item.get("classification"))
956
+ raw_impact = item.get("impact_score", item.get("score"))
957
+ raw_confidence = item.get("confidence")
958
+ raw_relevance = item.get("relevance", item.get("relevance_score"))
959
+ raw_event_type = item.get("event_type")
960
+ raw_reasoning = item.get("reasoning", "")
961
+
962
+ try:
963
+ if raw_impact is None:
964
+ raise ValueError("missing impact_score")
965
+ if raw_confidence is None:
966
+ raise ValueError("missing confidence")
967
+ if raw_relevance is None:
968
+ raise ValueError("missing relevance")
969
+
970
+ impact_score = _clip(float(raw_impact), -1.0, 1.0)
971
+ confidence = _clip(float(raw_confidence), 0.0, 1.0)
972
+ relevance = _clip(float(raw_relevance), 0.0, 1.0)
973
+ except (TypeError, ValueError):
974
+ failed_ids.add(article_id)
975
+ continue
976
+
977
+ event_type = _normalize_event_type(raw_event_type)
978
+ label_from_impact = _label_from_impact_score(impact_score)
979
+ if raw_label is None:
980
+ label = label_from_impact
981
+ else:
982
+ label = str(raw_label).upper().strip()
983
+ if label not in LLM_LABELS:
984
+ label = label_from_impact
985
+ if label != label_from_impact:
986
+ # Keep deterministic consistency between score and class.
987
+ label = label_from_impact
988
+
989
+ reasoning = _sanitize_reasoning_text(raw_reasoning)[:160]
990
+
991
+ valid[article_id] = {
992
+ "id": article_id,
993
+ "label": label,
994
+ "impact_score": impact_score,
995
+ "confidence": confidence,
996
+ "relevance": relevance,
997
+ "event_type": event_type,
998
+ "reasoning": reasoning,
999
+ "llm_model": model_name,
1000
+ }
1001
+
1002
+ # Mark missing ids as failed.
1003
+ for article_id in expected_ids:
1004
+ if article_id not in valid:
1005
+ failed_ids.add(article_id)
1006
+
1007
+ return valid, sorted(failed_ids)
1008
+
1009
+
1010
+ async def _repair_json_response_v2(
1011
+ *,
1012
+ settings: Any,
1013
+ model_name: str,
1014
+ malformed_content: str,
1015
+ expected_ids: list[int],
1016
+ ) -> str:
1017
+ """Repair malformed JSON into V2 contract with no semantic rewrite."""
1018
+ repair_prompt = (
1019
+ "Convert the malformed output into valid JSON array.\n"
1020
+ f"Expected ids: {expected_ids}\n"
1021
+ "Keep keys: id,label,impact_score,confidence,relevance,event_type,reasoning.\n"
1022
+ "Do not add explanations.\n\n"
1023
+ f"MALFORMED:\n{malformed_content}"
1024
+ )
1025
+ repair_data = await create_chat_completion(
1026
+ api_key=settings.openrouter_api_key,
1027
+ model=model_name,
1028
+ messages=[
1029
+ {
1030
+ "role": "system",
1031
+ "content": "You repair JSON only. Return valid JSON array without markdown.",
1032
+ },
1033
+ {"role": "user", "content": repair_prompt},
1034
+ ],
1035
+ max_tokens=2600,
1036
+ temperature=0.0,
1037
+ timeout_seconds=60.0,
1038
+ max_retries=settings.openrouter_max_retries,
1039
+ rpm=settings.openrouter_rpm,
1040
+ fallback_models=settings.openrouter_fallback_models_list,
1041
+ referer="https://copper-mind.vercel.app",
1042
+ title="CopperMind V2 JSON Repair",
1043
+ extra_payload={"reasoning": {"exclude": True}},
1044
+ )
1045
+ repaired_content = _extract_chat_message_content(repair_data)
1046
+ if not repaired_content:
1047
+ raise LLMStructuredOutputError("V2 JSON repair returned empty content")
1048
+ return repaired_content
1049
+
1050
+
1051
+ async def _score_subset_with_model_v2(
1052
+ *,
1053
+ settings: Any,
1054
+ model_name: str,
1055
+ articles: list[dict],
1056
+ horizon_days: int,
1057
+ ) -> tuple[dict[int, dict], list[int], int]:
1058
+ """
1059
+ Score subset with one model.
1060
+
1061
+ Returns:
1062
+ (valid_results_by_id, failed_ids, parse_fail_count)
1063
+ """
1064
+ if not articles:
1065
+ return {}, [], 0
1066
+
1067
+ expected_ids = [int(article["id"]) for article in articles]
1068
+ user_prompt = _build_llm_v2_user_prompt(articles, horizon_days=horizon_days)
1069
+
1070
+ async def _request(strict_schema: bool, *, max_tokens: int) -> dict[str, Any]:
1071
+ request_kwargs: dict[str, Any] = {
1072
+ "api_key": settings.openrouter_api_key,
1073
+ "model": model_name,
1074
+ "messages": [
1075
+ {"role": "system", "content": LLM_V2_SYSTEM_PROMPT},
1076
+ {"role": "user", "content": user_prompt},
1077
+ ],
1078
+ "max_tokens": max_tokens,
1079
+ "temperature": 0.0,
1080
+ "timeout_seconds": 60.0,
1081
+ "max_retries": settings.openrouter_max_retries,
1082
+ "rpm": settings.openrouter_rpm,
1083
+ "fallback_models": settings.openrouter_fallback_models_list,
1084
+ "referer": "https://copper-mind.vercel.app",
1085
+ "title": "CopperMind Sentiment Analysis V2",
1086
+ "extra_payload": {"reasoning": {"exclude": True}},
1087
+ }
1088
+ if strict_schema:
1089
+ request_kwargs["response_format"] = LLM_SCORING_RESPONSE_FORMAT_V2
1090
+ request_kwargs["provider"] = LLM_SCORING_PROVIDER_OPTIONS
1091
+ return await create_chat_completion(**request_kwargs)
1092
+
1093
+ async def _request_with_provider_fallback(*, max_tokens: int) -> dict[str, Any]:
1094
+ try:
1095
+ return await _request(strict_schema=True, max_tokens=max_tokens)
1096
+ except OpenRouterError as exc:
1097
+ message = str(exc).lower()
1098
+ if exc.status_code == 404 and "no endpoints found" in message:
1099
+ logger.warning(
1100
+ "V2 structured scoring unsupported by provider route for model=%s. Retrying relaxed mode.",
1101
+ model_name,
1102
+ )
1103
+ return await _request(strict_schema=False, max_tokens=max_tokens)
1104
+ raise
1105
+
1106
+ parse_fail_count = 0
1107
+ try:
1108
+ data = await _request_with_provider_fallback(max_tokens=LLM_SCORING_MAX_TOKENS_PRIMARY)
1109
+ except Exception:
1110
+ return {}, expected_ids, len(expected_ids)
1111
+
1112
+ content = _extract_chat_message_content(data)
1113
+ if not content:
1114
+ finish_reason = _extract_finish_reason(data)
1115
+ if finish_reason == "length":
1116
+ data = await _request_with_provider_fallback(max_tokens=LLM_SCORING_MAX_TOKENS_RETRY)
1117
+ content = _extract_chat_message_content(data)
1118
+ if not content:
1119
+ return {}, expected_ids, len(expected_ids)
1120
+
1121
+ try:
1122
+ raw_results = json.loads(_clean_json_content(content))
1123
+ valid, failed = _parse_llm_v2_items(
1124
+ raw_results=raw_results,
1125
+ expected_ids=expected_ids,
1126
+ model_name=model_name,
1127
+ )
1128
+ parse_fail_count += len(failed)
1129
+ return valid, failed, parse_fail_count
1130
+ except Exception:
1131
+ parse_fail_count += len(expected_ids)
1132
+
1133
+ try:
1134
+ repaired = await _repair_json_response_v2(
1135
+ settings=settings,
1136
+ model_name=model_name,
1137
+ malformed_content=content,
1138
+ expected_ids=expected_ids,
1139
+ )
1140
+ raw_results = json.loads(_clean_json_content(repaired))
1141
+ valid, failed = _parse_llm_v2_items(
1142
+ raw_results=raw_results,
1143
+ expected_ids=expected_ids,
1144
+ model_name=model_name,
1145
+ )
1146
+ return valid, failed, parse_fail_count
1147
+ except Exception:
1148
+ return {}, expected_ids, parse_fail_count
1149
+
1150
+
1151
+ async def score_batch_with_llm_v2(
1152
+ articles: list[dict],
1153
+ *,
1154
+ horizon_days: int = 5,
1155
+ ) -> dict[str, Any]:
1156
+ """
1157
+ Commodity-aware sentiment scoring with fast+reliable model escalation.
1158
+ """
1159
+ settings = get_settings()
1160
+ if not settings.openrouter_api_key:
1161
+ raise RuntimeError("OpenRouter API key not configured")
1162
+
1163
+ fast_model = settings.resolved_scoring_fast_model
1164
+ reliable_model = settings.resolved_scoring_reliable_model
1165
+ conflict_threshold = _clip(
1166
+ float(getattr(settings, "sentiment_escalate_conflict_threshold", 0.55)),
1167
+ 0.0,
1168
+ 1.0,
1169
+ )
1170
+
1171
+ normalized_articles = [
1172
+ {
1173
+ "id": int(article["id"]),
1174
+ "title": str(article.get("title") or "")[:500],
1175
+ "description": str(article.get("description") or "")[:800],
1176
+ "text": str(article.get("text") or "")[:1800],
1177
+ }
1178
+ for article in articles
1179
+ ]
1180
+ expected_ids = [item["id"] for item in normalized_articles]
1181
+ article_by_id = {item["id"]: item for item in normalized_articles}
1182
+
1183
+ fast_valid, fast_failed, parse_fail_fast = await _score_subset_with_model_v2(
1184
+ settings=settings,
1185
+ model_name=fast_model,
1186
+ articles=normalized_articles,
1187
+ horizon_days=horizon_days,
1188
+ )
1189
+
1190
+ results_by_id = dict(fast_valid)
1191
+ parse_fail_total = int(parse_fail_fast)
1192
+
1193
+ conflict_ids: list[int] = []
1194
+ for article_id, item in fast_valid.items():
1195
+ event_type = _normalize_event_type(item.get("event_type"))
1196
+ rule_sign = int(LLM_V2_EVENT_SIGN.get(event_type, 0))
1197
+ llm_sign = _sign(float(item.get("impact_score", 0.0)))
1198
+ conflict_strength = _clip(
1199
+ float(item.get("confidence", 0.0)) * float(item.get("relevance", 0.0)),
1200
+ 0.0,
1201
+ 1.0,
1202
+ )
1203
+ if rule_sign != 0 and llm_sign != 0 and llm_sign != rule_sign and conflict_strength >= conflict_threshold:
1204
+ conflict_ids.append(article_id)
1205
+ results_by_id.pop(article_id, None)
1206
+
1207
+ escalation_ids = sorted(set(fast_failed).union(conflict_ids))
1208
+ escalation_count = len(escalation_ids)
1209
+
1210
+ if escalation_ids:
1211
+ reliable_subset = [
1212
+ article_by_id[article_id]
1213
+ for article_id in escalation_ids
1214
+ if article_id in article_by_id
1215
+ ]
1216
+ reliable_valid, _reliable_failed, parse_fail_reliable = await _score_subset_with_model_v2(
1217
+ settings=settings,
1218
+ model_name=reliable_model,
1219
+ articles=reliable_subset,
1220
+ horizon_days=horizon_days,
1221
+ )
1222
+ results_by_id.update(reliable_valid)
1223
+ parse_fail_total += int(parse_fail_reliable)
1224
+
1225
+ results = [results_by_id[article_id] for article_id in expected_ids if article_id in results_by_id]
1226
+ failed_ids = [article_id for article_id in expected_ids if article_id not in results_by_id]
1227
+ fallback_count = len(failed_ids)
1228
+
1229
+ return {
1230
+ "results": results,
1231
+ "failed_ids": failed_ids,
1232
+ "parse_fail_count": parse_fail_total,
1233
+ "escalation_count": escalation_count,
1234
+ "fallback_count": fallback_count,
1235
+ "model_fast": fast_model,
1236
+ "model_reliable": reliable_model,
1237
+ }
1238
+
1239
+
1240
+ def score_batch_with_finbert_v2(articles: list[dict]) -> dict[int, dict]:
1241
+ """Score text with FinBERT for tone/intensity features."""
1242
+ pipe = get_finbert_pipeline()
1243
+ results: dict[int, dict] = {}
1244
+
1245
+ for article in articles:
1246
+ article_id = int(article["id"])
1247
+ text = str(
1248
+ article.get("text")
1249
+ or f"{article.get('title', '')} {article.get('description', '')}"
1250
+ )[:1200]
1251
+ scores = score_text_with_finbert(pipe, text)
1252
+ results[article_id] = {
1253
+ "prob_positive": float(scores["prob_positive"]),
1254
+ "prob_neutral": float(scores["prob_neutral"]),
1255
+ "prob_negative": float(scores["prob_negative"]),
1256
+ "tone": float(scores["score"]),
1257
+ "magnitude": abs(float(scores["prob_positive"]) - float(scores["prob_negative"])),
1258
+ }
1259
+
1260
+ return results
1261
+
1262
+
1263
+ def compute_final_score_v2(
1264
+ *,
1265
+ impact_score_llm: float,
1266
+ confidence_llm: float,
1267
+ relevance_score: float,
1268
+ event_type: str,
1269
+ prob_positive: float,
1270
+ prob_negative: float,
1271
+ ) -> dict[str, float | int]:
1272
+ """Compute deterministic ensemble score for V2."""
1273
+ llm_impact = _clip(float(impact_score_llm), -1.0, 1.0)
1274
+ llm_conf = _clip(float(confidence_llm), 0.0, 1.0)
1275
+ relevance = _clip(float(relevance_score), 0.0, 1.0)
1276
+ tone = _clip(float(prob_positive) - float(prob_negative), -1.0, 1.0)
1277
+ tone_mag = abs(tone)
1278
+
1279
+ normalized_event = _normalize_event_type(event_type)
1280
+ rule_sign = int(LLM_V2_EVENT_SIGN.get(normalized_event, 0))
1281
+ rule_strength = float(LLM_V2_EVENT_STRENGTH.get(normalized_event, 0.25))
1282
+
1283
+ llm_sign = _sign(llm_impact)
1284
+ final_sign = llm_sign if llm_sign != 0 else rule_sign
1285
+ if final_sign == 0 and tone_mag >= 0.2:
1286
+ final_sign = _sign(tone)
1287
+
1288
+ impact_mag = _clip(
1289
+ (0.55 * abs(llm_impact))
1290
+ + (0.25 * tone_mag)
1291
+ + (0.20 * _clip(rule_strength, 0.0, 1.0)),
1292
+ 0.0,
1293
+ 1.0,
1294
+ )
1295
+ if final_sign == 0:
1296
+ impact_mag = min(impact_mag, 0.12)
1297
+
1298
+ final_score = float(final_sign) * impact_mag
1299
+ agreement = 1.0 if (rule_sign == 0 or llm_sign == 0 or llm_sign == rule_sign) else 0.4
1300
+ confidence_cal = _clip((0.50 * llm_conf) + (0.30 * agreement) + (0.20 * relevance), 0.01, 0.99)
1301
+
1302
+ return {
1303
+ "rule_sign": rule_sign,
1304
+ "rule_strength": rule_strength,
1305
+ "final_score": final_score,
1306
+ "confidence_calibrated": confidence_cal,
1307
+ }
1308
+
1309
+
1310
+ def _build_article_fallback_v2(
1311
+ *,
1312
+ article: dict,
1313
+ finbert: dict,
1314
+ model_fast: str,
1315
+ model_reliable: str,
1316
+ ) -> dict:
1317
+ """Deterministic article-level fallback without zero-only outputs."""
1318
+ text = str(article.get("text") or f"{article.get('title', '')} {article.get('description', '')}")
1319
+ event_type = _infer_event_type_from_text(text)
1320
+ rule_sign = int(LLM_V2_EVENT_SIGN.get(event_type, 0))
1321
+ tone = float(finbert.get("tone", 0.0))
1322
+ tone_sign = _sign(tone)
1323
+ direction = rule_sign if rule_sign != 0 else tone_sign
1324
+ if direction == 0:
1325
+ impact_score = 0.0
1326
+ else:
1327
+ impact_score = float(direction) * _clip((abs(tone) * 0.35) + 0.08, 0.08, 0.25)
1328
+
1329
+ relevance = 0.10 if event_type == "non_copper" else 0.45
1330
+ confidence = 0.18 if direction == 0 else _clip(0.22 + (abs(tone) * 0.22), 0.22, 0.45)
1331
+
1332
+ return {
1333
+ "id": int(article["id"]),
1334
+ "label": _label_from_impact_score(impact_score),
1335
+ "impact_score": impact_score,
1336
+ "confidence": confidence,
1337
+ "relevance": relevance,
1338
+ "event_type": event_type,
1339
+ "reasoning": "deterministic_fallback",
1340
+ "llm_model": model_fast,
1341
+ "model_fast": model_fast,
1342
+ "model_reliable": model_reliable,
1343
+ "fallback_used": True,
1344
+ }
1345
+
1346
+
1347
+ def score_unscored_processed_articles(
1348
+ session: Session,
1349
+ *,
1350
+ chunk_size: int = 12,
1351
+ backfill_days: Optional[int] = None,
1352
+ ) -> dict[str, int]:
1353
+ """
1354
+ Score unscored `news_processed` articles into `news_sentiments_v2`.
1355
+ """
1356
+ settings = get_settings()
1357
+ horizon_days = max(1, int(getattr(settings, "sentiment_horizon_days", 5)))
1358
+ relevance_min = _clip(float(getattr(settings, "sentiment_relevance_min", 0.35)), 0.0, 1.0)
1359
+
1360
+ query = (
1361
+ session.query(
1362
+ NewsProcessed.id.label("processed_id"),
1363
+ NewsProcessed.canonical_title,
1364
+ NewsProcessed.cleaned_text,
1365
+ NewsRaw.title.label("raw_title"),
1366
+ NewsRaw.description.label("raw_description"),
1367
+ NewsRaw.published_at,
1368
+ )
1369
+ .join(NewsRaw, NewsProcessed.raw_id == NewsRaw.id)
1370
+ .outerjoin(
1371
+ NewsSentimentV2,
1372
+ (NewsProcessed.id == NewsSentimentV2.news_processed_id)
1373
+ & (NewsSentimentV2.horizon_days == horizon_days),
1374
+ )
1375
+ .filter(NewsSentimentV2.id.is_(None))
1376
+ .order_by(NewsRaw.published_at.asc(), NewsProcessed.id.asc())
1377
+ )
1378
+
1379
+ if backfill_days is not None:
1380
+ cutoff = datetime.now(timezone.utc) - timedelta(days=max(1, int(backfill_days)))
1381
+ query = query.filter(NewsRaw.published_at >= cutoff)
1382
+
1383
+ rows = query.all()
1384
+ if not rows:
1385
+ logger.info("No unscored processed articles found")
1386
+ return {
1387
+ "scored_count": 0,
1388
+ "parse_fail_count": 0,
1389
+ "escalation_count": 0,
1390
+ "fallback_count": 0,
1391
+ "finbert_used": 0,
1392
+ }
1393
+
1394
+ logger.info("Found %s unscored processed articles for V2 scoring", len(rows))
1395
+
1396
+ scored_count = 0
1397
+ parse_fail_count = 0
1398
+ escalation_count = 0
1399
+ fallback_count = 0
1400
+ finbert_used = 0
1401
+ llm_budget_remaining = max(0, int(settings.max_llm_articles_per_run))
1402
+ fast_model = settings.resolved_scoring_fast_model
1403
+ reliable_model = settings.resolved_scoring_reliable_model
1404
+
1405
+ for chunk_idx in range(0, len(rows), chunk_size):
1406
+ chunk_rows = rows[chunk_idx:chunk_idx + chunk_size]
1407
+ chunk_items: list[dict] = []
1408
+ for row in chunk_rows:
1409
+ title = str(row.raw_title or row.canonical_title or "")[:500]
1410
+ description = str(row.raw_description or "")[:1000]
1411
+ text = str(row.cleaned_text or f"{title} {description}")[:2000]
1412
+ chunk_items.append(
1413
+ {
1414
+ "id": int(row.processed_id),
1415
+ "title": title,
1416
+ "description": description,
1417
+ "text": text,
1418
+ "published_at": row.published_at,
1419
+ }
1420
+ )
1421
+
1422
+ finbert_by_id = score_batch_with_finbert_v2(chunk_items)
1423
+ finbert_used += len(finbert_by_id)
1424
+
1425
+ llm_results_by_id: dict[int, dict] = {}
1426
+ llm_candidates: list[dict] = []
1427
+ if settings.openrouter_api_key and llm_budget_remaining > 0:
1428
+ llm_take = min(len(chunk_items), llm_budget_remaining)
1429
+ llm_candidates = chunk_items[:llm_take]
1430
+ llm_budget_remaining -= llm_take
1431
+
1432
+ if llm_candidates:
1433
+ try:
1434
+ llm_bundle = run_async_from_sync(
1435
+ score_batch_with_llm_v2,
1436
+ llm_candidates,
1437
+ horizon_days=horizon_days,
1438
+ )
1439
+ for item in llm_bundle.get("results", []):
1440
+ llm_results_by_id[int(item["id"])] = item
1441
+ parse_fail_count += int(llm_bundle.get("parse_fail_count", 0))
1442
+ escalation_count += int(llm_bundle.get("escalation_count", 0))
1443
+ fast_model = str(llm_bundle.get("model_fast", fast_model))
1444
+ reliable_model = str(llm_bundle.get("model_reliable", reliable_model))
1445
+ except Exception as exc:
1446
+ logger.warning("V2 LLM scoring failed for chunk starting at %s: %s", chunk_idx, exc)
1447
+ parse_fail_count += len(llm_candidates)
1448
+
1449
+ for article in chunk_items:
1450
+ article_id = int(article["id"])
1451
+ finbert = finbert_by_id.get(article_id, _neutral_finbert_score())
1452
+ llm = llm_results_by_id.get(article_id)
1453
+
1454
+ if llm is None:
1455
+ llm = _build_article_fallback_v2(
1456
+ article=article,
1457
+ finbert=finbert if isinstance(finbert, dict) else {},
1458
+ model_fast=fast_model,
1459
+ model_reliable=reliable_model,
1460
+ )
1461
+ else:
1462
+ llm["model_fast"] = fast_model
1463
+ llm["model_reliable"] = reliable_model
1464
+ llm["fallback_used"] = False
1465
+
1466
+ if bool(llm.get("fallback_used", False)):
1467
+ fallback_count += 1
1468
+
1469
+ if float(llm.get("relevance", 0.0)) < relevance_min and llm.get("event_type") != "non_copper":
1470
+ llm["event_type"] = "non_copper"
1471
+ llm["label"] = "NEUTRAL"
1472
+ llm["impact_score"] = 0.0
1473
+
1474
+ metrics = compute_final_score_v2(
1475
+ impact_score_llm=float(llm.get("impact_score", 0.0)),
1476
+ confidence_llm=float(llm.get("confidence", 0.01)),
1477
+ relevance_score=float(llm.get("relevance", 0.01)),
1478
+ event_type=str(llm.get("event_type", "mixed_unclear")),
1479
+ prob_positive=float(finbert.get("prob_positive", 0.33)),
1480
+ prob_negative=float(finbert.get("prob_negative", 0.33)),
1481
+ )
1482
+
1483
+ payload = {
1484
+ "label": llm.get("label", "NEUTRAL"),
1485
+ "impact_score": round(float(llm.get("impact_score", 0.0)), 4),
1486
+ "confidence": round(float(llm.get("confidence", 0.01)), 4),
1487
+ "relevance": round(float(llm.get("relevance", 0.01)), 4),
1488
+ "event_type": llm.get("event_type", "mixed_unclear"),
1489
+ "reasoning": llm.get("reasoning", ""),
1490
+ "rule_sign": metrics["rule_sign"],
1491
+ "rule_strength": round(float(metrics["rule_strength"]), 4),
1492
+ "confidence_calibrated": round(float(metrics["confidence_calibrated"]), 4),
1493
+ "fallback_used": bool(llm.get("fallback_used", False)),
1494
+ "llm_model": llm.get("llm_model", fast_model),
1495
+ "scoring_version": SCORING_V2_VERSION,
1496
+ }
1497
+
1498
+ sentiment_v2 = NewsSentimentV2(
1499
+ news_processed_id=article_id,
1500
+ horizon_days=horizon_days,
1501
+ label=str(llm.get("label", "NEUTRAL")),
1502
+ impact_score_llm=float(llm.get("impact_score", 0.0)),
1503
+ confidence_llm=float(llm.get("confidence", 0.01)),
1504
+ confidence_calibrated=float(metrics["confidence_calibrated"]),
1505
+ relevance_score=float(llm.get("relevance", 0.01)),
1506
+ event_type=str(llm.get("event_type", "mixed_unclear")),
1507
+ rule_sign=int(metrics["rule_sign"]),
1508
+ final_score=float(metrics["final_score"]),
1509
+ finbert_pos=float(finbert.get("prob_positive", 0.33)),
1510
+ finbert_neu=float(finbert.get("prob_neutral", 0.34)),
1511
+ finbert_neg=float(finbert.get("prob_negative", 0.33)),
1512
+ reasoning_json=json.dumps(payload, ensure_ascii=True),
1513
+ model_fast=fast_model,
1514
+ model_reliable=reliable_model,
1515
+ scored_at=datetime.now(timezone.utc),
1516
+ )
1517
+ session.add(sentiment_v2)
1518
+ scored_count += 1
1519
+
1520
+ session.commit()
1521
+
1522
+ logger.info(
1523
+ "V2 scoring summary: scored=%s parse_fail=%s escalations=%s fallback=%s finbert_used=%s",
1524
+ scored_count,
1525
+ parse_fail_count,
1526
+ escalation_count,
1527
+ fallback_count,
1528
+ finbert_used,
1529
+ )
1530
+ return {
1531
+ "scored_count": scored_count,
1532
+ "parse_fail_count": parse_fail_count,
1533
+ "escalation_count": escalation_count,
1534
+ "fallback_count": fallback_count,
1535
+ "finbert_used": finbert_used,
1536
+ }
1537
+
1538
+
1539
+ def aggregate_daily_sentiment_v2(
1540
+ session: Session,
1541
+ *,
1542
+ tau_hours: float = 12.0,
1543
+ ) -> int:
1544
+ """Aggregate V2 article scores into daily_sentiments_v2."""
1545
+ settings = get_settings()
1546
+ tau_hours = tau_hours or settings.sentiment_tau_hours
1547
+ horizon_days = max(1, int(getattr(settings, "sentiment_horizon_days", 5)))
1548
+ relevance_min = _clip(float(getattr(settings, "sentiment_relevance_min", 0.35)), 0.0, 1.0)
1549
+
1550
+ rows = (
1551
+ session.query(
1552
+ NewsRaw.published_at,
1553
+ NewsSentimentV2.final_score,
1554
+ NewsSentimentV2.confidence_calibrated,
1555
+ NewsSentimentV2.relevance_score,
1556
+ )
1557
+ .join(NewsProcessed, NewsProcessed.raw_id == NewsRaw.id)
1558
+ .join(
1559
+ NewsSentimentV2,
1560
+ (NewsSentimentV2.news_processed_id == NewsProcessed.id)
1561
+ & (NewsSentimentV2.horizon_days == horizon_days),
1562
+ )
1563
+ .filter(NewsSentimentV2.relevance_score >= relevance_min)
1564
+ .all()
1565
+ )
1566
+
1567
+ if not rows:
1568
+ logger.info("No V2 scored articles available for daily aggregation")
1569
+ return 0
1570
+
1571
+ df = pd.DataFrame(
1572
+ rows,
1573
+ columns=["published_at", "final_score", "confidence_calibrated", "relevance_score"],
1574
+ )
1575
+ df["date"] = pd.to_datetime(df["published_at"]).dt.normalize()
1576
+
1577
+ def calc_weights(group):
1578
+ hours = (group["published_at"] - group["date"]).dt.total_seconds() / 3600.0
1579
+ weights = np.exp(hours / tau_hours)
1580
+ return weights / weights.sum()
1581
+
1582
+ daily_rows = []
1583
+ for date, group in df.groupby("date"):
1584
+ weights = calc_weights(group)
1585
+ daily_rows.append(
1586
+ {
1587
+ "date": date,
1588
+ "sentiment_index": float((group["final_score"] * weights).sum()),
1589
+ "news_count": int(len(group)),
1590
+ "avg_confidence": float(group["confidence_calibrated"].mean()),
1591
+ "avg_relevance": float(group["relevance_score"].mean()),
1592
+ }
1593
+ )
1594
+
1595
+ count = 0
1596
+ for row in daily_rows:
1597
+ date_dt = row["date"].to_pydatetime()
1598
+ if date_dt.tzinfo is None:
1599
+ date_dt = date_dt.replace(tzinfo=timezone.utc)
1600
+
1601
+ existing = session.query(DailySentimentV2).filter(
1602
+ func.date(DailySentimentV2.date) == func.date(date_dt)
1603
+ ).first()
1604
+ if existing:
1605
+ existing.sentiment_index = row["sentiment_index"]
1606
+ existing.news_count = row["news_count"]
1607
+ existing.avg_confidence = row["avg_confidence"]
1608
+ existing.avg_relevance = row["avg_relevance"]
1609
+ existing.source_version = "v2"
1610
+ existing.aggregated_at = datetime.now(timezone.utc)
1611
+ else:
1612
+ session.add(
1613
+ DailySentimentV2(
1614
+ date=date_dt,
1615
+ sentiment_index=row["sentiment_index"],
1616
+ news_count=row["news_count"],
1617
+ avg_confidence=row["avg_confidence"],
1618
+ avg_relevance=row["avg_relevance"],
1619
+ source_version="v2",
1620
+ aggregated_at=datetime.now(timezone.utc),
1621
+ )
1622
+ )
1623
+ count += 1
1624
+
1625
+ session.commit()
1626
+ logger.info("Aggregated V2 sentiment for %s days", count)
1627
+ return count
1628
+
1629
+
1630
+ def backfill_sentiment_v2(
1631
+ session: Session,
1632
+ *,
1633
+ days: int = 180,
1634
+ batch_size: int = 50,
1635
+ ) -> dict[str, int]:
1636
+ """Idempotent V2 backfill helper for last N days."""
1637
+ logger.info("Starting V2 backfill for last %s days (batch_size=%s)", days, batch_size)
1638
+ return score_unscored_processed_articles(
1639
+ session=session,
1640
+ chunk_size=batch_size,
1641
+ backfill_days=days,
1642
+ )
1643
+
1644
+
1645
  def score_unscored_articles(
1646
  session: Session,
1647
  chunk_size: int = 12
 
2324
  Returns:
2325
  Dict with results from each stage
2326
  """
2327
+ settings = get_settings()
2328
  results = {
2329
  "scored_articles": 0,
2330
+ "scored_articles_v2": 0,
2331
  "aggregated_days": 0,
2332
+ "aggregated_days_v2": 0,
2333
  "model_result": None,
2334
  "timestamp": datetime.now(timezone.utc).isoformat(),
2335
  }
2336
 
2337
  with SessionLocal() as session:
2338
  if score_sentiment:
2339
+ if settings.scoring_source == "news_processed":
2340
+ scoring_stats = score_unscored_processed_articles(session)
2341
+ results["scored_articles"] = int(scoring_stats.get("scored_count", 0))
2342
+ results["scored_articles_v2"] = int(scoring_stats.get("scored_count", 0))
2343
+ results["llm_parse_fail_count"] = int(scoring_stats.get("parse_fail_count", 0))
2344
+ results["escalation_count"] = int(scoring_stats.get("escalation_count", 0))
2345
+ results["fallback_count"] = int(scoring_stats.get("fallback_count", 0))
2346
+ else:
2347
+ results["scored_articles"] = score_unscored_articles(session)
2348
 
2349
  if aggregate_sentiment:
2350
+ if settings.scoring_source == "news_processed":
2351
+ results["aggregated_days_v2"] = aggregate_daily_sentiment_v2(session)
2352
  results["aggregated_days"] = aggregate_daily_sentiment(session)
2353
 
2354
  if train_model:
 
2389
  action="store_true",
2390
  help="Run sentiment scoring + daily aggregation (no training)"
2391
  )
2392
+ parser.add_argument(
2393
+ "--backfill-v2-days",
2394
+ type=int,
2395
+ default=0,
2396
+ help="Backfill unscored V2 sentiment for last N days (idempotent)"
2397
+ )
2398
+ parser.add_argument(
2399
+ "--backfill-v2-batch-size",
2400
+ type=int,
2401
+ default=50,
2402
+ help="Batch size for V2 backfill mode"
2403
+ )
2404
  parser.add_argument(
2405
  "--target-symbol",
2406
  type=str,
 
2427
  score = args.run_all or args.score_only or args.refresh_sentiment
2428
  aggregate = args.run_all or args.aggregate_only or args.refresh_sentiment
2429
  train = args.run_all or args.train_only
2430
+ backfill_v2 = args.backfill_v2_days > 0
2431
 
2432
+ if not (score or aggregate or train or backfill_v2):
2433
  parser.print_help()
2434
  return
2435
 
 
2439
 
2440
  # Run pipeline
2441
  def do_run():
2442
+ if backfill_v2:
2443
+ with SessionLocal() as session:
2444
+ scoring_stats = backfill_sentiment_v2(
2445
+ session,
2446
+ days=args.backfill_v2_days,
2447
+ batch_size=max(1, int(args.backfill_v2_batch_size)),
2448
+ )
2449
+ aggregated_days_v2 = aggregate_daily_sentiment_v2(session)
2450
+ return {
2451
+ "timestamp": datetime.now(timezone.utc).isoformat(),
2452
+ "scored_articles": int(scoring_stats.get("scored_count", 0)),
2453
+ "scored_articles_v2": int(scoring_stats.get("scored_count", 0)),
2454
+ "aggregated_days": 0,
2455
+ "aggregated_days_v2": int(aggregated_days_v2),
2456
+ "llm_parse_fail_count": int(scoring_stats.get("parse_fail_count", 0)),
2457
+ "escalation_count": int(scoring_stats.get("escalation_count", 0)),
2458
+ "fallback_count": int(scoring_stats.get("fallback_count", 0)),
2459
+ "model_result": None,
2460
+ "backfill_days": int(args.backfill_v2_days),
2461
+ }
2462
  return run_full_pipeline(
2463
  target_symbol=args.target_symbol,
2464
  score_sentiment=score,
 
2483
 
2484
  if score:
2485
  print(f"\nSentiment Scoring: {results['scored_articles']} articles")
2486
+ if "scored_articles_v2" in results:
2487
+ print(f"V2 Sentiment Scoring: {results.get('scored_articles_v2', 0)} articles")
2488
+ if "llm_parse_fail_count" in results:
2489
+ print(f" - LLM parse failures: {results.get('llm_parse_fail_count', 0)}")
2490
+ print(f" - Escalations: {results.get('escalation_count', 0)}")
2491
+ print(f" - Deterministic fallbacks: {results.get('fallback_count', 0)}")
2492
 
2493
  if aggregate:
2494
  print(f"Daily Aggregation: {results['aggregated_days']} days")
2495
+ if results.get("aggregated_days_v2") is not None:
2496
+ print(f"Daily Aggregation V2 (shadow): {results.get('aggregated_days_v2', 0)} days")
2497
+
2498
+ if backfill_v2:
2499
+ print(f"\nBackfill V2 Days: {results.get('backfill_days', 0)}")
2500
+ print(f"V2 Aggregation Days: {results.get('aggregated_days_v2', 0)}")
2501
 
2502
  if train and results.get("model_result"):
2503
  mr = results["model_result"]
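
Note on the new ensemble: compute_final_score_v2 blends the LLM impact score, FinBERT tone, and the event-type rule table with fixed weights (0.55 / 0.25 / 0.20), and the confidence-times-relevance conflict check (default threshold 0.55, from sentiment_escalate_conflict_threshold) is what escalates a fast-model result to the reliable model in score_batch_with_llm_v2. A minimal standalone sketch of that arithmetic, not part of the commit; the input numbers are made up:

# Sketch of the compute_final_score_v2 blend with hypothetical inputs.
llm_impact, llm_conf, relevance = 0.60, 0.80, 0.90   # LLM result for one article
tone = 0.30                                          # prob_positive - prob_negative (FinBERT)
rule_sign, rule_strength = 1, 1.0                    # supply_disruption per LLM_V2_EVENT_SIGN/STRENGTH

impact_mag = min(1.0, 0.55 * abs(llm_impact) + 0.25 * abs(tone) + 0.20 * rule_strength)
final_score = (1 if llm_impact > 0 else -1) * impact_mag                        # 0.605
agreement = 1.0  # 1.0 unless rule and LLM signs conflict (then 0.4)
confidence_calibrated = 0.50 * llm_conf + 0.30 * agreement + 0.20 * relevance   # 0.88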
app/db.py CHANGED
@@ -148,6 +148,38 @@ def _run_migrations(engine):
148
  except Exception as e:
149
  logger.debug(f"Migration check for ai_stance: {e}")
150
151
 
152
  def init_db():
153
  """
 
148
  except Exception as e:
149
  logger.debug(f"Migration check for ai_stance: {e}")
150
 
151
+ # Migration: V2 stage-2 metrics columns
152
+ v2_metric_columns = [
153
+ ("articles_scored_v2", "INTEGER"),
154
+ ("llm_parse_fail_count", "INTEGER"),
155
+ ("escalation_count", "INTEGER"),
156
+ ("fallback_count", "INTEGER"),
157
+ ]
158
+ try:
159
+ if is_sqlite:
160
+ result = conn.execute(text("PRAGMA table_info(pipeline_run_metrics)"))
161
+ columns = [row[1] for row in result.fetchall()]
162
+ for column_name, column_type in v2_metric_columns:
163
+ if column_name not in columns:
164
+ conn.execute(
165
+ text(
166
+ f"ALTER TABLE pipeline_run_metrics ADD COLUMN {column_name} {column_type}"
167
+ )
168
+ )
169
+ conn.commit()
170
+ else:
171
+ for column_name, column_type in v2_metric_columns:
172
+ conn.execute(
173
+ text(
174
+ f"ALTER TABLE pipeline_run_metrics "
175
+ f"ADD COLUMN IF NOT EXISTS {column_name} {column_type}"
176
+ )
177
+ )
178
+ conn.commit()
179
+ logger.info("Migration: Ensured V2 Stage-2 metric columns exist")
180
+ except Exception as e:
181
+ logger.debug(f"Migration check for V2 metric columns: {e}")
182
+
183
 
184
  def init_db():
185
  """
app/features.py CHANGED
@@ -19,7 +19,7 @@ from sqlalchemy import func
19
  from sqlalchemy.orm import Session
20
 
21
  from app.db import SessionLocal
22
- from app.models import PriceBar, DailySentiment
23
  from app.settings import get_settings
24
 
25
  logger = logging.getLogger(__name__)
@@ -81,21 +81,37 @@ def load_sentiment_data(
81
  Returns:
82
  DataFrame with columns: date, sentiment_index, news_count
83
  """
84
- query = session.query(
85
- DailySentiment.date,
86
- DailySentiment.sentiment_index,
87
- DailySentiment.news_count
88
- )
89
-
90
- if start_date:
91
- query = query.filter(DailySentiment.date >= start_date)
92
- if end_date:
93
- query = query.filter(DailySentiment.date <= end_date)
94
-
95
- query = query.order_by(DailySentiment.date.asc())
96
-
97
- rows = query.all()
98
-
99
  if not rows:
100
  return pd.DataFrame()
101
 
 
19
  from sqlalchemy.orm import Session
20
 
21
  from app.db import SessionLocal
22
+ from app.models import PriceBar, DailySentiment, DailySentimentV2
23
  from app.settings import get_settings
24
 
25
  logger = logging.getLogger(__name__)
 
81
  Returns:
82
  DataFrame with columns: date, sentiment_index, news_count
83
  """
84
+ settings = get_settings()
85
+ source = str(getattr(settings, "scoring_source", "news_articles")).strip().lower()
86
+ use_v2 = source == "news_processed"
87
+
88
+ rows = []
89
+ if use_v2:
90
+ query_v2 = session.query(
91
+ DailySentimentV2.date,
92
+ DailySentimentV2.sentiment_index,
93
+ DailySentimentV2.news_count
94
+ )
95
+ if start_date:
96
+ query_v2 = query_v2.filter(DailySentimentV2.date >= start_date)
97
+ if end_date:
98
+ query_v2 = query_v2.filter(DailySentimentV2.date <= end_date)
99
+ rows = query_v2.order_by(DailySentimentV2.date.asc()).all()
100
+ if not rows:
101
+ logger.warning("No rows in daily_sentiments_v2; falling back to daily_sentiments")
102
+
103
+ if not rows:
104
+ query = session.query(
105
+ DailySentiment.date,
106
+ DailySentiment.sentiment_index,
107
+ DailySentiment.news_count
108
+ )
109
+ if start_date:
110
+ query = query.filter(DailySentiment.date >= start_date)
111
+ if end_date:
112
+ query = query.filter(DailySentiment.date <= end_date)
113
+ rows = query.order_by(DailySentiment.date.asc()).all()
114
+
115
  if not rows:
116
  return pd.DataFrame()
117
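
With scoring_source = "news_processed" (the new default in app/settings.py), load_sentiment_data now reads daily_sentiments_v2 first and only falls back to the legacy daily_sentiments table when no V2 rows exist, so feature building keeps working during the shadow rollout. A minimal usage sketch; the exact argument order is assumed from the diff:

from app.db import SessionLocal
from app.features import load_sentiment_data

with SessionLocal() as session:
    df = load_sentiment_data(session)  # columns: date, sentiment_index, news_count
    print(df.tail())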
 
app/inference.py CHANGED
@@ -25,7 +25,17 @@ from sqlalchemy import func
25
  from sqlalchemy.orm import Session
26
 
27
  from app.db import SessionLocal
28
- from app.models import PriceBar, DailySentiment, AnalysisSnapshot, NewsArticle, NewsSentiment
29
  from app.settings import get_settings
30
  from app.features import (
31
  load_price_data,
@@ -167,10 +177,20 @@ def get_current_price(session: Session, symbol: str) -> Optional[float]:
167
 
168
  def get_current_sentiment(session: Session) -> Optional[float]:
169
  """Get the most recent daily sentiment index."""
 
 
 
 
 
 
 
 
 
 
 
170
  latest = session.query(DailySentiment).order_by(
171
  DailySentiment.date.desc()
172
  ).first()
173
-
174
  return latest.sentiment_index if latest else None
175
 
176
 
@@ -180,18 +200,38 @@ def get_data_quality_stats(
180
  days: int = 7
181
  ) -> dict:
182
  """Get data quality statistics for the report."""
 
 
183
  cutoff = datetime.now(timezone.utc) - timedelta(days=days)
184
-
185
- # News count
186
- news_count = session.query(func.count(NewsArticle.id)).filter(
187
- NewsArticle.published_at >= cutoff
188
- ).scalar()
189
-
190
- # Scored news count
191
- scored_count = session.query(func.count(NewsSentiment.id)).join(
192
- NewsArticle,
193
- NewsSentiment.news_article_id == NewsArticle.id
194
- ).filter(NewsArticle.published_at >= cutoff).scalar()
195
 
196
  # Price bar coverage
197
  expected_days = days
 
25
  from sqlalchemy.orm import Session
26
 
27
  from app.db import SessionLocal
28
+ from app.models import (
29
+ PriceBar,
30
+ DailySentiment,
31
+ DailySentimentV2,
32
+ AnalysisSnapshot,
33
+ NewsArticle,
34
+ NewsSentiment,
35
+ NewsRaw,
36
+ NewsProcessed,
37
+ NewsSentimentV2,
38
+ )
39
  from app.settings import get_settings
40
  from app.features import (
41
  load_price_data,
 
177
 
178
  def get_current_sentiment(session: Session) -> Optional[float]:
179
  """Get the most recent daily sentiment index."""
180
+ settings = get_settings()
181
+ source = str(getattr(settings, "scoring_source", "news_articles")).strip().lower()
182
+
183
+ if source == "news_processed":
184
+ latest_v2 = session.query(DailySentimentV2).order_by(
185
+ DailySentimentV2.date.desc()
186
+ ).first()
187
+ if latest_v2 is not None:
188
+ return latest_v2.sentiment_index
189
+ logger.warning("No rows in daily_sentiments_v2; falling back to legacy daily_sentiments")
190
+
191
  latest = session.query(DailySentiment).order_by(
192
  DailySentiment.date.desc()
193
  ).first()
 
194
  return latest.sentiment_index if latest else None
195
 
196
 
 
200
  days: int = 7
201
  ) -> dict:
202
  """Get data quality statistics for the report."""
203
+ settings = get_settings()
204
+ source = str(getattr(settings, "scoring_source", "news_articles")).strip().lower()
205
  cutoff = datetime.now(timezone.utc) - timedelta(days=days)
206
+
207
+ if source == "news_processed":
208
+ horizon_days = max(1, int(getattr(settings, "sentiment_horizon_days", 5)))
209
+ news_count = session.query(func.count(NewsProcessed.id)).join(
210
+ NewsRaw, NewsProcessed.raw_id == NewsRaw.id
211
+ ).filter(
212
+ NewsRaw.published_at >= cutoff
213
+ ).scalar()
214
+
215
+ scored_count = session.query(func.count(NewsSentimentV2.id)).join(
216
+ NewsProcessed,
217
+ NewsSentimentV2.news_processed_id == NewsProcessed.id
218
+ ).join(
219
+ NewsRaw,
220
+ NewsProcessed.raw_id == NewsRaw.id
221
+ ).filter(
222
+ NewsRaw.published_at >= cutoff,
223
+ NewsSentimentV2.horizon_days == horizon_days,
224
+ ).scalar()
225
+ else:
226
+ # Legacy article-level stats
227
+ news_count = session.query(func.count(NewsArticle.id)).filter(
228
+ NewsArticle.published_at >= cutoff
229
+ ).scalar()
230
+
231
+ scored_count = session.query(func.count(NewsSentiment.id)).join(
232
+ NewsArticle,
233
+ NewsSentiment.news_article_id == NewsArticle.id
234
+ ).filter(NewsArticle.published_at >= cutoff).scalar()
235
 
236
  # Price bar coverage
237
  expected_days = days
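
In the news_processed branch, get_data_quality_stats counts processed articles published in the window and the NewsSentimentV2 rows at the configured horizon. A hypothetical illustration of the coverage those two counts imply (the numbers are invented):

news_count = 120      # NewsProcessed rows published in the last `days`
scored_count = 114    # NewsSentimentV2 rows for the configured horizon_days
coverage = scored_count / max(news_count, 1)
print(f"V2 scoring coverage: {coverage:.1%}")  # 95.0%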
app/main.py CHANGED
@@ -22,7 +22,7 @@ from fastapi.middleware.cors import CORSMiddleware
22
  from sqlalchemy import func
23
 
24
  from app.db import init_db, SessionLocal, get_db_type
25
- from app.models import NewsArticle, PriceBar, DailySentiment, AnalysisSnapshot
26
  from app.settings import get_settings
27
  from app.lock import is_pipeline_locked
28
  # NOTE: Phase 1 - API is snapshot-only, no report generation
@@ -219,6 +219,9 @@ async def get_history(
219
  IMPORTANT: sentiment_index of 0.0 is a valid value (neutral sentiment),
220
  not the same as missing data. We return explicit 0.0 values.
221
  """
 
 
 
222
  with SessionLocal() as session:
223
  # Calculate date range
224
  end_date = datetime.now(timezone.utc)
@@ -239,14 +242,28 @@ async def get_history(
239
  detail=f"No price data found for {symbol}"
240
  )
241
 
242
- # Query sentiment
243
- sentiments = session.query(
244
- DailySentiment.date,
245
- DailySentiment.sentiment_index,
246
- DailySentiment.news_count
247
- ).filter(
248
- DailySentiment.date >= start_date
249
- ).order_by(DailySentiment.date.asc()).all()
250
 
251
  # Create sentiment lookup (by date string for easy matching)
252
  sentiment_lookup = {}
 
22
  from sqlalchemy import func
23
 
24
  from app.db import init_db, SessionLocal, get_db_type
25
+ from app.models import NewsArticle, PriceBar, DailySentiment, DailySentimentV2, AnalysisSnapshot
26
  from app.settings import get_settings
27
  from app.lock import is_pipeline_locked
28
  # NOTE: Phase 1 - API is snapshot-only, no report generation
 
219
  IMPORTANT: sentiment_index of 0.0 is a valid value (neutral sentiment),
220
  not the same as missing data. We return explicit 0.0 values.
221
  """
222
+ settings = get_settings()
223
+ source = str(getattr(settings, "scoring_source", "news_articles")).strip().lower()
224
+
225
  with SessionLocal() as session:
226
  # Calculate date range
227
  end_date = datetime.now(timezone.utc)
 
242
  detail=f"No price data found for {symbol}"
243
  )
244
 
245
+ # Query sentiment (prefer V2 when scoring source is news_processed)
246
+ sentiments = []
247
+ if source == "news_processed":
248
+ sentiments = session.query(
249
+ DailySentimentV2.date,
250
+ DailySentimentV2.sentiment_index,
251
+ DailySentimentV2.news_count
252
+ ).filter(
253
+ DailySentimentV2.date >= start_date
254
+ ).order_by(DailySentimentV2.date.asc()).all()
255
+
256
+ if not sentiments:
257
+ logger.warning("No rows in daily_sentiments_v2 for history; falling back to daily_sentiments")
258
+
259
+ if not sentiments:
260
+ sentiments = session.query(
261
+ DailySentiment.date,
262
+ DailySentiment.sentiment_index,
263
+ DailySentiment.news_count
264
+ ).filter(
265
+ DailySentiment.date >= start_date
266
+ ).order_by(DailySentiment.date.asc()).all()
267
 
268
  # Create sentiment lookup (by date string for easy matching)
269
  sentiment_lookup = {}
app/models.py CHANGED
@@ -316,6 +316,10 @@ class PipelineRunMetrics(Base):
316
  news_raw_duplicates = Column(Integer, nullable=True)
317
  news_processed_inserted = Column(Integer, nullable=True)
318
  news_processed_duplicates = Column(Integer, nullable=True)
319
 
320
  # Snapshot info
321
  snapshot_generated = Column(Boolean, default=False)
@@ -424,6 +428,80 @@ class NewsProcessed(Base):
424
 
425
  # Relationship
426
  raw = relationship("NewsRaw", back_populates="processed_items")
 
427
 
428
  def __repr__(self):
429
  return f"<NewsProcessed(id={self.id}, dedup_key='{self.dedup_key[:16]}...')>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
  news_raw_duplicates = Column(Integer, nullable=True)
317
  news_processed_inserted = Column(Integer, nullable=True)
318
  news_processed_duplicates = Column(Integer, nullable=True)
319
+ articles_scored_v2 = Column(Integer, nullable=True)
320
+ llm_parse_fail_count = Column(Integer, nullable=True)
321
+ escalation_count = Column(Integer, nullable=True)
322
+ fallback_count = Column(Integer, nullable=True)
323
 
324
  # Snapshot info
325
  snapshot_generated = Column(Boolean, default=False)
 
428
 
429
  # Relationship
430
  raw = relationship("NewsRaw", back_populates="processed_items")
431
+ sentiment_v2_items = relationship("NewsSentimentV2", back_populates="processed")
432
 
433
  def __repr__(self):
434
  return f"<NewsProcessed(id={self.id}, dedup_key='{self.dedup_key[:16]}...')>"
435
+
436
+
437
+ class NewsSentimentV2(Base):
438
+ """
439
+ Commodity-aware sentiment scores generated from news_processed records.
440
+ """
441
+
442
+ __tablename__ = "news_sentiments_v2"
443
+
444
+ id = Column(BigInteger, primary_key=True, autoincrement=True)
445
+
446
+ news_processed_id = Column(
447
+ BigInteger,
448
+ ForeignKey("news_processed.id", ondelete="CASCADE"),
449
+ nullable=False,
450
+ index=True,
451
+ )
452
+ horizon_days = Column(Integer, nullable=False, default=5)
453
+
454
+ label = Column(String(20), nullable=False, index=True)
455
+ impact_score_llm = Column(Float, nullable=False)
456
+ confidence_llm = Column(Float, nullable=False)
457
+ confidence_calibrated = Column(Float, nullable=False, index=True)
458
+ relevance_score = Column(Float, nullable=False, index=True)
459
+ event_type = Column(String(50), nullable=False, index=True)
460
+ rule_sign = Column(Integer, nullable=False)
461
+ final_score = Column(Float, nullable=False, index=True)
462
+
463
+ finbert_pos = Column(Float, nullable=False)
464
+ finbert_neu = Column(Float, nullable=False)
465
+ finbert_neg = Column(Float, nullable=False)
466
+
467
+ reasoning_json = Column(Text, nullable=True)
468
+ model_fast = Column(String(100), nullable=True)
469
+ model_reliable = Column(String(100), nullable=True)
470
+ scored_at = Column(DateTime(timezone=True), nullable=False, default=datetime.utcnow, index=True)
471
+
472
+ processed = relationship("NewsProcessed", back_populates="sentiment_v2_items")
473
+
474
+ __table_args__ = (
475
+ UniqueConstraint("news_processed_id", "horizon_days", name="uq_news_sentiments_v2_processed_horizon"),
476
+ Index("ix_news_sentiments_v2_processed_scored", "news_processed_id", "scored_at"),
477
+ )
478
+
479
+ def __repr__(self):
480
+ return (
481
+ "<NewsSentimentV2(processed_id="
482
+ f"{self.news_processed_id}, horizon_days={self.horizon_days}, final_score={self.final_score:.3f})>"
483
+ )
484
+
485
+
486
+ class DailySentimentV2(Base):
487
+ """
488
+ Daily aggregate sentiment generated from NewsSentimentV2.
489
+ """
490
+
491
+ __tablename__ = "daily_sentiments_v2"
492
+
493
+ id = Column(BigInteger, primary_key=True, autoincrement=True)
494
+ date = Column(DateTime(timezone=True), nullable=False, unique=True, index=True)
495
+
496
+ sentiment_index = Column(Float, nullable=False, index=True)
497
+ news_count = Column(Integer, nullable=False, default=0)
498
+ avg_confidence = Column(Float, nullable=True)
499
+ avg_relevance = Column(Float, nullable=True)
500
+ source_version = Column(String(20), nullable=False, default="v2")
501
+ aggregated_at = Column(DateTime(timezone=True), nullable=False, default=datetime.utcnow, index=True)
502
+
503
+ def __repr__(self):
504
+ return (
505
+ "<DailySentimentV2(date="
506
+ f"{self.date}, sentiment_index={self.sentiment_index:.3f}, news_count={self.news_count})>"
507
+ )
app/settings.py CHANGED
@@ -53,6 +53,10 @@ class Settings(BaseSettings):
53
  sentiment_soft_neutral_polarity_threshold: float = 0.12
54
  sentiment_soft_neutral_max_mag: float = 0.25
55
  sentiment_soft_neutral_scale: float = 0.8
56
 
57
  # API settings
58
  analysis_ttl_minutes: int = 30
@@ -73,10 +77,12 @@ class Settings(BaseSettings):
73
  # OpenRouter AI Commentary
74
  openrouter_api_key: Optional[str] = None
75
  # Deprecated - kept for backward compatibility
76
- openrouter_model: str = "openai/gpt-oss-120b:free"
77
  # New primary config
78
- openrouter_model_scoring: str = "stepfun/step-3.5-flash:free"
79
- openrouter_model_commentary: str = "stepfun/step-3.5-flash:free"
80
  openrouter_rpm: int = 18
81
  openrouter_max_retries: int = 3
82
  max_llm_articles_per_run: int = 200
@@ -97,7 +103,7 @@ class Settings(BaseSettings):
97
 
98
  # LLM Sentiment Analysis
99
  # Deprecated - kept for backward compatibility
100
- llm_sentiment_model: str = "openai/gpt-oss-120b:free"
101
 
102
  # Pipeline trigger authentication
103
  pipeline_trigger_secret: Optional[str] = None
@@ -194,11 +200,30 @@ class Settings(BaseSettings):
194
  """Preferred scoring model with backward-compatible fallback chain."""
195
  return (
196
  self._first_non_empty(
 
197
  self.openrouter_model_scoring,
198
  self.llm_sentiment_model,
199
  self.openrouter_model,
200
  )
201
- or "stepfun/step-3.5-flash:free"
202
  )
203
 
204
  @property
@@ -210,7 +235,7 @@ class Settings(BaseSettings):
210
  self.openrouter_model,
211
  self.llm_sentiment_model,
212
  )
213
- or "stepfun/step-3.5-flash:free"
214
  )
215
 
216
  @property
 
53
  sentiment_soft_neutral_polarity_threshold: float = 0.12
54
  sentiment_soft_neutral_max_mag: float = 0.25
55
  sentiment_soft_neutral_scale: float = 0.8
56
+ sentiment_relevance_min: float = 0.35
57
+ sentiment_escalate_conflict_threshold: float = 0.55
58
+ sentiment_horizon_days: int = 5
59
+ scoring_source: str = "news_processed"
60
 
61
  # API settings
62
  analysis_ttl_minutes: int = 30
 
77
  # OpenRouter AI Commentary
78
  openrouter_api_key: Optional[str] = None
79
  # Deprecated - kept for backward compatibility
80
+ openrouter_model: str = "arcee-ai/trinity-large-preview:free"
81
  # New primary config
82
+ openrouter_model_scoring: str = "arcee-ai/trinity-large-preview:free"
83
+ openrouter_model_scoring_fast: Optional[str] = None
84
+ openrouter_model_scoring_reliable: Optional[str] = None
85
+ openrouter_model_commentary: str = "arcee-ai/trinity-large-preview:free"
86
  openrouter_rpm: int = 18
87
  openrouter_max_retries: int = 3
88
  max_llm_articles_per_run: int = 200
 
103
 
104
  # LLM Sentiment Analysis
105
  # Deprecated - kept for backward compatibility
106
+ llm_sentiment_model: str = "arcee-ai/trinity-large-preview:free"
107
 
108
  # Pipeline trigger authentication
109
  pipeline_trigger_secret: Optional[str] = None
 
200
  """Preferred scoring model with backward-compatible fallback chain."""
201
  return (
202
  self._first_non_empty(
203
+ self.openrouter_model_scoring_fast,
204
  self.openrouter_model_scoring,
205
  self.llm_sentiment_model,
206
  self.openrouter_model,
207
  )
208
+ or "arcee-ai/trinity-large-preview:free"
209
+ )
210
+
211
+ @property
212
+ def resolved_scoring_fast_model(self) -> str:
213
+ """Fast model used for primary sentiment scoring."""
214
+ return self.resolved_scoring_model
215
+
216
+ @property
217
+ def resolved_scoring_reliable_model(self) -> str:
218
+ """Reliable model used for escalation/retry on malformed outputs."""
219
+ return (
220
+ self._first_non_empty(
221
+ self.openrouter_model_scoring_reliable,
222
+ self.openrouter_model,
223
+ self.llm_sentiment_model,
224
+ self.openrouter_model_scoring,
225
+ )
226
+ or "arcee-ai/trinity-large-preview:free"
227
  )
228
 
229
  @property
 
235
  self.openrouter_model,
236
  self.llm_sentiment_model,
237
  )
238
+ or "arcee-ai/trinity-large-preview:free"
239
  )
240
 
241
  @property
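
With pydantic BaseSettings, the new fields would normally be overridden through the environment (e.g. OPENROUTER_MODEL_SCORING_FAST / OPENROUTER_MODEL_SCORING_RELIABLE under the default upper-case mapping; the variable names are an assumption, not something this commit defines). A minimal sketch of how the resolution chains above behave:

    from app.settings import get_settings

    settings = get_settings()

    # Fast model: openrouter_model_scoring_fast -> openrouter_model_scoring
    #             -> llm_sentiment_model -> openrouter_model -> hard-coded default
    print(settings.resolved_scoring_fast_model)

    # Reliable model: openrouter_model_scoring_reliable -> openrouter_model
    #                 -> llm_sentiment_model -> openrouter_model_scoring -> default
    print(settings.resolved_scoring_reliable_model)
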
app/stepfun_probe.py ADDED
@@ -0,0 +1,237 @@
1
+ """
2
+ StepFun / OpenRouter probe utility for sentiment V2 diagnostics.
3
+
4
+ Usage:
5
+ py -m app.stepfun_probe
6
+ py -m app.stepfun_probe --sample-size 24 --db-limit 24
7
+ py -m app.stepfun_probe --skip-db
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import argparse
13
+ import asyncio
14
+ import json
15
+ import time
16
+ from typing import Any
17
+
18
+ from sqlalchemy.orm import Session
19
+
20
+ from app.ai_engine import (
21
+ LLM_SCORING_PROVIDER_OPTIONS,
22
+ LLM_SCORING_RESPONSE_FORMAT_V2,
23
+ _build_llm_v2_user_prompt,
24
+ score_batch_with_llm_v2,
25
+ )
26
+ from app.db import SessionLocal, init_db
27
+ from app.models import NewsProcessed, NewsRaw
28
+ from app.openrouter_client import OpenRouterError, create_chat_completion
29
+ from app.settings import get_settings
30
+
31
+
32
+ def _build_handcrafted_articles() -> list[dict[str, Any]]:
33
+ return [
34
+ {
35
+ "id": 1,
36
+ "title": "Major copper mine outage in Chile removes 180k tonnes from expected supply",
37
+ "description": "Analysts expect lower exchange inventories and tighter concentrates market.",
38
+ "text": (
39
+ "Major copper mine outage in Chile removes 180k tonnes from expected supply. "
40
+ "Analysts expect lower exchange inventories and tighter concentrates market."
41
+ ),
42
+ },
43
+ {
44
+ "id": 2,
45
+ "title": "China property slowdown deepens and cable demand weakens",
46
+ "description": "Fabricators report softer orders and lower cathode premiums.",
47
+ "text": (
48
+ "China property slowdown deepens and cable demand weakens. "
49
+ "Fabricators report softer orders and lower cathode premiums."
50
+ ),
51
+ },
52
+ {
53
+ "id": 3,
54
+ "title": "Semiconductor patent lawsuit update",
55
+ "description": "No direct discussion of copper demand or supply.",
56
+ "text": "Semiconductor patent lawsuit update with no direct copper market linkage.",
57
+ },
58
+ ]
59
+
60
+
61
+ def _load_db_articles(session: Session, limit: int) -> list[dict[str, Any]]:
62
+ rows = (
63
+ session.query(
64
+ NewsProcessed.id.label("processed_id"),
65
+ NewsRaw.title,
66
+ NewsRaw.description,
67
+ NewsProcessed.cleaned_text,
68
+ NewsRaw.published_at,
69
+ )
70
+ .join(NewsRaw, NewsProcessed.raw_id == NewsRaw.id)
71
+ .order_by(NewsRaw.published_at.desc(), NewsProcessed.id.desc())
72
+ .limit(max(1, int(limit)))
73
+ .all()
74
+ )
75
+
76
+ articles: list[dict[str, Any]] = []
77
+ for row in rows:
78
+ title = str(row.title or "")[:500]
79
+ description = str(row.description or "")[:800]
80
+ text = str(row.cleaned_text or f"{title} {description}")[:1800]
81
+ articles.append(
82
+ {
83
+ "id": int(row.processed_id),
84
+ "title": title,
85
+ "description": description,
86
+ "text": text,
87
+ }
88
+ )
89
+ return articles
90
+
91
+
92
+ async def _run_strict_probe(
93
+ *,
94
+ model: str,
95
+ articles: list[dict[str, Any]],
96
+ ) -> dict[str, Any]:
97
+ settings = get_settings()
98
+ user_prompt = _build_llm_v2_user_prompt(articles, horizon_days=5)
99
+ started = time.perf_counter()
100
+ try:
101
+ data = await create_chat_completion(
102
+ api_key=settings.openrouter_api_key or "",
103
+ model=model,
104
+ messages=[
105
+ {
106
+ "role": "system",
107
+ "content": (
108
+ "You are a Senior Copper Futures Analyst. Return only valid JSON array with keys: "
109
+ "id,label,impact_score,confidence,relevance,event_type,reasoning."
110
+ ),
111
+ },
112
+ {"role": "user", "content": user_prompt},
113
+ ],
114
+ max_tokens=1800,
115
+ temperature=0.0,
116
+ timeout_seconds=60.0,
117
+ max_retries=settings.openrouter_max_retries,
118
+ rpm=settings.openrouter_rpm,
119
+ fallback_models=settings.openrouter_fallback_models_list,
120
+ response_format=LLM_SCORING_RESPONSE_FORMAT_V2,
121
+ provider=LLM_SCORING_PROVIDER_OPTIONS,
122
+ extra_payload={"reasoning": {"exclude": True}},
123
+ )
124
+ elapsed = time.perf_counter() - started
125
+ choice = data.get("choices", [{}])[0]
126
+ content = choice.get("message", {}).get("content")
127
+ return {
128
+ "ok": True,
129
+ "elapsed_sec": round(elapsed, 2),
130
+ "finish_reason": choice.get("finish_reason"),
131
+ "content_len": len(content or ""),
132
+ "preview": str(content)[:240],
133
+ }
134
+ except OpenRouterError as exc:
135
+ elapsed = time.perf_counter() - started
136
+ return {
137
+ "ok": False,
138
+ "elapsed_sec": round(elapsed, 2),
139
+ "error_type": "OpenRouterError",
140
+ "status_code": exc.status_code,
141
+ "message": str(exc)[:280],
142
+ }
143
+ except Exception as exc: # noqa: BLE001
144
+ elapsed = time.perf_counter() - started
145
+ return {
146
+ "ok": False,
147
+ "elapsed_sec": round(elapsed, 2),
148
+ "error_type": type(exc).__name__,
149
+ "message": str(exc)[:280],
150
+ }
151
+
152
+
153
+ async def _run_v2_probe(
154
+ *,
155
+ articles: list[dict[str, Any]],
156
+ horizon_days: int,
157
+ ) -> dict[str, Any]:
158
+ started = time.perf_counter()
159
+ bundle = await score_batch_with_llm_v2(articles, horizon_days=horizon_days)
160
+ elapsed = time.perf_counter() - started
161
+
162
+ sample_results = []
163
+ for item in bundle.get("results", [])[:3]:
164
+ sample_results.append(
165
+ {
166
+ "id": item.get("id"),
167
+ "label": item.get("label"),
168
+ "impact_score": item.get("impact_score"),
169
+ "confidence": item.get("confidence"),
170
+ "relevance": item.get("relevance"),
171
+ "event_type": item.get("event_type"),
172
+ }
173
+ )
174
+
175
+ return {
176
+ "elapsed_sec": round(elapsed, 2),
177
+ "result_count": len(bundle.get("results", [])),
178
+ "fallback_count": int(bundle.get("fallback_count", 0)),
179
+ "parse_fail_count": int(bundle.get("parse_fail_count", 0)),
180
+ "escalation_count": int(bundle.get("escalation_count", 0)),
181
+ "failed_ids": bundle.get("failed_ids", []),
182
+ "model_fast": bundle.get("model_fast"),
183
+ "model_reliable": bundle.get("model_reliable"),
184
+ "sample_results": sample_results,
185
+ }
186
+
187
+
188
+ async def _run_probe(sample_size: int, db_limit: int, skip_db: bool) -> None:
189
+ settings = get_settings()
190
+ fast_model = settings.resolved_scoring_fast_model
191
+ reliable_model = settings.resolved_scoring_reliable_model
192
+
193
+ print("=== StepFun Probe ===")
194
+ print(f"fast_model={fast_model}")
195
+ print(f"reliable_model={reliable_model}")
196
+ print(f"openrouter_rpm={settings.openrouter_rpm} max_retries={settings.openrouter_max_retries}")
197
+
198
+ handcrafted = _build_handcrafted_articles()
199
+
200
+ strict_summary = await _run_strict_probe(model=fast_model, articles=handcrafted)
201
+ print("\n[1] strict_schema_probe")
202
+ print(json.dumps(strict_summary, ensure_ascii=True, indent=2))
203
+
204
+ v2_smoke = await _run_v2_probe(articles=handcrafted, horizon_days=5)
205
+ print("\n[2] v2_smoke_probe_handcrafted")
206
+ print(json.dumps(v2_smoke, ensure_ascii=True, indent=2))
207
+
208
+ if skip_db:
209
+ return
210
+
211
+ with SessionLocal() as session:
212
+ db_articles = _load_db_articles(session, limit=db_limit)
213
+ if not db_articles:
214
+ print("\n[3] v2_db_probe: no articles found in news_processed")
215
+ return
216
+
217
+ db_probe_articles = db_articles[: max(1, min(sample_size, len(db_articles)))]
218
+ v2_db = await _run_v2_probe(articles=db_probe_articles, horizon_days=5)
219
+ print("\n[3] v2_db_probe")
220
+ print(f"sampled_articles={len(db_probe_articles)}")
221
+ print(json.dumps(v2_db, ensure_ascii=True, indent=2))
222
+
223
+
224
+ def main() -> None:
225
+ parser = argparse.ArgumentParser(description="Probe StepFun/OpenRouter behavior for sentiment V2.")
226
+ parser.add_argument("--sample-size", type=int, default=12, help="How many DB articles to score in DB probe.")
227
+ parser.add_argument("--db-limit", type=int, default=24, help="How many latest DB articles to fetch before sampling.")
228
+ parser.add_argument("--skip-db", action="store_true", help="Skip DB probe; run only strict + handcrafted tests.")
229
+ args = parser.parse_args()
230
+
231
+ init_db()
232
+ asyncio.run(_run_probe(sample_size=args.sample_size, db_limit=args.db_limit, skip_db=args.skip_db))
233
+
234
+
235
+ if __name__ == "__main__":
236
+ main()
237
+
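
Besides the CLI shown in the module docstring, the probe coroutines can be driven directly; a minimal sketch (not part of the commit), assuming an OpenRouter API key is already configured via settings or the environment:

    import asyncio

    from app.stepfun_probe import _build_handcrafted_articles, _run_v2_probe

    summary = asyncio.run(
        _run_v2_probe(articles=_build_handcrafted_articles(), horizon_days=5)
    )
    print(summary["result_count"], summary["parse_fail_count"], summary["escalation_count"])
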
migrations/002_sentiment_v2.sql ADDED
@@ -0,0 +1,76 @@
1
+ -- Migration: 002_sentiment_v2.sql
2
+ -- Commodity-aware sentiment pipeline (V2)
3
+ --
4
+ -- Adds:
5
+ -- 1. news_sentiments_v2 (article-level scores on news_processed)
6
+ -- 2. daily_sentiments_v2 (daily aggregate from V2 scores)
7
+ -- 3. Stage-2 V2 run metrics columns on pipeline_run_metrics
8
+
9
+ BEGIN;
10
+
11
+ CREATE TABLE IF NOT EXISTS news_sentiments_v2 (
12
+ id BIGSERIAL PRIMARY KEY,
13
+ news_processed_id BIGINT NOT NULL REFERENCES news_processed(id) ON DELETE CASCADE,
14
+ horizon_days INTEGER NOT NULL DEFAULT 5,
15
+
16
+ label VARCHAR(20) NOT NULL,
17
+ impact_score_llm FLOAT NOT NULL,
18
+ confidence_llm FLOAT NOT NULL,
19
+ confidence_calibrated FLOAT NOT NULL,
20
+ relevance_score FLOAT NOT NULL,
21
+ event_type VARCHAR(50) NOT NULL,
22
+ rule_sign INTEGER NOT NULL,
23
+ final_score FLOAT NOT NULL,
24
+
25
+ finbert_pos FLOAT NOT NULL,
26
+ finbert_neu FLOAT NOT NULL,
27
+ finbert_neg FLOAT NOT NULL,
28
+
29
+ reasoning_json TEXT,
30
+ model_fast VARCHAR(100),
31
+ model_reliable VARCHAR(100),
32
+ scored_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
33
+
34
+ CONSTRAINT uq_news_sentiments_v2_processed_horizon UNIQUE (news_processed_id, horizon_days)
35
+ );
36
+
37
+ CREATE INDEX IF NOT EXISTS ix_news_sentiments_v2_processed_scored
38
+ ON news_sentiments_v2 (news_processed_id, scored_at);
39
+
40
+ CREATE INDEX IF NOT EXISTS ix_news_sentiments_v2_final_score
41
+ ON news_sentiments_v2 (final_score);
42
+
43
+ CREATE INDEX IF NOT EXISTS ix_news_sentiments_v2_label
44
+ ON news_sentiments_v2 (label);
45
+
46
+ CREATE TABLE IF NOT EXISTS daily_sentiments_v2 (
47
+ id BIGSERIAL PRIMARY KEY,
48
+ date TIMESTAMPTZ NOT NULL UNIQUE,
49
+ sentiment_index FLOAT NOT NULL,
50
+ news_count INTEGER NOT NULL DEFAULT 0,
51
+ avg_confidence FLOAT,
52
+ avg_relevance FLOAT,
53
+ source_version VARCHAR(20) NOT NULL DEFAULT 'v2',
54
+ aggregated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
55
+ );
56
+
57
+ CREATE INDEX IF NOT EXISTS ix_daily_sentiments_v2_date
58
+ ON daily_sentiments_v2 (date);
59
+
60
+ CREATE INDEX IF NOT EXISTS ix_daily_sentiments_v2_index
61
+ ON daily_sentiments_v2 (sentiment_index);
62
+
63
+ ALTER TABLE pipeline_run_metrics
64
+ ADD COLUMN IF NOT EXISTS articles_scored_v2 INTEGER;
65
+
66
+ ALTER TABLE pipeline_run_metrics
67
+ ADD COLUMN IF NOT EXISTS llm_parse_fail_count INTEGER;
68
+
69
+ ALTER TABLE pipeline_run_metrics
70
+ ADD COLUMN IF NOT EXISTS escalation_count INTEGER;
71
+
72
+ ALTER TABLE pipeline_run_metrics
73
+ ADD COLUMN IF NOT EXISTS fallback_count INTEGER;
74
+
75
+ COMMIT;
76
+
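
The script is idempotent (CREATE TABLE IF NOT EXISTS / ADD COLUMN IF NOT EXISTS) and can be applied with psql -f migrations/002_sentiment_v2.sql. A minimal post-migration check (not part of the commit; the app.db engine import path is an assumption):

    from sqlalchemy import inspect

    from app.db import engine  # assumed export; adjust to however app.db exposes its engine

    insp = inspect(engine)
    assert "news_sentiments_v2" in insp.get_table_names()
    assert "daily_sentiments_v2" in insp.get_table_names()

    metric_cols = {c["name"] for c in insp.get_columns("pipeline_run_metrics")}
    assert {"articles_scored_v2", "llm_parse_fail_count", "escalation_count", "fallback_count"} <= metric_cols
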
worker/tasks.py CHANGED
@@ -386,16 +386,30 @@ async def _execute_pipeline_stages_v2(
386
  session.rollback()
387
 
388
  # -------------------------------------------------------------------------
389
- # Stage 2: Sentiment scoring (existing - uses news_articles for now)
390
  # -------------------------------------------------------------------------
391
  logger.info(f"[run_id={run_id}] Stage 2: Sentiment scoring")
392
  try:
393
- from app.ai_engine import score_unscored_articles
394
-
395
- scored = score_unscored_articles(session)
396
  session.commit()
397
-
398
- result["articles_scored"] = scored
399
 
400
  except Exception as e:
401
  logger.error(f"[run_id={run_id}] Stage 2 failed: {e}")
@@ -407,12 +421,14 @@ async def _execute_pipeline_stages_v2(
407
  # -------------------------------------------------------------------------
408
  logger.info(f"[run_id={run_id}] Stage 3: Sentiment aggregation")
409
  try:
410
- from app.ai_engine import aggregate_daily_sentiment
411
-
 
412
  days_aggregated = aggregate_daily_sentiment(session)
413
  session.commit()
414
 
415
  result["days_aggregated"] = days_aggregated
 
416
 
417
  except Exception as e:
418
  logger.error(f"[run_id={run_id}] Stage 3 failed: {e}")
 
386
  session.rollback()
387
 
388
  # -------------------------------------------------------------------------
389
+ # Stage 2: Sentiment scoring (V2 - news_processed based)
390
  # -------------------------------------------------------------------------
391
  logger.info(f"[run_id={run_id}] Stage 2: Sentiment scoring")
392
  try:
393
+ from app.ai_engine import score_unscored_processed_articles
394
+
395
+ scoring_stats = score_unscored_processed_articles(session)
396
+ session.commit()
397
+
398
+ result["articles_scored"] = int(scoring_stats.get("scored_count", 0))
399
+ result["articles_scored_v2"] = int(scoring_stats.get("scored_count", 0))
400
+ result["llm_parse_fail_count"] = int(scoring_stats.get("parse_fail_count", 0))
401
+ result["escalation_count"] = int(scoring_stats.get("escalation_count", 0))
402
+ result["fallback_count"] = int(scoring_stats.get("fallback_count", 0))
403
+
404
+ update_run_metrics(
405
+ session,
406
+ run_id,
407
+ articles_scored_v2=result["articles_scored_v2"],
408
+ llm_parse_fail_count=result["llm_parse_fail_count"],
409
+ escalation_count=result["escalation_count"],
410
+ fallback_count=result["fallback_count"],
411
+ )
412
  session.commit()
413
 
414
  except Exception as e:
415
  logger.error(f"[run_id={run_id}] Stage 2 failed: {e}")
 
421
  # -------------------------------------------------------------------------
422
  logger.info(f"[run_id={run_id}] Stage 3: Sentiment aggregation")
423
  try:
424
+ from app.ai_engine import aggregate_daily_sentiment, aggregate_daily_sentiment_v2
425
+
426
+ days_aggregated_v2 = aggregate_daily_sentiment_v2(session)
427
  days_aggregated = aggregate_daily_sentiment(session)
428
  session.commit()
429
 
430
  result["days_aggregated"] = days_aggregated
431
+ result["days_aggregated_v2"] = days_aggregated_v2
432
 
433
  except Exception as e:
434
  logger.error(f"[run_id={run_id}] Stage 3 failed: {e}")