navaneethkrishnan commited on
Commit
cc48f68
·
verified ·
1 Parent(s): cdd5fe7

Create nlp/detectors.py

Browse files
Files changed (1) hide show
  1. nlp/detectors.py +110 -0
nlp/detectors.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Keyword-based heuristic detectors for compliance signals in financial text.

Every detector lower-cases the input and counts occurrences of a curated
vocabulary set — no statistical NLP model is involved.
"""

import re, math
from typing import List, Dict


def LOWER(s):
    """Return *s* lower-cased, with None/empty treated as "".

    Kept as an upper-case name for backward compatibility with callers of
    the original ``LOWER = lambda ...`` binding (PEP 8 discourages assigning
    a lambda to a name; a def is equivalent and debuggable).
    """
    return (s or "").lower()


# Hedged / tentative language (signals appropriate uncertainty).
HEDGING = {
    "may", "might", "could", "possibly", "likely", "uncertain", "approx", "around"
}
# Absolute / overconfident claims (red flags in financial advice).
ABSOLUTES = {
    "guaranteed", "risk-free", "always", "never", "certain", "100%", "will absolutely"
}
# Explicit mentions of investment risks.
RISK_TERMS = {
    "volatility", "drawdown", "inflation", "credit risk", "liquidity", "default", "market risk"
}
# Diversification / allocation concepts.
DIVERSIFY_TERMS = {"diversify", "diversification", "spread across", "asset allocation"}
# Marketing phrases suggesting a product push / conflict of interest.
PRODUCT_PUSH = {"our fund", "proprietary", "exclusive product", "sign up", "click here"}
# Fiduciary-duty language.
FIDUCIARY_TERMS = {"in your best interest", "suitable given", "fiduciary"}
# References to the client's personal circumstances.
PERSONAL_TERMS = {"your income", "your goals", "your family", "your age", "your debts", "time horizon"}
# Leading words that typically introduce a disclosure section.
DISCLOSURE_LEADS = {"disclosure", "warning", "important", "note:"}

# Finance jargon used for readability/jargon-density scoring.
FINANCE_JARGON = {
    "alpha", "beta", "sharpe", "sortino", "duration", "convexity", "basis points",
    "drawdown", "liquidity", "hedge", "derivative", "yield curve", "contango", "backwardation",
    "terminal value", "cap rate", "wacc", "ebitda", "cagr", "irr", "annuity", "treasury spread"
}
26
+
27
def count_terms(text: str, vocab: set) -> int:
    """Count case-insensitive occurrences of every term in *vocab* within *text*.

    Fix over the original: matching is now word-boundary aware, so "may" no
    longer matches inside "maybe"/"mayor" and "alpha" no longer matches
    "alphabet" (the original used plain substring ``str.count``).  A ``\\b``
    anchor is only applied at a term edge that is a word character, so terms
    like "100%" (non-word trailing char) keep working.

    Parameters
    ----------
    text : str
        Input text; None is treated as "".
    vocab : set
        Lower-case terms (single- or multi-word) to count.

    Returns
    -------
    int
        Total number of matches summed across all terms.
    """
    t = (text or "").lower()
    total = 0
    for term in vocab:
        if not term:
            continue  # an empty term would match at every position
        # Anchor with \b only where the term edge is a word character.
        left = r"\b" if (term[0].isalnum() or term[0] == "_") else ""
        right = r"\b" if (term[-1].isalnum() or term[-1] == "_") else ""
        total += len(re.findall(left + re.escape(term) + right, t))
    return total
33
+
34
+
35
def disclaimer_present(text: str) -> int:
    """Return 1 when the text carries an explicit disclaimer marker, else 0."""
    lowered = (text or "").lower()
    markers = ("disclaimer", "this is not financial advice")
    return int(any(marker in lowered for marker in markers))
40
+
41
+
42
def disclaimer_position(text: str) -> str:
    """Classify where a disclaimer/warning marker sits in *text*.

    Returns "start" when the lowered text begins with "disclaimer" or
    "warning", "middle" when either word appears anywhere else, and "end"
    otherwise.

    NOTE(review): "end" is also returned when no marker exists at all, and a
    marker at the literal end of the text still yields "middle" — confirm
    this is what downstream scoring expects.
    """
    lowered = (text or "").lower()
    if lowered.startswith(("disclaimer", "warning")):
        return "start"
    has_marker = ("disclaimer" in lowered) or ("warning" in lowered)
    return "middle" if has_marker else "end"
49
+
50
+
51
def risk_confidence_ratio(text: str) -> float:
    """Share of hedging language among hedging + absolute terms, in [0, 1].

    Returns 0.5 (neutral) when neither vocabulary produces a hit.
    """
    hedges = count_terms(text, HEDGING)
    absolutes = count_terms(text, ABSOLUTES)
    total = hedges + absolutes
    return hedges / float(total) if total else 0.5
57
+
58
+
59
def overconfidence_hits(text: str) -> int:
    """Number of absolute / overconfident phrases found in *text*."""
    hits = count_terms(text, ABSOLUTES)
    return hits
61
+
62
+
63
def conflict_terms_present(text: str) -> int:
    """1 if any product-push phrase appears (possible conflict of interest)."""
    return int(count_terms(text, PRODUCT_PUSH) > 0)
65
+
66
+
67
def risk_disclosure_present(text: str) -> int:
    """1 when at least one risk-related term is mentioned, else 0."""
    return int(bool(count_terms(text, RISK_TERMS)))
69
+
70
+
71
def diversification_present(text: str) -> int:
    """1 when the text mentions diversification concepts, else 0."""
    return 0 if count_terms(text, DIVERSIFY_TERMS) == 0 else 1
73
+
74
+
75
def personalization_density(text: str) -> float:
    """Personal-reference hits normalised per ~200-word chunk, capped at 1.0."""
    word_count = max(1, len(text.split()))
    personal_hits = count_terms(text, PERSONAL_TERMS)
    chunks = max(1.0, word_count / 200.0)  # number of ~200-word chunks
    return min(1.0, personal_hits / chunks)
79
+
80
+
81
def fiduciary_flag(text: str) -> int:
    """1 when fiduciary-duty language appears in the text, else 0."""
    found = count_terms(text, FIDUCIARY_TERMS) > 0
    return 1 if found else 0
83
+
84
+
85
def product_push_penalty(text: str) -> int:
    """1 when product-push marketing phrases appear, else 0."""
    return int(count_terms(text, PRODUCT_PUSH) > 0)
87
+
88
+
89
def jargon_density(text: str) -> float:
    """Finance-jargon hits per word, capped at 1.0."""
    denom = max(1.0, float(max(1, len(text.split()))))
    jargon_hits = count_terms(text, FINANCE_JARGON)
    return min(1.0, jargon_hits / denom)
93
+
94
# Readability proxy (FKGL). Prefer textstat when it is installed; otherwise
# fall back to a crude average-sentence-length estimate.
try:
    import textstat

    def risk_readability_index(text: str) -> float:
        """Flesch-Kincaid grade via textstat, clamped to [0, 20]; 12.0 on error."""
        try:
            grade = float(textstat.flesch_kincaid_grade(text))
        except Exception:
            return 12.0  # conservative default when textstat chokes on input
        return max(0.0, min(20.0, grade))
except Exception:
    def risk_readability_index(text: str) -> float:
        """Crude grade-level proxy from average sentence length, in [0, 20].

        Counts '.', '!' and '?' as sentence terminators and scales the
        average words-per-sentence by 0.8 (so ~10 words/sentence maps to
        roughly grade 8).
        """
        terminators = text.count('.') + text.count('!') + text.count('?')
        sentence_count = max(1, terminators)
        word_count = max(1, len(text.split()))
        avg_sentence_len = word_count / sentence_count
        return max(0.0, min(20.0, 0.8 * avg_sentence_len))