a5-628 committed on
Commit
c8e5fa0
·
0 Parent(s):

Initial FraudLens AI MCP demo

Browse files
.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # SECURITY: a live Anthropic API key was committed here. Revoke/rotate it
+ # immediately in the Anthropic console and supply secrets via the deployment
+ # environment (e.g. HF Space secrets), never via version-controlled files.
+ ANTHROPIC_API_KEY=REDACTED-ROTATE-THIS-KEY
2
+ FRAUDLENS_DEBUG=true
README.md ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FraudLens AI - Multimodal Fraud Detection Agent
2
+
3
+ Initial scaffold for the FraudLens AI hackathon project.
4
+
5
+ ## Quick start (dev)
6
+
7
+ 1. Create and activate a virtual environment.
8
+ 2. Install dependencies:
9
+ ```bash
10
+ pip install -r requirements.txt
11
+ ```
12
+ 3. Create a `.env` file with the required API keys (see `fraudlens_app/config.py`).
13
+ 4. Run the main Gradio app:
14
+ ```bash
15
+ python -m fraudlens_app.main_app
16
+ ```
17
+ 5. Run the Vision Forensics MCP server (stub):
18
+ ```bash
19
+ python -m mcp_servers.vision_forensics.server
20
+ ```
21
+
22
+ Further documentation, MCP wiring, and fine-tuning pipeline will be added as we implement features.
app.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import gradio as gr
4
+
5
+ from fraudlens_app.main_app import build_interface as build_main_interface
6
+ from mcp_servers.vision_forensics.server import (
7
+ build_vision_forensics_interface,
8
+ )
9
+ from mcp_servers.fraud_intelligence.server import (
10
+ build_fraud_intel_interface,
11
+ )
12
+
13
+
14
# Top-level `app` Blocks object so hosting platforms (e.g. HF Spaces) can
# discover and serve it without calling launch() explicitly.
with gr.Blocks(title="FraudLens AI - MCP Demo") as app:
    gr.Markdown("# FraudLens AI – Multimodal Fraud Detection Agent")
    gr.Markdown(
        "Unified demo Space exposing the main FraudLens agent and two custom "
        "MCP-style servers (Vision Forensics and Fraud Intelligence)."
    )

    # Each build_* helper constructs its own gr.Blocks; the return values were
    # previously bound to unused locals, which this drops.
    # NOTE(review): nesting a Blocks inside a Tab relies on Gradio re-rendering
    # the child layout under this parent — confirm against the Gradio version
    # pinned in requirements.txt.
    with gr.Tab("FraudLens Agent"):
        build_main_interface()

    with gr.Tab("Vision Forensics MCP"):
        build_vision_forensics_interface()

    with gr.Tab("Fraud Intelligence MCP"):
        build_fraud_intel_interface()


if __name__ == "__main__":
    app.launch()
fraudlens_app/__init__.py ADDED
File without changes
fraudlens_app/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (156 Bytes). View file
 
fraudlens_app/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
fraudlens_app/__pycache__/main_app.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
fraudlens_app/__pycache__/reasoning.cpython-310.pyc ADDED
Binary file (4.83 kB). View file
 
fraudlens_app/config.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from __future__ import annotations

import os
from dataclasses import dataclass, field
from typing import Optional

try:
    from dotenv import load_dotenv
except ImportError:  # pragma: no cover
    # python-dotenv is optional at runtime: fall back to a no-op so importing
    # this config module never crashes in environments without the package.
    def load_dotenv(*_args: object, **_kwargs: object) -> bool:
        """No-op stand-in for dotenv.load_dotenv."""
        return False


# Populate os.environ from a local .env file, if one exists.
load_dotenv()


@dataclass
class APIConfig:
    """API credentials for external services, read from the environment.

    Each field uses ``field(default_factory=...)`` so the environment is
    re-read every time an APIConfig is instantiated.  The previous plain
    defaults called ``os.getenv`` at class-definition (import) time, freezing
    whatever the environment held when the module was first imported.
    """

    anthropic_api_key: Optional[str] = field(default_factory=lambda: os.getenv("ANTHROPIC_API_KEY"))
    openai_api_key: Optional[str] = field(default_factory=lambda: os.getenv("OPENAI_API_KEY"))
    gemini_api_key: Optional[str] = field(default_factory=lambda: os.getenv("GEMINI_API_KEY"))
    hf_api_token: Optional[str] = field(default_factory=lambda: os.getenv("HF_API_TOKEN"))
    modal_token_id: Optional[str] = field(default_factory=lambda: os.getenv("MODAL_TOKEN_ID"))
    modal_token_secret: Optional[str] = field(default_factory=lambda: os.getenv("MODAL_TOKEN_SECRET"))
    elevenlabs_api_key: Optional[str] = field(default_factory=lambda: os.getenv("ELEVENLABS_API_KEY"))


@dataclass
class AppConfig:
    """Static application settings plus the FRAUDLENS_DEBUG toggle."""

    # Debug flag is env-driven, so read it per-instance like the API keys.
    debug: bool = field(
        default_factory=lambda: os.getenv("FRAUDLENS_DEBUG", "false").lower() == "true"
    )
    app_title: str = "FraudLens AI - Marketplace Fraud Detection"
    app_description: str = (
        "Multimodal fraud detection for second-hand marketplace listings "
        "using vision-language models and Chain-of-Thought reasoning."
    )


# Shared singleton instances used across the app.
api_config = APIConfig()
app_config = AppConfig()
fraudlens_app/main_app.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict
4
+
5
+ import gradio as gr
6
+
7
+ from .config import app_config, api_config
8
+ from .reasoning import ReasoningEngine
9
+ from mcp_servers.vision_forensics.server import (
10
+ detect_image_manipulation,
11
+ extract_image_metadata,
12
+ reverse_image_search,
13
+ compare_image_quality,
14
+ detect_ai_generated_image,
15
+ )
16
+
17
+
18
def build_interface() -> gr.Blocks:
    """Build the main FraudLens Gradio interface.

    Wires listing inputs (image, description, price, category, seller info)
    to the local Vision Forensics tools and the Claude-backed ReasoningEngine,
    and renders risk score, reasoning chain, and status.
    """
    with gr.Blocks(title=app_config.app_title) as demo:
        gr.Markdown(f"# {app_config.app_title}")
        gr.Markdown(app_config.app_description)

        with gr.Row():
            with gr.Column(scale=1):
                image_input = gr.Image(label="Listing Images", type="filepath")
                description_input = gr.Textbox(
                    label="Listing Description",
                    placeholder="Paste the marketplace ad description...",
                    lines=5,
                )
                price_input = gr.Textbox(label="Price", placeholder="e.g. 120.00")
                category_input = gr.Textbox(label="Category", placeholder="e.g. Smartphone")
                seller_info_input = gr.Textbox(
                    label="Seller Info",
                    placeholder="Seller ID, rating, history, etc.",
                    lines=3,
                )
                analyze_btn = gr.Button("Analyze Fraud Risk", variant="primary")

            with gr.Column(scale=1):
                risk_score_output = gr.Number(label="Risk Score (0-100)")
                reasoning_output = gr.JSON(label="Reasoning Chain")

        status_output = gr.Markdown("Ready.")

        def _analyze(
            image_path: str, desc: str, price: str, category: str, seller: str
        ) -> tuple[Any, Dict[str, Any], str]:
            # FIX: the original annotation claimed `-> Dict[str, Any]`, but the
            # function returns a 3-tuple matching the three output components
            # (risk score, reasoning JSON, status markdown).
            if not api_config.anthropic_api_key:
                return 0, {"error": "ANTHROPIC_API_KEY not configured"}, "Missing Anthropic API key."

            try:
                vision_signals: Dict[str, Any] = {}
                if image_path:
                    # Call local Vision Forensics tools; in a full MCP setup these
                    # would be remote tool calls instead of direct imports.
                    vision_signals = {
                        "manipulation": detect_image_manipulation(image_path),
                        "metadata": extract_image_metadata(image_path),
                        "reverse_search": reverse_image_search(image_path),
                        "ai_generated": detect_ai_generated_image(image_path),
                        # category-aware quality placeholder
                        "quality": compare_image_quality(image_path, category or ""),
                    }

                payload: Dict[str, Any] = {
                    "image_path": image_path,
                    "description": desc,
                    "price": price,
                    "category": category,
                    "seller_info": seller,
                    "vision_forensics": vision_signals,
                }

                engine = ReasoningEngine()
                result = engine.analyze_listing(payload)
                # Pass through structured reasoning (steps, confidence, flags, etc.)
                structured = {
                    "steps": result.get("steps", {}),
                    "risk_score": result.get("risk_score", 0),
                    "confidence": result.get("confidence", 0.0),
                    "flags": result.get("flags", []),
                }
                return structured["risk_score"], structured, "Analysis complete."
            except Exception as exc:  # pylint: disable=broad-except
                return 0, {"error": str(exc)}, "Error during analysis. Check logs."

        analyze_btn.click(
            _analyze,
            inputs=[image_input, description_input, price_input, category_input, seller_info_input],
            outputs=[risk_score_output, reasoning_output, status_output],
        )

    return demo
97
+
98
+
99
+ if __name__ == "__main__":
100
+ iface = build_interface()
101
+ iface.launch()
fraudlens_app/reasoning.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict
4
+
5
+ import json
6
+
7
+ import anthropic
8
+
9
+ from .config import api_config
10
+
11
+
12
+ class ReasoningEngine:
13
+ """Wrapper around Anthropic Claude for structured Chain-of-Thought reasoning.
14
+
15
+ Enforces a 4-step JSON schema:
16
+
17
+ {
18
+ "steps": {
19
+ "visual_analysis": {...},
20
+ "textual_analysis": {...},
21
+ "cross_validation": {...},
22
+ "risk_scoring": {...}
23
+ },
24
+ "risk_score": int, # 0-100
25
+ "confidence": float, # 0-1
26
+ "flags": [ {"type": str, "severity": str, "message": str} ]
27
+ }
28
+ """
29
+
30
+ def __init__(self) -> None:
31
+ if not api_config.anthropic_api_key:
32
+ raise RuntimeError("ANTHROPIC_API_KEY is not set")
33
+
34
+ # Use official Anthropic client for correct base URL, headers, and
35
+ # versioning.
36
+ self._client = anthropic.Anthropic(api_key=api_config.anthropic_api_key)
37
+
38
+ def _default_response(self, raw_text: str | None, raw: Dict[str, Any]) -> Dict[str, Any]:
39
+ """Fallback response if Claude does not return valid JSON."""
40
+
41
+ reasoning = raw_text or "Model response missing or unparsable; using fallback schema."
42
+ return {
43
+ "steps": {
44
+ "visual_analysis": {
45
+ "summary": "Not available - JSON parse failed.",
46
+ "signals": [],
47
+ },
48
+ "textual_analysis": {
49
+ "summary": "Not available - JSON parse failed.",
50
+ "signals": [],
51
+ },
52
+ "cross_validation": {
53
+ "summary": "Not available - JSON parse failed.",
54
+ "inconsistencies": [],
55
+ },
56
+ "risk_scoring": {
57
+ "summary": reasoning,
58
+ "score": 50,
59
+ "confidence": 0.3,
60
+ "explanation": reasoning,
61
+ },
62
+ },
63
+ "risk_score": 50,
64
+ "confidence": 0.3,
65
+ "flags": [
66
+ {
67
+ "type": "model_output_error",
68
+ "severity": "medium",
69
+ "message": "Claude did not return valid JSON in the expected schema.",
70
+ }
71
+ ],
72
+ "raw_response": raw,
73
+ }
74
+
75
+ def analyze_listing(self, payload: Dict[str, Any]) -> Dict[str, Any]:
76
+ """Call Claude with a strict JSON schema for listing analysis."""
77
+
78
+ schema_instructions = """
79
+ You MUST respond with a single JSON object only, no prose, following this schema:
80
+ {
81
+ "steps": {
82
+ "visual_analysis": {
83
+ "summary": "string - short explanation of visual findings",
84
+ "signals": [
85
+ {"name": "string", "weight": "low|medium|high", "evidence": "string"}
86
+ ]
87
+ },
88
+ "textual_analysis": {
89
+ "summary": "string - short explanation of textual findings",
90
+ "signals": [
91
+ {"name": "string", "weight": "low|medium|high", "evidence": "string"}
92
+ ]
93
+ },
94
+ "cross_validation": {
95
+ "summary": "string - how visuals, text, price, and seller info align or conflict",
96
+ "inconsistencies": [
97
+ {"description": "string", "impact": "low|medium|high"}
98
+ ]
99
+ },
100
+ "risk_scoring": {
101
+ "summary": "string - overall rationale",
102
+ "score": 0-100,
103
+ "confidence": 0.0-1.0,
104
+ "explanation": "string - concise reasoning chain"
105
+ }
106
+ },
107
+ "risk_score": 0-100,
108
+ "confidence": 0.0-1.0,
109
+ "flags": [
110
+ {"type": "string", "severity": "low|medium|high|critical", "message": "string"}
111
+ ]
112
+ }
113
+ """
114
+
115
+ system_prompt = (
116
+ "You are FraudLens AI, an expert fraud detection agent for "
117
+ "second-hand marketplaces. Perform a 4-step analysis: "
118
+ "(1) visual analysis, (2) textual analysis, (3) cross-validation "
119
+ "between visuals/text/price/seller, and (4) risk scoring. "
120
+ "Return ONLY valid JSON, no markdown, following the provided schema."
121
+ )
122
+
123
+ user_content = (
124
+ "Analyze the following marketplace listing and respond strictly "
125
+ "with JSON in the required schema. Listing payload (JSON):\n" f"{payload}\n\n" + schema_instructions
126
+ )
127
+
128
+ try:
129
+ msg = self._client.messages.create(
130
+ # Use a stable, current Claude Sonnet alias
131
+ model="claude-3-7-sonnet-latest",
132
+ max_tokens=900,
133
+ temperature=0.2,
134
+ system=system_prompt,
135
+ messages=[
136
+ {"role": "user", "content": user_content},
137
+ ],
138
+ )
139
+ except Exception as exc: # pragma: no cover - network / API errors
140
+ # Surface the error in a structured way to the UI
141
+ return {
142
+ "steps": {},
143
+ "risk_score": 0,
144
+ "confidence": 0.0,
145
+ "flags": [
146
+ {
147
+ "type": "anthropic_error",
148
+ "severity": "high",
149
+ "message": str(exc),
150
+ }
151
+ ],
152
+ "raw_response": {},
153
+ }
154
+
155
+ # Anthropic SDK returns an object with .content as a list of blocks
156
+ data: Dict[str, Any] = msg.model_dump() # for logging / debugging
157
+
158
+ raw_text: str | None = None
159
+ if msg.content and isinstance(msg.content, list):
160
+ first_block = msg.content[0]
161
+ # For TextBlock, attribute is .text
162
+ raw_text = getattr(first_block, "text", None)
163
+
164
+ if not raw_text:
165
+ return self._default_response(None, data)
166
+
167
+ try:
168
+ parsed = json.loads(raw_text)
169
+ except json.JSONDecodeError:
170
+ return self._default_response(raw_text, data)
171
+
172
+ # Ensure top-level keys exist; fill safe defaults if missing
173
+ parsed.setdefault("steps", {})
174
+ steps = parsed["steps"]
175
+ steps.setdefault("visual_analysis", {"summary": "", "signals": []})
176
+ steps.setdefault("textual_analysis", {"summary": "", "signals": []})
177
+ steps.setdefault("cross_validation", {"summary": "", "inconsistencies": []})
178
+ steps.setdefault(
179
+ "risk_scoring",
180
+ {"summary": "", "score": parsed.get("risk_score", 50), "confidence": parsed.get("confidence", 0.5), "explanation": ""},
181
+ )
182
+
183
+ parsed.setdefault("risk_score", steps["risk_scoring"].get("score", 50))
184
+ parsed.setdefault("confidence", steps["risk_scoring"].get("confidence", 0.5))
185
+ parsed.setdefault("flags", [])
186
+
187
+ parsed["raw_response"] = data
188
+ return parsed
mcp_servers/fraud_intelligence/__init__.py ADDED
File without changes
mcp_servers/fraud_intelligence/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (173 Bytes). View file
 
mcp_servers/fraud_intelligence/__pycache__/server.cpython-310.pyc ADDED
Binary file (3.33 kB). View file
 
mcp_servers/fraud_intelligence/server.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict
4
+
5
+ import gradio as gr
6
+
7
+
8
def check_price_anomaly(price: float, category: str, condition: str) -> Dict[str, Any]:
    """Placeholder price-anomaly check; always reports no anomaly.

    TODO: replace with real price benchmark logic.
    """
    open_ended_range = {"min": None, "max": None}
    return {
        "is_anomalous": False,
        "z_score": 0.0,
        "benchmark_range": open_ended_range,
        "details": "Stub price anomaly check.",
    }
+ }
19
+
20
+
21
def analyze_seller_history(seller_id: str) -> Dict[str, Any]:
    """Placeholder seller-reputation lookup; returns an 'unknown' risk profile.

    TODO: integrate with real seller history / reputation data.
    """
    empty_metrics = {
        "total_listings": 0,
        "fraud_reports": 0,
        "account_age_days": None,
    }
    return {
        "seller_id": seller_id,
        "risk_level": "unknown",
        "metrics": empty_metrics,
        "details": "Stub seller history analysis.",
    }
+ }
33
+
34
+
35
def detect_listing_patterns(seller_id: str, timeframe_days: int = 30) -> Dict[str, Any]:
    """Placeholder pattern detector; reports no patterns for the window.

    TODO: detect velocity, category concentration, repeated templates, etc.
    """
    report: Dict[str, Any] = {
        "seller_id": seller_id,
        "timeframe_days": timeframe_days,
    }
    report["patterns"] = []
    report["details"] = "Stub listing pattern detection."
    return report
+ }
43
+
44
+
45
def cross_reference_listings(ad_content: str) -> Dict[str, Any]:
    """Placeholder duplicate/fraud-corpus lookup; always returns no matches.

    TODO: cross-reference against known fraud corpus / duplicate listings.
    """
    return dict(
        matches=[],
        details="Stub cross-reference - no external corpus yet.",
    )
+ }
51
+
52
+
53
def get_fraud_risk_score(ad_data: Dict[str, Any]) -> Dict[str, Any]:
    """Placeholder composite scorer; always reports zero risk.

    TODO: combine multiple intelligence signals into a composite score.
    """
    score_payload: Dict[str, Any] = {
        "risk_score": 0,
        "confidence": 0.0,
        "factors": [],
        "details": "Stub fraud risk scorer.",
    }
    return score_payload
+ }
61
+
62
+
63
def build_fraud_intel_interface() -> gr.Blocks:
    """Assemble the (stub) Fraud Intelligence Gradio app, one tab per tool."""
    with gr.Blocks(title="Fraud Intelligence MCP Server") as demo:
        gr.Markdown("# Fraud Intelligence MCP Server (Stub)")
        gr.Markdown(
            "Tools for price anomaly detection, seller reputation, and pattern "
            "analysis. This Gradio app will later be wrapped as an MCP server."
        )

        with gr.Tab("Price Anomaly"):
            price_value = gr.Number(label="Price")
            category_box = gr.Textbox(label="Category")
            condition_box = gr.Textbox(label="Condition")
            run_price_check = gr.Button("Check Price")
            price_result = gr.JSON(label="Price Anomaly Result")
            run_price_check.click(
                check_price_anomaly,
                inputs=[price_value, category_box, condition_box],
                outputs=[price_result],
            )

        with gr.Tab("Seller History"):
            history_seller_id = gr.Textbox(label="Seller ID")
            run_history = gr.Button("Analyze Seller")
            history_result = gr.JSON(label="Seller History")
            run_history.click(
                analyze_seller_history,
                inputs=[history_seller_id],
                outputs=[history_result],
            )

        with gr.Tab("Listing Patterns"):
            patterns_seller_id = gr.Textbox(label="Seller ID")
            window_days = gr.Number(label="Timeframe (days)", value=30)
            run_patterns = gr.Button("Detect Patterns")
            patterns_result = gr.JSON(label="Patterns")
            run_patterns.click(
                detect_listing_patterns,
                inputs=[patterns_seller_id, window_days],
                outputs=[patterns_result],
            )

        with gr.Tab("Cross Reference"):
            ad_text = gr.Textbox(label="Ad Content", lines=4)
            run_cross_ref = gr.Button("Cross Reference Listings")
            cross_ref_result = gr.JSON(label="Cross Reference Result")
            run_cross_ref.click(
                cross_reference_listings,
                inputs=[ad_text],
                outputs=[cross_ref_result],
            )

    return demo
107
+
108
+
109
if __name__ == "__main__":
    # Standalone entry point: serve the Fraud Intelligence stub UI.
    build_fraud_intel_interface().launch()
mcp_servers/vision_forensics/__init__.py ADDED
File without changes
mcp_servers/vision_forensics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (171 Bytes). View file
 
mcp_servers/vision_forensics/__pycache__/server.cpython-310.pyc ADDED
Binary file (2.64 kB). View file
 
mcp_servers/vision_forensics/server.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict
4
+
5
+ import gradio as gr
6
+
7
+
8
def detect_image_manipulation(image_path: str) -> Dict[str, Any]:
    """Placeholder manipulation detector; always reports no manipulation.

    TODO: integrate OpenCV / ML-based manipulation detection.
    """
    verdict: Dict[str, Any] = {
        "manipulated": False,
        "confidence": 0.0,
        "details": "Stub implementation - analysis not yet implemented.",
    }
    return verdict
15
+
16
+
17
def extract_image_metadata(image_path: str) -> Dict[str, Any]:
    """Placeholder EXIF/metadata extractor; returns an empty metadata map.

    TODO: add EXIF parsing and metadata checks.
    """
    return dict(metadata={}, details="Stub metadata extractor.")
20
+
21
+
22
def reverse_image_search(image_path: str) -> Dict[str, Any]:
    """Placeholder reverse image search; always returns zero matches.

    TODO: integrate with a reverse image search API/service.
    """
    return dict(matches=[], details="Stub reverse image search.")
25
+
26
+
27
def compare_image_quality(image_path: str, category: str) -> Dict[str, Any]:
    """Placeholder quality comparison; returns a neutral 0.5 score.

    TODO: implement quality heuristics per category.
    """
    return dict(quality_score=0.5, details="Stub quality comparison.")
30
+
31
+
32
def detect_ai_generated_image(image_path: str) -> Dict[str, Any]:
    """Placeholder AI-image detector; always reports not AI-generated.

    TODO: integrate with AI-generated image detection model.
    """
    return dict(ai_generated=False, confidence=0.0, details="Stub AI detection.")
35
+
36
+
37
def build_vision_forensics_interface() -> gr.Blocks:
    """Assemble the (stub) Vision Forensics Gradio app.

    Each tool gets a tab with the same shape (image in, JSON out), so the tabs
    are built data-driven from a spec table; components are created in the same
    order and with the same labels as before.
    """
    tool_tabs = [
        ("Manipulation Detection", "Detect Manipulation", "Result", detect_image_manipulation),
        ("Metadata", "Extract Metadata", "Metadata", extract_image_metadata),
        ("Reverse Search", "Reverse Search", "Matches", reverse_image_search),
        ("AI Detection", "Detect AI", "AI Detection Result", detect_ai_generated_image),
    ]

    with gr.Blocks(title="Vision Forensics MCP Server") as demo:
        gr.Markdown("# Vision Forensics MCP Server (Stub)")
        gr.Markdown(
            "Exposes image forensics tools for FraudLens AI. "
            "This Gradio app will later be wrapped as an MCP server."
        )

        for tab_name, button_text, output_label, tool_fn in tool_tabs:
            with gr.Tab(tab_name):
                image_in = gr.Image(type="filepath")
                run_btn = gr.Button(button_text)
                json_out = gr.JSON(label=output_label)
                run_btn.click(tool_fn, inputs=[image_in], outputs=[json_out])

    return demo
70
+
71
+
72
if __name__ == "__main__":
    # Standalone entry point: serve the Vision Forensics stub UI.
    build_vision_forensics_interface().launch()
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ anthropic>=0.34.0
3
+ openai>=1.0.0
4
+ google-generativeai>=0.8.0
5
+ httpx>=0.27.0
6
+ pydantic>=2.7.0
7
+ python-dotenv>=1.0.1
8
+ opencv-python>=4.10.0.84
9
+ Pillow>=10.0.0
10
+ modal>=0.64.0
11
+ huggingface_hub>=0.24.0