# AnalyzrAI — apps/copilot/views.py
# (source imported from a Hugging Face upload by thejagstudio, "Upload 92 files", rev 0310410)
import requests as http_requests
import json
from django.http import HttpResponse, StreamingHttpResponse
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.response import Response
from .analysis_service import run_agentic_analysis
from .financial_models import ALL_MODELS, run_model as run_financial_model
from .risk_management_service import run_portfolio_risk_analysis
from .agent_tools_service import run_agent_tool_chat
from .claude_agent_service import run_agent_chat
from .services import run_chat_completion, run_chat_completion_stream
from .excel_export_service import generate_xlsx
from .report_export_service import generate_docx
from .slides_export_service import generate_pptx
@api_view(["POST"])
def chat_completion(request):
    """Run a single-turn chat completion and return the model name and content.

    Expects a JSON body with a non-empty 'message' string; returns 400 on a
    missing/blank message or a service-side validation error, 502 when the
    upstream completion call fails.
    """
    message = str(request.data.get("message", "")).strip()
    if not message:
        return Response(
            {"detail": "Request body must include a non-empty 'message' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        result = run_chat_completion(message)
    except ValueError as exc:
        # Service-level validation problems map to a client error.
        return Response({"detail": str(exc)}, status=status.HTTP_400_BAD_REQUEST)
    except Exception as exc:
        # Any other failure is treated as an upstream (gateway) problem.
        return Response(
            {"detail": "Cerebras chat completion failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    payload = {
        "status": "ok",
        "model": result["model"],
        "content": result["content"],
    }
    return Response(payload)
@api_view(["POST"])
def chat_completion_stream(request):
    """Stream a chat completion back to the client as plain text chunks.

    Returns 400 on a missing/blank 'message' or a service-side validation
    error, 502 when stream initialization fails upstream.
    """
    message = str(request.data.get("message", "")).strip()
    if not message:
        return Response(
            {"detail": "Request body must include a non-empty 'message' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        stream = run_chat_completion_stream(message)
    except ValueError as exc:
        return Response({"detail": str(exc)}, status=status.HTTP_400_BAD_REQUEST)
    except Exception as exc:
        return Response(
            {"detail": "Cerebras stream initialization failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    streaming = StreamingHttpResponse(stream, content_type="text/plain; charset=utf-8")
    # Disable caching and nginx buffering so chunks reach the client immediately.
    streaming["Cache-Control"] = "no-cache"
    streaming["X-Accel-Buffering"] = "no"
    return streaming
@api_view(["POST"])
def analyze_document(request):
    """Run the agentic analysis pipeline on an uploaded PDF.

    Expects a multipart form with the PDF in the 'file' field and an optional
    'headline' text field. Returns 400 for a missing or non-PDF upload and
    502 when the analysis pipeline fails.
    """
    uploaded_file = request.FILES.get("file")
    if uploaded_file is None:
        return Response(
            {"detail": "Request must include a PDF file in the 'file' form field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    # Reject only when a content type is present AND it is not PDF; an absent
    # content type is allowed through (matches the original behavior).
    content_type = uploaded_file.content_type
    if content_type and content_type != "application/pdf":
        return Response(
            {"detail": "Only PDF uploads are supported for analysis."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    headline = str(request.data.get("headline", "")).strip()
    try:
        payload = run_agentic_analysis(uploaded_file.read(), headline)
    except Exception as exc:
        return Response(
            {"detail": "Agentic analysis failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    return Response({"status": "ok", "analysis": payload})
def _extract_json_payload(raw_text: str):
raw_text = (raw_text or "").strip()
if not raw_text:
return None
if raw_text.startswith("```"):
raw_text = raw_text.strip("`")
if "\n" in raw_text:
raw_text = raw_text.split("\n", 1)[1]
raw_text = raw_text.rsplit("```", 1)[0].strip()
try:
return json.loads(raw_text)
except json.JSONDecodeError:
pass
start = raw_text.find("{")
end = raw_text.rfind("}")
if start >= 0 and end > start:
candidate = raw_text[start : end + 1]
try:
return json.loads(candidate)
except json.JSONDecodeError:
return None
return None
@api_view(["POST"])
def fix_analysis_json(request):
    """Repair a malformed analysis component, locally if possible, else via LLM.

    Body fields: 'componentName' (required), 'componentData' (the malformed
    value), 'fullAnalysis' (context object), 'attempt' (retry counter).
    Returns the repaired componentData plus a 'resolved' flag and the repair
    'source' ('local-parser' or 'llm-fixture'); 502 on LLM failure.
    """
    component_name = str(request.data.get("componentName", "")).strip()
    component_data = request.data.get("componentData")
    full_analysis = request.data.get("fullAnalysis", {})
    # Bug fix: int(...) on a non-numeric 'attempt' (e.g. "first", None) raised
    # an unhandled ValueError/TypeError -> 500. Coerce defensively instead.
    try:
        attempt = int(request.data.get("attempt", 1))
    except (TypeError, ValueError):
        attempt = 1
    if not component_name:
        return Response(
            {"detail": "Request body must include 'componentName'."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    # Fast local path: if the malformed value is just a JSON string, parse it directly.
    if isinstance(component_data, str):
        local_candidate = _extract_json_payload(component_data)
        if local_candidate is not None:
            return Response(
                {
                    "status": "ok",
                    "componentName": component_name,
                    "componentData": local_candidate,
                    "resolved": True,
                    "source": "local-parser",
                }
            )
    prompt = (
        "You are a strict JSON repair engine.\n"
        "TASK: Repair the target component so it becomes valid JSON for dashboard rendering.\n\n"
        "OUTPUT RULES (MANDATORY):\n"
        "1) Return exactly one JSON object and nothing else.\n"
        "2) No markdown, no code fences, no explanation.\n"
        "3) Use this exact envelope: {\"componentData\": <object-or-array-or-null>}.\n"
        "4) If you cannot repair safely, return {\"componentData\": null}.\n"
        "5) Preserve semantic meaning and field names whenever possible.\n"
        "6) Ensure result parses with strict JSON parser.\n\n"
        f"Target component: {component_name}\n"
        f"Attempt number: {attempt}\n"
        f"Malformed component input:\n{json.dumps(component_data, ensure_ascii=True)}\n\n"
        # Context is truncated to keep the prompt within model limits.
        f"Full analysis context:\n{json.dumps(full_analysis, ensure_ascii=True)[:18000]}"
    )
    try:
        llm_result = run_chat_completion(prompt)
        parsed = _extract_json_payload(llm_result.get("content", ""))
    except Exception as exc:
        return Response(
            {"detail": "JSON fixture failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    # The model may return a bare non-dict value; only trust the envelope shape.
    fixed = parsed.get("componentData") if isinstance(parsed, dict) else None
    return Response(
        {
            "status": "ok",
            "componentName": component_name,
            "componentData": fixed,
            "resolved": fixed is not None,
            "source": "llm-fixture",
        }
    )
@api_view(["POST"])
def agent_tool_chat(request, tool_slug: str):
    """Chat with a specific agent tool, identified by its URL slug.

    Body fields: 'message' (required), plus optional 'history', 'artifact',
    and 'data_context' passed through to the tool service. Returns 400 on a
    blank message or service validation error, 502 on other failures.
    """
    message = str(request.data.get("message", "")).strip()
    if not message:
        return Response(
            {"detail": "Request body must include a non-empty 'message' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        payload = run_agent_tool_chat(
            tool_slug=tool_slug,
            message=message,
            history=request.data.get("history", []),
            current_artifact=request.data.get("artifact", {}),
            data_context=request.data.get("data_context", None),
        )
    except ValueError as exc:
        return Response({"detail": str(exc)}, status=status.HTTP_400_BAD_REQUEST)
    except Exception as exc:
        return Response(
            {"detail": "Agent tool request failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    return Response({"status": "ok", **payload})
@api_view(["POST"])
def agent_chat(request):
    """Conversational agent with real-time tool calling (Claude Agent SDK)."""
    message = str(request.data.get("message", "")).strip()
    if not message:
        return Response(
            {"detail": "Request body must include a non-empty 'message' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    # Normalize loosely-typed optional inputs before handing off to the agent.
    history = request.data.get("history", [])
    if not isinstance(history, list):
        history = []
    data_context = request.data.get("data_context", None)
    if not isinstance(data_context, dict):
        data_context = None
    try:
        payload = run_agent_chat(
            message=message,
            history=history,
            data_context=data_context,
        )
    except ValueError as exc:
        return Response({"detail": str(exc)}, status=status.HTTP_400_BAD_REQUEST)
    except Exception as exc:
        return Response(
            {"detail": "Agent chat failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    return Response({"status": "ok", **payload})
@api_view(["POST"])
def export_slides_pptx(request):
    """Accept a slides artifact JSON and return a downloadable .pptx file.

    The body must contain an 'artifact' object with a non-empty 'slides'
    array; returns 400 otherwise and 500 if PPTX generation fails.
    """
    artifact = request.data.get("artifact")
    if not isinstance(artifact, dict) or not artifact.get("slides"):
        return Response(
            {"detail": "Request body must include an 'artifact' object with a 'slides' array."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        pptx_bytes = generate_pptx(artifact)
    except Exception as exc:
        return Response(
            {"detail": "PPTX generation failed.", "error": str(exc)},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    # Derive a safe download name from the deck title (spaces -> underscores,
    # capped at 40 chars).
    title_slug = (artifact.get("title") or "deck").replace(" ", "_")[:40]
    filename = f"{title_slug}.pptx"
    response = HttpResponse(
        pptx_bytes,
        content_type="application/vnd.openxmlformats-officedocument.presentationml.presentation",
    )
    # Bug fix: the header previously hard-coded a placeholder string instead of
    # interpolating the computed filename (which was left unused).
    response["Content-Disposition"] = f'attachment; filename="{filename}"'
    return response
@api_view(["POST"])
def export_report_docx(request):
    """Accept a report artifact JSON and return a downloadable .docx file.

    The body must contain an 'artifact' object with a non-empty 'sections'
    array; returns 400 otherwise and 500 if DOCX generation fails.
    """
    artifact = request.data.get("artifact")
    if not isinstance(artifact, dict) or not artifact.get("sections"):
        return Response(
            {"detail": "Request body must include an 'artifact' object with a 'sections' array."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        docx_bytes = generate_docx(artifact)
    except Exception as exc:
        return Response(
            {"detail": "DOCX generation failed.", "error": str(exc)},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    # Derive a safe download name from the report title (spaces -> underscores,
    # capped at 40 chars).
    title_slug = (artifact.get("title") or "report").replace(" ", "_")[:40]
    filename = f"{title_slug}.docx"
    response = HttpResponse(
        docx_bytes,
        content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )
    # Bug fix: the header previously hard-coded a placeholder string instead of
    # interpolating the computed filename (which was left unused).
    response["Content-Disposition"] = f'attachment; filename="{filename}"'
    return response
@api_view(["POST"])
def export_excel_xlsx(request):
    """Accept an Excel artifact JSON and return a downloadable .xlsx file.

    The body must contain an 'artifact' object with a non-empty 'sheets'
    array; returns 400 otherwise and 500 if XLSX generation fails.
    """
    artifact = request.data.get("artifact")
    if not isinstance(artifact, dict) or not artifact.get("sheets"):
        return Response(
            {"detail": "Request body must include an 'artifact' object with a 'sheets' array."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        xlsx_bytes = generate_xlsx(artifact)
    except Exception as exc:
        return Response(
            {"detail": "XLSX generation failed.", "error": str(exc)},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    # Derive a safe download name from the model title (spaces -> underscores,
    # capped at 40 chars).
    title_slug = (artifact.get("title") or "model").replace(" ", "_")[:40]
    filename = f"{title_slug}.xlsx"
    response = HttpResponse(
        xlsx_bytes,
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )
    # Bug fix: the header previously hard-coded a placeholder string instead of
    # interpolating the computed filename (which was left unused).
    response["Content-Disposition"] = f'attachment; filename="{filename}"'
    return response
@api_view(["POST"])
def multi_agent_run(request):
    """Run the multi-agent financial analysis pipeline with SSE streaming."""
    user_prompt = str(request.data.get("prompt", "")).strip()
    if not user_prompt:
        return Response(
            {"detail": "Request body must include a non-empty 'prompt' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    data_context = request.data.get("data_context", None)
    try:
        # Imported lazily so the heavy pipeline module loads only on demand.
        from .multi_agent_service import run_multi_agent_pipeline

        stream = run_multi_agent_pipeline(user_prompt, data_context)
    except Exception as exc:
        return Response(
            {"detail": "Multi-agent pipeline failed to start.", "error": str(exc)},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    sse = StreamingHttpResponse(stream, content_type="text/event-stream")
    # Disable caching and nginx buffering so SSE events flush immediately.
    sse["Cache-Control"] = "no-cache"
    sse["X-Accel-Buffering"] = "no"
    return sse
# TradingView public news-flow endpoint used by the news proxy views below.
_TV_NEWS_URL = "https://news-mediator.tradingview.com/public/news-flow/v2/news"
# Browser-like request headers sent with every TradingView call
# (presumably to avoid upstream blocking of non-browser clients — confirm).
_TV_HEADERS = {
    "accept": "*/*",
    "origin": "https://in.tradingview.com",
    "referer": "https://in.tradingview.com/",
    "user-agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/145.0.0.0 Safari/537.36"
    ),
}
# Exchanges tried when the caller supplies a bare ticker without an exchange prefix.
_COMMON_EXCHANGES = ["NYSE", "NASDAQ", "AMEX"]
@api_view(["GET"])
def stock_news(request):
    """Proxy TradingView news for a given symbol or ticker.

    Query param 'symbol' may be exchange-qualified ('NYSE:DELL') or bare
    ('DELL'); bare tickers fan out across the common US exchanges. Results
    are de-duplicated by item id and sorted newest-first.
    """
    symbol = request.query_params.get("symbol", "").strip()
    if not symbol:
        return Response(
            {"detail": "Query parameter 'symbol' is required (e.g. NYSE:DELL or DELL)."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    if ":" in symbol:
        symbol_filters = [f"symbol:{symbol}"]
    else:
        symbol_filters = [f"symbol:{ex}:{symbol}" for ex in _COMMON_EXCHANGES]
    all_items = []
    seen_ids = set()
    for sym_filter in symbol_filters:
        params = {
            "filter": ["lang:en", sym_filter],
            "client": "landing",
            "streaming": "false",
            "user_prostatus": "non_pro",
        }
        try:
            resp = http_requests.get(
                _TV_NEWS_URL, params=params, headers=_TV_HEADERS, timeout=10,
            )
            resp.raise_for_status()
            items = resp.json().get("items", [])
            for item in items:
                # Bug fix: item["id"] raised an unhandled KeyError (-> 500)
                # for upstream items missing 'id'; only RequestException was
                # caught. Use .get and skip id-less items instead.
                item_id = item.get("id")
                if item_id is None or item_id in seen_ids:
                    continue
                seen_ids.add(item_id)
                all_items.append(item)
        except http_requests.RequestException:
            # Best-effort fan-out: one failing exchange query must not abort
            # the others.
            continue
    all_items.sort(key=lambda x: x.get("published", 0), reverse=True)
    return Response({"status": "ok", "items": all_items})
@api_view(["GET"])
def breaking_news(request):
    """Proxy TradingView top-story / market-index breaking news."""
    params = {
        "filter": ["lang:en_IN", "priority:important"],
        "client": "screener",
        "streaming": "true",
        "user_prostatus": "non_pro",
    }
    try:
        resp = http_requests.get(
            _TV_NEWS_URL, params=params, headers=_TV_HEADERS, timeout=10,
        )
        resp.raise_for_status()
        items = resp.json().get("items", [])
    except http_requests.RequestException as exc:
        # Upstream connectivity/HTTP errors surface as a bad gateway.
        return Response(
            {"detail": "Failed to fetch breaking news.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    return Response({"status": "ok", "items": items})
# TradingView screener-facade scan endpoint used by market_benchmarks.
_TV_SCREENER_URL = "https://screener-facade.tradingview.com/screener-facade/api/v1/screener-table/scan"
# Headers for the screener POST; note the text/plain content-type even though
# the body is JSON-shaped (matches what the upstream API expects).
_TV_SCREENER_HEADERS = {
    "accept": "application/json",
    "content-type": "text/plain;charset=UTF-8",
    "origin": "https://in.tradingview.com",
    "referer": "https://in.tradingview.com/",
    "user-agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/145.0.0.0 Safari/537.36"
    ),
}
@api_view(["POST"])
def portfolio_risk_analysis(request):
    """Run portfolio risk analysis with real computations (correlation, VaR, optimal weights, stress tests).

    Body fields: 'portfolio' (required list of tickers), optional 'weights'
    dict and numeric 'portfolioValue' (defaults to 100,000). Returns 400 on
    invalid input and 502 when the analysis service fails.
    """
    portfolio = request.data.get("portfolio", [])
    weights = request.data.get("weights", {})
    # Bug fix: float(...) on a non-numeric 'portfolioValue' (e.g. "100k")
    # raised an unhandled ValueError/TypeError -> 500. Validate and return 400.
    try:
        portfolio_value = float(request.data.get("portfolioValue", 100_000))
    except (TypeError, ValueError):
        return Response(
            {"detail": "'portfolioValue' must be a number."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    if not portfolio or not isinstance(portfolio, list):
        return Response(
            {"detail": "Request body must include a non-empty 'portfolio' array of ticker symbols."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    # Normalize tickers and drop falsy entries (empty strings, None).
    portfolio = [str(t).strip().upper() for t in portfolio if t]
    if not portfolio:
        return Response(
            {"detail": "Portfolio must contain at least one valid ticker."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    if not isinstance(weights, dict):
        weights = {}
    try:
        result = run_portfolio_risk_analysis(
            portfolio=portfolio,
            weights=weights,
            portfolio_value=portfolio_value,
        )
    except Exception as exc:
        return Response(
            {"detail": "Risk analysis failed.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    # The service reports user-level problems via an 'error' key rather than
    # an exception; map those to a 400.
    if "error" in result:
        return Response(
            {"detail": result["error"]},
            status=status.HTTP_400_BAD_REQUEST,
        )
    return Response({"status": "ok", **result})
@api_view(["GET"])
def market_benchmarks(request):
    """Fetch US index technicals from TradingView screener.

    Posts a fixed scan request for the first 5 US index rows and reshapes the
    columnar response (parallel 'symbols' and per-column 'rawValues' arrays)
    into a list of per-index benchmark dicts.
    """
    params = {
        "table_id": "indices_quotes.us",
        "version": "52",
        "columnset_id": "technicals",
    }
    # Body is sent as a raw string (upstream expects text/plain, see headers).
    body = '{"lang":"en","range":[0,5],"scanner_product_label":"markets-screener"}'
    try:
        resp = http_requests.post(
            _TV_SCREENER_URL,
            params=params,
            headers=_TV_SCREENER_HEADERS,
            data=body,
            timeout=10,
        )
        resp.raise_for_status()
        raw = resp.json()
    except http_requests.RequestException as exc:
        return Response(
            {"detail": "Failed to fetch market benchmarks.", "error": str(exc)},
            status=status.HTTP_502_BAD_GATEWAY,
        )
    # Columnar payload: 'symbols' is a row index; each entry of 'data' is a
    # column with an 'id' and a 'rawValues' array aligned to the rows.
    symbols = raw.get("symbols", [])
    columns = raw.get("data", [])
    count = len(symbols)
    col_map = {}
    for col in columns:
        cid = col["id"]
        # A duplicated column id is kept under "<id>_2"; a third duplicate
        # would overwrite the second (apparently never occurs upstream —
        # TODO confirm).
        if cid in col_map:
            col_map[cid + "_2"] = col
        else:
            col_map[cid] = col
    benchmarks = []
    for i in range(count):
        # Every per-column access is bounds-checked: upstream arrays may be
        # shorter than 'symbols'.
        ticker_col = col_map.get("TickerUniversal", {})
        raw_vals = ticker_col.get("rawValues", [])
        info = raw_vals[i] if i < len(raw_vals) else {}
        tech_rating = col_map.get("TechnicalRating", {}).get("rawValues", [])
        ma_rating = col_map.get("RatingMa", {}).get("rawValues", [])
        osc_rating = col_map.get("RatingOscillators", {}).get("rawValues", [])
        rsi_vals = col_map.get("RelativeStrengthIndex", {}).get("rawValues", [])
        benchmarks.append({
            "symbol": symbols[i] if i < len(symbols) else "",
            "name": info.get("description", ""),
            "exchange": info.get("exchange", ""),
            "ticker": info.get("name", ""),
            "logoid": info.get("logoid", ""),
            "technicalRating": tech_rating[i] if i < len(tech_rating) else None,
            "maRating": ma_rating[i] if i < len(ma_rating) else None,
            "oscillatorsRating": osc_rating[i] if i < len(osc_rating) else None,
            # RSI may arrive as a non-numeric sentinel; only round real numbers.
            "rsi": round(rsi_vals[i], 2) if i < len(rsi_vals) and isinstance(rsi_vals[i], (int, float)) else None,
        })
    return Response({"status": "ok", "benchmarks": benchmarks})
@api_view(["GET"])
def quant_models_list(_request):
    """List available quantitative/AutoML models."""
    models = []
    for model_id in ALL_MODELS.keys():
        models.append({
            "id": model_id,
            # Human-readable name derived from the snake_case id.
            "name": model_id.replace("_", " ").title(),
            "description": _MODEL_DESCRIPTIONS.get(model_id, ""),
        })
    return Response({"models": models})
@api_view(["POST"])
def quant_model_run(request):
    """Run a quantitative model on a ticker. Returns metrics, chart_data, signals, interpretation.

    Body fields: 'ticker' and 'model' (required); optional 'period' (default
    '1y'), 'horizon', 'scenario', and model-specific inputs ('growth_rate',
    'discount_rate', 'terminal_growth' for DCF; 'tickers' for correlation).
    """
    ticker = str(request.data.get("ticker", "")).strip().upper()
    if not ticker:
        return Response(
            {"detail": "Request body must include a non-empty 'ticker' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    # Normalize the model name so 'Monte Carlo' matches the 'monte_carlo' key.
    model_name = str(request.data.get("model", "")).strip().lower().replace(" ", "_")
    if not model_name or model_name not in ALL_MODELS:
        return Response(
            {"detail": f"Unknown model. Available: {', '.join(ALL_MODELS.keys())}"},
            status=status.HTTP_400_BAD_REQUEST,
        )
    period = str(request.data.get("period", "1y")).strip() or "1y"
    horizon = str(request.data.get("horizon", "1-Week")).strip()
    scenario = str(request.data.get("scenario", "Base Case")).strip()
    # Scenario tilts the growth/discount/terminal-growth assumptions slightly
    # up (bull) or down (bear); see _scenario_adjustments.
    scenario_adjust = _scenario_adjustments(scenario)
    # Build model-specific kwargs for run_financial_model.
    kwargs = {}
    if model_name == "monte_carlo":
        kwargs["period"] = period
        days_map = {"1-Day": 1, "1-Week": 7, "1-Month": 30}
        kwargs["days"] = days_map.get(horizon, 30)
        # Annual growth tilt spread over ~252 trading days.
        kwargs["drift_adjustment"] = scenario_adjust["growth"] / 252
    elif model_name == "dcf":
        # Percentages from the client are converted to fractions, then tilted
        # by the scenario. NOTE(review): float(...) on non-numeric input would
        # raise here and surface as the 500 below.
        base_growth = float(request.data.get("growth_rate", 8)) / 100
        base_discount = float(request.data.get("discount_rate", 10)) / 100
        base_terminal = float(request.data.get("terminal_growth", 3)) / 100
        kwargs["growth_rate"] = base_growth + scenario_adjust["growth"]
        kwargs["discount_rate"] = base_discount + scenario_adjust["discount"]
        kwargs["terminal_growth"] = base_terminal + scenario_adjust["terminal_growth"]
    elif model_name == "correlation":
        # Optional extra tickers: accept a comma-separated string or a list.
        extra_tickers = request.data.get("tickers")
        if isinstance(extra_tickers, str):
            extra_tickers = [t.strip().upper() for t in extra_tickers.split(",") if t.strip()]
        elif not isinstance(extra_tickers, list):
            extra_tickers = []
        # Fallback pair partner: SPY, unless the primary ticker IS SPY.
        second_ticker = "QQQ" if ticker.upper() == "SPY" else "SPY"
        # De-duplicate while preserving order; cap at 8 tickers.
        tickers_list = list(dict.fromkeys([ticker] + [t for t in extra_tickers if t and str(t).strip().upper() != ticker]))[:8]
        if len(tickers_list) < 2:
            tickers_list = list(dict.fromkeys([ticker, second_ticker]))
        kwargs["tickers"] = tickers_list
        kwargs["period"] = period
    elif model_name == "regression":
        kwargs["period"] = period
        kwargs["forecast_days"] = {"1-Day": 1, "1-Week": 7, "1-Month": 30}.get(horizon, 30)
    else:
        # All remaining models only need the lookback period.
        kwargs["period"] = period
    try:
        result = run_financial_model(model_name, ticker, **kwargs)
    except Exception as exc:
        return Response(
            {"detail": str(exc), "model": model_name, "ticker": ticker},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    # Models report user-level problems via an 'error' key -> 422.
    if "error" in result:
        return Response(
            {"detail": result["error"], "model": model_name, "ticker": ticker},
            status=status.HTTP_422_UNPROCESSABLE_ENTITY,
        )
    return Response(result)
@api_view(["POST"])
def quant_models_run_all(request):
    """Run all quantitative models on a ticker and generate an LLM executive summary.

    Iterates every model in ALL_MODELS with scenario-adjusted kwargs,
    collects per-model results/signals/interpretations, then asks the LLM
    for a short executive summary (with a static fallback on LLM failure).
    """
    ticker = str(request.data.get("ticker", "")).strip().upper()
    if not ticker:
        return Response(
            {"detail": "Request body must include a non-empty 'ticker' field."},
            status=status.HTTP_400_BAD_REQUEST,
        )
    period = str(request.data.get("period", "1y")).strip() or "1y"
    horizon = str(request.data.get("horizon", "1-Week")).strip()
    scenario = str(request.data.get("scenario", "Base Case")).strip()
    scenario_adjust = _scenario_adjustments(scenario)
    # Percentages from the client are converted to fractions.
    # NOTE(review): float(...) on non-numeric input raises before the loop.
    growth = float(request.data.get("growth_rate", 8)) / 100
    discount = float(request.data.get("discount_rate", 10)) / 100
    terminal_growth = float(request.data.get("terminal_growth", 3)) / 100
    days_map = {"1-Day": 1, "1-Week": 7, "1-Month": 30}
    days = days_map.get(horizon, 30)
    forecast_days = days
    results = {}
    interpretations = []
    signals_list = []
    for model_name in ALL_MODELS:
        # Same per-model kwargs mapping as quant_model_run, minus the
        # client-supplied extra tickers.
        kwargs = {"period": period}
        if model_name == "monte_carlo":
            kwargs["days"] = days
            kwargs["drift_adjustment"] = scenario_adjust["growth"] / 252
        elif model_name == "dcf":
            kwargs["growth_rate"] = growth + scenario_adjust["growth"]
            kwargs["discount_rate"] = discount + scenario_adjust["discount"]
            kwargs["terminal_growth"] = terminal_growth + scenario_adjust["terminal_growth"]
        elif model_name == "correlation":
            second_ticker = "QQQ" if ticker == "SPY" else "SPY"
            kwargs["tickers"] = [ticker, second_ticker]
        elif model_name == "regression":
            kwargs["forecast_days"] = forecast_days
        try:
            result = run_financial_model(model_name, ticker, **kwargs)
            # Only successful models contribute signals/interpretations.
            if "error" not in result:
                results[model_name] = result
                if result.get("interpretation"):
                    interpretations.append(f"[{model_name}]: {result['interpretation']}")
                for sig in result.get("signals", []):
                    signals_list.append(f"[{model_name}] {sig}")
        except Exception:
            # A crashing model is recorded but must not abort the batch.
            results[model_name] = {"error": "Model failed", "model": model_name}
    # Cap signals/interpretations to keep the summary prompt bounded.
    summary_prompt = (
        f"You are a concise financial analyst. Write a 3-5 sentence executive summary for {ticker}.\n\n"
        f"Scenario: {scenario}\n"
        f"Models run: {', '.join(results.keys())}\n\n"
        f"Key signals:\n" + "\n".join(signals_list[:15]) + "\n\n"
        f"Model interpretations:\n" + "\n".join(interpretations[:10]) + "\n\n"
        "Synthesize the findings into a clear, actionable executive summary for stakeholders."
    )
    try:
        llm_result = run_chat_completion(summary_prompt)
        executive_summary = (llm_result.get("content") or "").strip()
    except Exception:
        # LLM failure degrades to a deterministic summary rather than a 5xx.
        executive_summary = (
            f"Analysis completed for {ticker}. "
            f"Ran {len([r for r in results.values() if 'error' not in r])} models. "
            "Review individual model outputs for details."
        )
    return Response({
        "ticker": ticker,
        "results": results,
        "executive_summary": executive_summary,
        "models_run": list(results.keys()),
    })
def _scenario_adjustments(scenario: str) -> dict:
"""Return growth/discount/terminal_growth adjustments based on scenario."""
s = (scenario or "").lower()
if "bull" in s or "soft landing" in s:
return {"growth": 0.005, "discount": -0.005, "terminal_growth": 0.005}
if "bear" in s or "recession" in s:
return {"growth": -0.005, "discount": 0.005, "terminal_growth": -0.005}
return {"growth": 0.0, "discount": 0.0, "terminal_growth": 0.0}
# Short human-readable blurbs for each model id, surfaced by quant_models_list.
# Keys should mirror ALL_MODELS; missing keys fall back to an empty string.
_MODEL_DESCRIPTIONS = {
    "moving_averages": "SMA/EMA crossovers and trend signals",
    "rsi": "Relative Strength Index overbought/oversold",
    "macd": "MACD momentum and crossover signals",
    "bollinger_bands": "Volatility bands and squeeze detection",
    "monte_carlo": "Probabilistic price forecast",
    "dcf": "Discounted cash flow valuation",
    "beta_capm": "Beta vs benchmark and CAPM expected return",
    "risk_metrics": "Sharpe, Sortino, drawdown, VaR",
    "correlation": "Pairwise correlation matrix (multi-ticker)",
    "regression": "Linear trend and forecast",
}