github-actions[bot] commited on
Commit
abf702c
·
0 Parent(s):

Deploy to Hugging Face Space

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. .github/workflows/hf_deploy.yml +83 -0
  3. .gitignore +140 -0
  4. Dockerfile +47 -0
  5. LICENSE +21 -0
  6. README.md +92 -0
  7. backend/README.md +48 -0
  8. backend/app/__init__.py +0 -0
  9. backend/app/__pycache__/__init__.cpython-310.pyc +0 -0
  10. backend/app/__pycache__/__init__.cpython-312.pyc +0 -0
  11. backend/app/__pycache__/main.cpython-310.pyc +0 -0
  12. backend/app/__pycache__/main.cpython-312.pyc +0 -0
  13. backend/app/agentic_analyst/__init__.py +4 -0
  14. backend/app/agentic_analyst/__pycache__/__init__.cpython-312.pyc +0 -0
  15. backend/app/agentic_analyst/__pycache__/team.cpython-312.pyc +0 -0
  16. backend/app/agentic_analyst/agents/__init__.py +1 -0
  17. backend/app/agentic_analyst/agents/__pycache__/__init__.cpython-312.pyc +0 -0
  18. backend/app/agentic_analyst/agents/__pycache__/market_analyst.cpython-312.pyc +0 -0
  19. backend/app/agentic_analyst/agents/__pycache__/risk_manager.cpython-312.pyc +0 -0
  20. backend/app/agentic_analyst/agents/__pycache__/sentiment_analyst.cpython-312.pyc +0 -0
  21. backend/app/agentic_analyst/agents/__pycache__/strategy_advisor.cpython-312.pyc +0 -0
  22. backend/app/agentic_analyst/agents/market_analyst.py +36 -0
  23. backend/app/agentic_analyst/agents/risk_manager.py +28 -0
  24. backend/app/agentic_analyst/agents/sentiment_analyst.py +29 -0
  25. backend/app/agentic_analyst/agents/strategy_advisor.py +41 -0
  26. backend/app/agentic_analyst/team.py +42 -0
  27. backend/app/agentic_analyst/tools/__init__.py +1 -0
  28. backend/app/agentic_analyst/tools/__pycache__/__init__.cpython-312.pyc +0 -0
  29. backend/app/agentic_analyst/tools/__pycache__/market_data.cpython-312.pyc +0 -0
  30. backend/app/agentic_analyst/tools/__pycache__/news_data.cpython-312.pyc +0 -0
  31. backend/app/agentic_analyst/tools/market_data.py +228 -0
  32. backend/app/agentic_analyst/tools/news_data.py +154 -0
  33. backend/app/api/__init__.py +0 -0
  34. backend/app/api/__pycache__/__init__.cpython-310.pyc +0 -0
  35. backend/app/api/__pycache__/__init__.cpython-312.pyc +0 -0
  36. backend/app/api/__pycache__/ai_builder.cpython-310.pyc +0 -0
  37. backend/app/api/__pycache__/ai_builder.cpython-312.pyc +0 -0
  38. backend/app/api/__pycache__/auth.cpython-310.pyc +0 -0
  39. backend/app/api/__pycache__/auth.cpython-312.pyc +0 -0
  40. backend/app/api/__pycache__/market.cpython-310.pyc +0 -0
  41. backend/app/api/__pycache__/market.cpython-312.pyc +0 -0
  42. backend/app/api/__pycache__/user.cpython-310.pyc +0 -0
  43. backend/app/api/__pycache__/user.cpython-312.pyc +0 -0
  44. backend/app/api/ai_builder.py +105 -0
  45. backend/app/api/auth.py +90 -0
  46. backend/app/api/market.py +340 -0
  47. backend/app/api/user.py +133 -0
  48. backend/app/core/__init__.py +0 -0
  49. backend/app/core/__pycache__/__init__.cpython-310.pyc +0 -0
  50. backend/app/core/__pycache__/__init__.cpython-312.pyc +0 -0
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.db filter=lfs diff=lfs merge=lfs -text
.github/workflows/hf_deploy.yml ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
name: Deploy to Hugging Face Spaces

on:
  push:
    branches: [main]
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'

      - name: Install dependencies
        run: pip install huggingface_hub

      - name: Ensure Space Exists
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
          SPACE_NAME: mlstocks
        run: |
          python -c "
          from huggingface_hub import create_repo
          import os

          token = os.environ['HF_TOKEN']
          user = os.environ['HF_USERNAME']
          space_name = os.environ['SPACE_NAME']
          repo_id = f'{user}/{space_name}'

          print(f'Checking/Creating Space: {repo_id}')
          try:
              create_repo(
                  repo_id=repo_id,
                  token=token,
                  repo_type='space',
                  space_sdk='docker',
                  exist_ok=True
              )
              print('Space is ready.')
          except Exception as e:
              print(f'Error creating space: {e}')
              exit(1)
          "

      - name: Push to Hugging Face Hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
          SPACE_NAME: mlstocks
        run: |
          # 1. Configure Identity
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"

          # 2. DELETE existing git history to destroy any traces of binary blobs
          rm -rf .git

          # 3. Initialize fresh repo for deployment.
          #    Name the branch explicitly: plain `git init` defaults to `master`
          #    or `main` depending on the runner's init.defaultBranch setting,
          #    which previously made the `master:main` push refspec fragile.
          git init -b main
          git lfs install

          # 4. Enforce LFS for .db files
          echo "*.db filter=lfs diff=lfs merge=lfs -text" > .gitattributes

          # 5. Add files (LFS filter will apply now because .gitattributes exists before add)
          git add .

          # 6. Commit
          git commit -m "Deploy to Hugging Face Space"

          # 7. Push (HEAD:main works regardless of the local branch name)
          git remote add space https://$HF_USERNAME:$HF_TOKEN@huggingface.co/spaces/$HF_USERNAME/$SPACE_NAME
          git push --force space HEAD:main
.gitignore ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ lerna-debug.log*
8
+
9
+ # Diagnostic reports (https://nodejs.org/api/report.html)
10
+ report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
11
+
12
+ # Runtime data
13
+ pids
14
+ *.pid
15
+ *.seed
16
+ *.pid.lock
17
+
18
+ # Directory for instrumented libs generated by jscoverage/JSCover
19
+ lib-cov
20
+
21
+ # Coverage directory used by tools like istanbul
22
+ coverage
23
+ *.lcov
24
+
25
+ # nyc test coverage
26
+ .nyc_output
27
+
28
+ # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
29
+ .grunt
30
+
31
+ # Bower dependency directory (https://bower.io/)
32
+ bower_components
33
+
34
+ # node-waf configuration
35
+ .lock-wscript
36
+
37
+ # Compiled binary addons (https://nodejs.org/api/addons.html)
38
+ build/Release
39
+
40
+ # Dependency directories
41
+ node_modules/
42
+ jspm_packages/
43
+
44
+ # Snowpack dependency directory (https://snowpack.dev/)
45
+ web_modules/
46
+
47
+ # TypeScript cache
48
+ *.tsbuildinfo
49
+
50
+ # Optional npm cache directory
51
+ .npm
52
+
53
+ # Optional eslint cache
54
+ .eslintcache
55
+
56
+ # Optional stylelint cache
57
+ .stylelintcache
58
+
59
+ # Optional REPL history
60
+ .node_repl_history
61
+
62
+ # Output of 'npm pack'
63
+ *.tgz
64
+
65
+ # Yarn Integrity file
66
+ .yarn-integrity
67
+
68
+ # dotenv environment variable files
69
+ .env
70
+ .env.*
71
+ .venv
72
+ !.env.example
73
+
74
+ # parcel-bundler cache (https://parceljs.org/)
75
+ .cache
76
+ .parcel-cache
77
+
78
+ # Next.js build output
79
+ .next
80
+ out
81
+
82
+ # Nuxt.js build / generate output
83
+ .nuxt
84
+ dist
85
+
86
+ # Gatsby files
87
+ .cache/
88
+ # Comment in the public line in if your project uses Gatsby and not Next.js
89
+ # https://nextjs.org/blog/next-9-1#public-directory-support
90
+ # public
91
+
92
+ # vuepress build output
93
+ .vuepress/dist
94
+
95
+ # vuepress v2.x temp and cache directory
96
+ .temp
97
+ .cache
98
+
99
+ # Sveltekit cache directory
100
+ .svelte-kit/
101
+
102
+ # vitepress build output
103
+ **/.vitepress/dist
104
+
105
+ # vitepress cache directory
106
+ **/.vitepress/cache
107
+
108
+ # Docusaurus cache and generated files
109
+ .docusaurus
110
+
111
+ # Serverless directories
112
+ .serverless/
113
+
114
+ # FuseBox cache
115
+ .fusebox/
116
+
117
+ # DynamoDB Local files
118
+ .dynamodb/
119
+
120
+ # Firebase cache directory
121
+ .firebase/
122
+
123
+ # TernJS port file
124
+ .tern-port
125
+
126
+ # Stores VSCode versions used for testing VSCode extensions
127
+ .vscode-test
128
+
129
+ # yarn v3
130
+ .pnp.*
131
+ .yarn/*
132
+ !.yarn/patches
133
+ !.yarn/plugins
134
+ !.yarn/releases
135
+ !.yarn/sdks
136
+ !.yarn/versions
137
+
138
+ # Vite logs files
139
+ vite.config.js.timestamp-*
140
+ vite.config.ts.timestamp-*
Dockerfile ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Stage 1: Build the frontend
# Fixed: `AS` is now upper-case; lowercase `as` trips Docker's FromAsCasing lint.
FROM node:20 AS frontend-builder

WORKDIR /app/frontend
COPY frontend/package*.json ./
RUN npm install
COPY frontend/ ./
RUN npm run build

# Stage 2: Build the backend and serve everything
FROM python:3.11-slim

WORKDIR /app

# Copy backend code
COPY backend/ ./backend

# Copy built frontend assets to backend/static (where main.py expects them)
COPY --from=frontend-builder /app/frontend/dist ./backend/static

# Install uv (static binaries published by astral-sh)
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/

# Install backend dependencies using uv into a virtual environment.
# We install from /app/backend where pyproject.toml lives; uv discovers the
# .venv in the parent directory (/app/.venv) automatically.
WORKDIR /app/backend
RUN uv venv /app/.venv
RUN uv pip install .

# Reset workdir
WORKDIR /app

# Enable venv so `uvicorn` resolves to the venv's entry point
ENV VIRTUAL_ENV=/app/.venv
ENV PATH="/app/.venv/bin:$PATH"

# Environment variables (PORT=7860 is the port Hugging Face Spaces expects)
ENV PORT=7860
ENV FRONTEND_URL=""
ENV PYTHONPATH="/app/backend"

# Expose the port
EXPOSE 7860

# Command to run the application (sh -c so $PORT is expanded at runtime)
CMD ["sh", "-c", "uvicorn app.main:app --host 0.0.0.0 --port $PORT"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2026 startrek
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: AI Model Driven Trading Intelligence
3
+ emoji: 📝
4
+ colorFrom: blue
5
+ colorTo: green
6
+ sdk: docker
7
+ pinned: true
8
+ ---
9
+
10
+
11
+ # 🧠 NEXUS Trading Intelligence Platform
12
+
13
+ **AI-Driven Financial Analysis & Automated Model Architecture**
14
+
15
+ MLStocks (NEXUS) is an advanced, open-source trading intelligence platform that unifies **Classical Machine Learning**, **Deep Learning**, and **Agentic AI** into a single workflow. It allows traders and developers to build, backtest, and deploy predictive models without writing complex pipelines from scratch.
16
+
17
+ ---
18
+
19
+ ## 🚀 Key Intelligence Engines
20
+
21
+ ### 1. 🏛️ NEXUS Quadrant (Classical Intelligence)
22
+ Dedicated to robust, statistical market modeling.
23
+ - **Algorithms**: Random Forest, XGBoost, Linear Regression, SVM.
24
+ - **Use Case**: Identifying rigid price structures, support/resistance clustering, and factor-based alpha generation.
25
+ - **Workflow**: Auto-training pipelines with hyperparameter optimization.
26
+
27
+ ### 2. 🧠 NEXUS Neural (Deep Intelligence)
28
+ A cutting-edge deep learning environment for non-linear pattern recognition.
29
+ - **Architectures**: LSTM (Long Short-Term Memory), GRU, and Transformer-based time-series models.
30
+ - **Use Case**: Predicting complex sequential patterns, volatility shifts, and sentiment-market correlations.
31
+
32
+ ### 3. 🤖 Option Strategy Architect (Agentic AI)
33
+ A multi-agent autonomous team that debates and structures option trades in real-time.
34
+ - **The Team**:
35
+ - `📊 Market Analyst`: Scrapes technicals and volume profiles.
36
+ - `📰 Sentiment Analyst`: Parses news for event-driven risks.
37
+ - `🧠 Strategy Advisor`: Proposes complex option spreads (Iron Condors, Verticals).
38
+ - `🛡️ Risk Manager`: Validates capital exposure and final probability.
39
+ - **Output**: A final "Trade" or "Wait" decision with specific entry/exit criteria.
40
+
41
+ ---
42
+
43
+ ## 🛠️ Technology Stack
44
+
45
+ - **Frontend**: Vue 3, Vite, TailwindCSS (Glassmorphism UI).
46
+ - **Backend**: FastAPI (Python 3.10+), Uvicorn.
47
+ - **Database**: Neon PostgreSQL (Async SQLAlchemy).
48
+ - **AI/LLM**: Google Gemini 2.0 Flash / Llama 3 (via Ollama/Groq), Scikit-Learn, PyTorch.
49
+ - **Deployment**: Docker (Monolithic), Hugging Face Spaces.
50
+
51
+ ---
52
+
53
+ ## 🚀 Getting Started
54
+
55
+ ### Local Development
56
+ ```bash
57
+ # 1. Clone the repository
58
+ git clone https://github.com/mishrabp/mlstocks.git
59
+ cd mlstocks
60
+
61
+ # 2. Setup Backend
62
+ cd backend
63
+ python -m venv .venv
64
+ source .venv/bin/activate # or .venv\Scripts\activate on Windows
65
+ pip install -r requirements.txt
66
+ # Create .env file with NEON_DB_CONNECTION and GOOGLE_API_KEY
67
+
68
+ # 3. Setup Frontend
69
+ cd ../frontend
70
+ npm install
71
+ npm run dev
72
+ ```
73
+
74
+ ### Docker Deployment
75
+ The project is configured for a single-container deployment suitable for Hugging Face Spaces.
76
+
77
+ ```bash
78
+ docker build -t nexus-platform .
79
+ docker run -p 7860:7860 --env-file backend/.env nexus-platform
80
+ ```
81
+
82
+ ---
83
+
84
+ ## 🔒 Security & Privacy
85
+ - **No Data Storage**: Market data is processed in-memory or ephemeral sessions.
86
+ - **API Keys**: All keys (OpenAI, Gemini, HF) are stored in secure environment variables.
87
+ - **Compliance**: This tool is for **educational and research purposes only**.
88
+
89
+ ---
90
+
91
+ ## ⚠️ Disclaimer
92
+ **Using this software does not guarantee profits.** Trading stocks, options, and futures involves substantial risk of loss. The AI agents and models provided in NEXUS are for informational assistance only and should not be construed as financial advice. Always perform your own due diligence.
backend/README.md ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MLStocks Backend
2
+
3
+ The AI-driven backbone of the MLStocks intelligence platform, orchestrating multi-agent analysis and automated model building.
4
+
5
+ ## 🚀 Key Features
6
+ - **Agentic Analyst**: A multi-agent team (Market, Sentiment, Strategy, Risk) powered by AutoGen and Google Gemini for real-time options analysis.
7
+ - **NEXUS Model Builders**:
8
+ - **NEXUS Quadrant**: Classic ML pipeline (Scikit-Learn, XGBoost).
9
+ - **NEXUS Neural**: Deep Learning engine (PyTorch, LSTMs, Transformers).
10
+ - **Hugging Face Hub Integration**: Automated publishing of trained models with dynamic README/Model Card generation.
11
+ - **Data Engine**: Real-time market data synchronization and technical indicator calculation via Yahoo Finance and `ta`.
12
+
13
+ ## 🛠️ Technology Stack
14
+ - **Framework**: FastAPI (Python 3.10+)
15
+ - **LLM Orchestration**: AutoGen (Microsoft)
16
+ - **AI Models**: Google Gemini 2.0/1.5 Flash
17
+ - **Database**: SQLite with SQLAlchemy (Asynchronous)
18
+ - **ML Frameworks**: Scikit-Learn, PyTorch, XGBoost
19
+
20
+ ## 📦 Setup & Installation
21
+
22
+ 1. **Environment Setup**:
23
+ ```bash
24
+ python -m venv .venv
25
+ source .venv/bin/activate # Linux/WSL
26
+ pip install -e .
27
+ ```
28
+
29
+ 2. **Configuration**:
30
+ Create a `.env` file in the `backend/` directory:
31
+ ```env
32
+ GOOGLE_API_KEY=your_gemini_key
33
+ HF_TOKEN=your_huggingface_token
34
+ SECRET_KEY=your_jwt_secret
35
+ ```
36
+
37
+ ## 🏃 Running the Server
38
+ ```bash
39
+ python main.py
40
+ ```
41
+ The API documentation will be available at `http://localhost:8000/docs`.
42
+
43
+ ### 📂 Structure
44
+ - `app/api/`: FastAPI route definitions.
45
+ - `app/agentic_analyst/`: Multi-agent team logic and tool definitions.
46
+ - `app/model_builder/`: ML training and publishing orchestration.
47
+ - `app/core/`: Configuration, authentication, and model factory.
48
+ - `app/models/`: Database schema and Pydantic models.
backend/app/__init__.py ADDED
File without changes
backend/app/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (138 Bytes). View file
 
backend/app/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (141 Bytes). View file
 
backend/app/__pycache__/main.cpython-310.pyc ADDED
Binary file (2.48 kB). View file
 
backend/app/__pycache__/main.cpython-312.pyc ADDED
Binary file (4.31 kB). View file
 
backend/app/agentic_analyst/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Agentic Analyst Module
2
+ from .team import get_trading_team, extract_json
3
+
4
+ __all__ = ['get_trading_team', 'extract_json']
backend/app/agentic_analyst/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (265 Bytes). View file
 
backend/app/agentic_analyst/__pycache__/team.cpython-312.pyc ADDED
Binary file (2.53 kB). View file
 
backend/app/agentic_analyst/agents/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Agents module
backend/app/agentic_analyst/agents/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (163 Bytes). View file
 
backend/app/agentic_analyst/agents/__pycache__/market_analyst.cpython-312.pyc ADDED
Binary file (2.2 kB). View file
 
backend/app/agentic_analyst/agents/__pycache__/risk_manager.cpython-312.pyc ADDED
Binary file (1.44 kB). View file
 
backend/app/agentic_analyst/agents/__pycache__/sentiment_analyst.cpython-312.pyc ADDED
Binary file (1.6 kB). View file
 
backend/app/agentic_analyst/agents/__pycache__/strategy_advisor.cpython-312.pyc ADDED
Binary file (2.22 kB). View file
 
backend/app/agentic_analyst/agents/market_analyst.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from autogen_agentchat.agents import AssistantAgent
import datetime
from autogen_core.tools import FunctionTool
from app.agentic_analyst.tools.market_data import get_current_price, get_historical_volatility, get_option_chain_snapshot, get_technical_indicators


def get_market_analyst(model_client):
    """Build the MarketAnalyst agent.

    Wraps the four market-data helpers as FunctionTools and returns an
    AssistantAgent whose prompt asks for a structured JSON market read
    (trend, volatility regime, RSI momentum, option-chain liquidity)
    WITHOUT a trade recommendation — strategy selection happens downstream.

    Args:
        model_client: shared LLM client passed through to the agent.

    Returns:
        A configured AssistantAgent named "MarketAnalyst".
    """
    # Declarative (callable, description) pairs keep the tool list easy to scan/extend.
    tool_specs = (
        (get_current_price, "Get current price of a stock."),
        (get_historical_volatility, "Get historical volatility and VIX context."),
        (get_option_chain_snapshot, "Get option chain snapshot for near-term expiry."),
        (get_technical_indicators, "Calculate SMA (20/50/200) and RSI (14) technical indicators."),
    )
    wrapped_tools = [FunctionTool(fn, description=desc) for fn, desc in tool_specs]

    # NOTE: the date is interpolated once, at agent construction time.
    return AssistantAgent(
        name="MarketAnalyst",
        model_client=model_client,
        tools=wrapped_tools,
        system_message=f"""
        You are a Market Technician. Today is {datetime.date.today()}.
        1. Fetch Price, Volatility, Option Chain, AND Technical Indicators (SMA, RSI) for the ticker.
        2. Analyze the Trend (Bullish/Bearish/Neutral) based on price action and SMA alignment (Price vs SMA200).
        3. Analyze the Volatility Regime (Low/Normal/High) using HV and VIX.
        4. Analyze Momentum: Check RSI levels (Overbought > 70 / Oversold < 30).
        5. Output a JSON similar to:
        {{
            "ticker": "...",
            "price": ...,
            "trend": "...",
            "volatility": "...",
            "rsi_status": "Overbought/Neutral/Oversold",
            "liquidity_check": "Pass/Fail based on option chain availability",
            "notes": "..."
        }}
        Do NOT recommend a trade yet. Just analyze the context.
        """
    )
backend/app/agentic_analyst/agents/risk_manager.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from autogen_agentchat.agents import AssistantAgent
import datetime


def get_risk_manager(model_client):
    """Build the RiskManager agent — the final gate of the round-robin chat.

    Its prompt enforces a hard confidence floor (< 70 -> WAIT), re-validates
    the StrategyAdvisor's rubric arithmetic, and applies an event-risk
    override. It is the agent instructed to emit "TERMINATE", which is the
    team's text-mention termination trigger.

    Args:
        model_client: shared LLM client passed through to the agent.

    Returns:
        A configured AssistantAgent named "RiskManager" (no tools attached —
        it only reviews the prior agents' output).
    """
    # The date is interpolated once, at agent construction time.
    return AssistantAgent(
        name="RiskManager",
        model_client=model_client,
        system_message=f"""
        You are the Chief Risk Officer. Today is {datetime.date.today()}.
        1. Review the proposed strategy and confidence score.
        2. STRICT RULE: If confidence < 70, reject the trade and recommend "WAIT".
        3. Validate the score calculation against the Rubric (Start 50 + Trend/Vol/Sentiment addons).
        4. Event Risk Check: If Earnings/CPI imminent, override and recommend "WAIT".

        Output final JSON:
        {{
            "final_decision": "TRADE | WAIT",
            "confidence": ..., // The final validated score
            "actionable_recommendation": "Execute Bull Call Spread... / Stay in Cash",
            "entry_signal": "Net Credit | Net Debit",
            "entry_price": 1.50,
            "risk_warning": "..."
        }}

        End your response with the word TERMINATE to signal completion.
        """
    )
backend/app/agentic_analyst/agents/sentiment_analyst.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from autogen_agentchat.agents import AssistantAgent
import datetime
from autogen_core.tools import FunctionTool
from app.agentic_analyst.tools.news_data import search_news


def get_sentiment_analyst(model_client):
    """Build the SentimentAnalyst agent.

    Wraps `search_news` as its single tool and instructs the model to
    aggregate the FinBERT scores embedded in the news results into an
    overall sentiment + confidence JSON.

    NOTE(review): the prompt assumes search_news annotates each headline
    with "[FinBERT: ...]" tags — confirm against news_data.search_news.

    Args:
        model_client: shared LLM client passed through to the agent.

    Returns:
        A configured AssistantAgent named "SentimentAnalyst".
    """
    news_tool = FunctionTool(search_news, description="Search for recent news about the ticker.")

    # The date is interpolated once, at agent construction time.
    return AssistantAgent(
        name="SentimentAnalyst",
        model_client=model_client,
        tools=[news_tool],
        system_message=f"""
        You are a Sentiment Analyst. Today is {datetime.date.today()}.
        1. Search for recent news. The results now include **FinBERT Sentiment Scores** (e.g. [FinBERT: positive (0.95)]).
        2. Aggregate these FinBERT scores to determine the overall sentiment (Bullish/Bearish/Neutral).
        3. Assign a 'Sentiment Confidence' score based on the FinBERT probability scores.
           - If multiple articles have >0.90 positive/negative, confidence is HIGH.
           - If signals are mixed or low probability, confidence is LOW/MEDIUM.
        4. Output JSON:
        {{
            "sentiment": "...",
            "confidence": "...",
            "key_events": ["..."],
            "risk_factors": ["..."]
        }}
        """
    )
backend/app/agentic_analyst/agents/strategy_advisor.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from autogen_agentchat.agents import AssistantAgent
import datetime


def get_strategy_advisor(model_client):
    """Build the StrategyAdvisor agent.

    Converts the MarketAnalyst + SentimentAnalyst context into a concrete
    option strategy with explicit strikes/expiry, scored by an additive
    rubric (base 50, max bonuses +60, conflict penalty -20). The RiskManager
    downstream enforces the 70-point trade threshold.

    Args:
        model_client: shared LLM client passed through to the agent.

    Returns:
        A configured AssistantAgent named "StrategyAdvisor" (no tools — it
        reasons over the prior agents' messages only).
    """
    # The date is interpolated once, at agent construction time.
    return AssistantAgent(
        name="StrategyAdvisor",
        model_client=model_client,
        system_message=f"""
        You are an Option Strategist. Today is {datetime.date.today()}.
        1. Review the Market Analysis (Trend, Volatility, Option Chain) and Sentiment Analysis.
        2. Select a specific strategy with EXPLICIT STRIKES and EXPIRY from the provided option chain.

        RULES:
        - HIGH Volatility + Range Bound -> Iron Condor (Credit)
        - HIGH Volatility + Directional -> Credit Spread (Bull Put / Bear Call)
        - LOW Volatility + Directional -> Debit Spread (Bull Call / Bear Put)
        - LOW Volatility + Range Bound -> Calendar Spread (or WAIT)

        CONFIDENCE SCORE RUBRIC (Start at 50):
        1. Technical Trend Aligns with Strategy (Price vs SMA200): +20
        2. Volatility Regime Aligns (e.g. High Vol/VIX > 20 for Credit): +10
        3. Sentiment Analysis is Confirming (Same direction): +10
        4. Technical Momentum Confluence (RSI isn't fighting the trade): +10
        5. Option Liquidity is Sufficient: +10
        6. Conflicting Signals (Trend vs Sentiment mismatch): -20

        CALCULATE the score explicitly based on this rubric.
        - Target > 70% confidence for a trade recommendation.

        Output JSON:
        {{
            "strategy": "Bull Call Spread | Iron Condor | WAIT",
            "confidence_score": 95, // Integer 0-100
            "reasoning": "...",
            "proposed_legs": "Buy 100 Call, Sell 105 Call (Exp: YYYY-MM-DD)", // MUST use actual strikes from chain
            "entry_signal": "Net Credit | Net Debit",
            "estimated_entry_price": 1.50 // Midpoint estimate of the spread
        }}
        """
    )
backend/app/agentic_analyst/team.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import re
from typing import Dict, Any

from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination

from app.agentic_analyst.agents.market_analyst import get_market_analyst
from app.agentic_analyst.agents.sentiment_analyst import get_sentiment_analyst
from app.agentic_analyst.agents.strategy_advisor import get_strategy_advisor
from app.agentic_analyst.agents.risk_manager import get_risk_manager

def get_trading_team(model_client):
    """
    Creates and returns the RoundRobinGroupChat team.

    Participant order is significant: market -> sentiment -> strategy ->
    risk, so each agent builds on the previous one's output. The chat ends
    when an agent says "TERMINATE" (the RiskManager's prompt tells it to)
    or after 20 messages, whichever comes first.

    Args:
        model_client: LLM client shared by all four agents.

    Returns:
        A configured RoundRobinGroupChat ready to be run on a ticker prompt.
    """
    print("[DEBUG] Initializing trading team agents...")
    market = get_market_analyst(model_client)
    sentiment = get_sentiment_analyst(model_client)
    strategy = get_strategy_advisor(model_client)
    risk = get_risk_manager(model_client)

    print("[DEBUG] Creating RoundRobinGroupChat team...")
    team = RoundRobinGroupChat(
        participants=[market, sentiment, strategy, risk],
        # Stop on explicit TERMINATE; the 20-message cap is a safety net
        # against a run-away loop if no agent ever emits the keyword.
        termination_condition=TextMentionTermination("TERMINATE") | MaxMessageTermination(20)
    )
    return team
29
+
30
def extract_json(text: str) -> Dict[str, Any]:
    """Helper to extract JSON from markdown code blocks or raw text.

    Prefers the first ```json fenced block; if none is present, the whole
    text is parsed as JSON. Any failure yields an empty dict rather than
    an exception, so callers can treat the result uniformly.
    """
    print("[DEBUG] Attempting to extract JSON from agent response...")
    try:
        fenced = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if fenced is not None:
            print("[DEBUG] Found JSON matching markdown block.")
            payload = fenced.group(1)
        else:
            print("[DEBUG] No markdown block found, trying raw parse.")
            payload = text
        return json.loads(payload)
    except Exception as e:
        # Broad catch is intentional: malformed agent output must not crash the pipeline.
        print(f"[DEBUG] JSON extraction failed: {str(e)}")
        return {}
backend/app/agentic_analyst/tools/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Tools module
backend/app/agentic_analyst/tools/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (162 Bytes). View file
 
backend/app/agentic_analyst/tools/__pycache__/market_data.cpython-312.pyc ADDED
Binary file (10.7 kB). View file
 
backend/app/agentic_analyst/tools/__pycache__/news_data.cpython-312.pyc ADDED
Binary file (8.25 kB). View file
 
backend/app/agentic_analyst/tools/market_data.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yfinance as yf
2
+ import pandas as pd
3
+ import math
4
+ from datetime import datetime, timedelta
5
+
6
def check_and_fix_ticker(symbol: str) -> str:
    """
    Checks if the ticker has data. If not, tries appending '.NS' (for NSE India).
    Returns the working ticker or the original (upper-cased, stripped) if
    neither lookup succeeds.

    Args:
        symbol: raw ticker symbol as supplied by the agent/user.
    """
    print(f"[DEBUG] check_and_fix_ticker called using: {symbol}")
    symbol = symbol.upper().strip()

    # Try the symbol as given first.
    ticker = yf.Ticker(symbol)
    try:
        # fast_info is a quick way to check validity without a full download.
        if ticker.fast_info.last_price is not None:
            return symbol
    except Exception:
        # Fixed: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any data/network error just means "try .NS".
        pass

    # Fall back to the NSE (India) suffix.
    ns_symbol = symbol + ".NS"
    ns_ticker = yf.Ticker(ns_symbol)
    try:
        if ns_ticker.fast_info.last_price is not None:
            return ns_symbol
    except Exception:
        pass

    # Neither lookup worked; let downstream code surface the error.
    return symbol
33
+
34
def get_current_price(symbol: str) -> "float | str":
    """Fetches the current market price of the stock.

    Returns the price as a number on success, or an error-message string on
    failure — callers (e.g. get_option_chain_snapshot) distinguish the two
    via isinstance(result, str). Annotation corrected from `-> float` to
    reflect the string error path.
    """
    print(f"[DEBUG] get_current_price called for: {symbol}")
    try:
        # Auto-fix ticker if needed
        symbol = check_and_fix_ticker(symbol)

        ticker = yf.Ticker(symbol)
        price = ticker.fast_info.last_price
        if price is None:
            # Fall back to the last daily close when fast_info has no quote.
            hist = ticker.history(period="1d")
            if not hist.empty:
                price = hist["Close"].iloc[-1]

        if price is None:
            print(f"[DEBUG] No price data found for {symbol}")
            return f"Error: No price data for {symbol}"

        print(f"[DEBUG] Current price for {symbol}: {price}")
        return price
    except Exception as e:
        # Tool-call layer expects text, not an exception, on failure.
        print(f"[DEBUG] Error fetching price for {symbol}: {str(e)}")
        return f"Error fetching price for {symbol}: {str(e)}"
57
+
58
def get_historical_volatility(symbol: str, period: str = "1mo") -> dict:
    """
    Calculates annualized historical volatility from daily close-to-close
    returns and returns VIX context if available.

    Args:
        symbol: ticker symbol (auto-corrected via check_and_fix_ticker).
        period: yfinance history window, e.g. "1mo", "3mo".

    Returns:
        dict with ticker_used, annualized_volatility (percent), period and
        vix_reference ("N/A" when the VIX quote is unavailable); or
        {"error": ...} on failure.
    """
    print(f"[DEBUG] get_historical_volatility called for: {symbol}")
    try:
        symbol = check_and_fix_ticker(symbol)
        ticker = yf.Ticker(symbol)
        hist = ticker.history(period=period)
        if hist.empty:
            return {"error": "No historical data found"}

        # Annualize the std-dev of daily returns (252 trading days per year).
        hist['Returns'] = hist['Close'].pct_change()
        volatility = hist['Returns'].std() * math.sqrt(252)

        vix_price = None
        try:
            vix = yf.Ticker("^VIX")
            vix_price = vix.fast_info.last_price
        except Exception:
            # Fixed: narrowed from a bare `except:`. VIX is best-effort context;
            # failure here must not sink the whole volatility calculation.
            pass

        result = {
            "ticker_used": symbol,
            "annualized_volatility": round(volatility * 100, 2),
            "period": period,
            # Fixed: `is not None` instead of truthiness, so a (theoretical)
            # zero quote is not misreported as "N/A".
            "vix_reference": round(vix_price, 2) if vix_price is not None else "N/A"
        }
        print(f"[DEBUG] Historical Volatility for {symbol}: {result['annualized_volatility']}% (VIX: {result['vix_reference']})")
        return result
    except Exception as e:
        print(f"[DEBUG] Error calculating volatility for {symbol}: {str(e)}")
        return {"error": str(e)}
91
+
92
def get_option_chain_snapshot(symbol: str) -> str:
    """
    Fetches a snapshot of the option chain as a human-readable string.

    Picks the first expiry 7-45 days out (falling back to the nearest one),
    then lists calls and puts with strikes within +/-15% of spot. On any
    failure an error-message string is returned instead of raising, so the
    LLM tool-call layer always receives text.
    """
    print(f"[DEBUG] get_option_chain_snapshot called for: {symbol}")
    try:
        symbol = check_and_fix_ticker(symbol)
        ticker = yf.Ticker(symbol)
        expirations = ticker.options

        if not expirations:
            return f"No options data found for {symbol}."

        target_date = None
        today = datetime.now()

        for exp in expirations:
            exp_date = datetime.strptime(exp, "%Y-%m-%d")
            days_to_exp = (exp_date - today).days
            # Adjusted window: 7 to 45 days to capture monthly expiries for better liquidity
            if 7 <= days_to_exp <= 45:
                target_date = exp
                break

        if not target_date:
            target_date = expirations[0]

        opt = ticker.option_chain(target_date)
        calls = opt.calls
        puts = opt.puts

        # get_current_price returns an error string on failure; propagate it verbatim.
        price_info = get_current_price(symbol)
        if isinstance(price_info, str): return price_info
        current_price = float(price_info)

        # Widen to +/- 15% to capture OTM strikes for credit spreads
        lower_bound = current_price * 0.85
        upper_bound = current_price * 1.15

        ntm_calls = calls[(calls['strike'] >= lower_bound) & (calls['strike'] <= upper_bound)]
        ntm_puts = puts[(puts['strike'] >= lower_bound) & (puts['strike'] <= upper_bound)]

        summary = f"Option Chain Snapshot for {symbol} (Expiry: {target_date})\n"
        summary += f"Current Spot Price: {round(current_price, 2)}\n\n"

        # Fixed: headers previously read "(Ask | Strike | IV)" while the rows
        # actually print Strike | Ask | IV — labels now match the data.
        summary += "--- CALLS (Strike | Ask | IV) ---\n"
        for _, row in ntm_calls.iterrows():
            summary += f"Strike: {row['strike']} | Ask: {row['ask']} | IV: {round(row['impliedVolatility']*100, 1)}%\n"

        summary += "\n--- PUTS (Strike | Ask | IV) ---\n"
        for _, row in ntm_puts.iterrows():
            summary += f"Strike: {row['strike']} | Ask: {row['ask']} | IV: {round(row['impliedVolatility']*100, 1)}%\n"

        print(f"[DEBUG] Generated option chain snapshot for {symbol} with {len(ntm_calls)} calls and {len(ntm_puts)} puts.")
        return summary

    except Exception as e:
        print(f"[DEBUG] Error in get_option_chain_snapshot: {str(e)}")
        return f"Error fetching option chain: {str(e)}"
151
+
152
+ def get_technical_indicators(symbol: str) -> dict:
153
+ """
154
+ Calculates key technical indicators: SMA (20, 50, 200) and RSI (14).
155
+ Returns a dictionary of values and signals.
156
+ """
157
+ print(f"[DEBUG] get_technical_indicators called for: {symbol}")
158
+ try:
159
+ symbol = check_and_fix_ticker(symbol)
160
+ ticker = yf.Ticker(symbol)
161
+
162
+ # Fetch enough history for SMA 200
163
+ hist = ticker.history(period="1y")
164
+ if hist.empty:
165
+ return {"error": "No historical data found"}
166
+
167
+ # Safe calculations
168
+ # SMA
169
+ hist['SMA_20'] = hist['Close'].rolling(window=20).mean()
170
+ hist['SMA_50'] = hist['Close'].rolling(window=50).mean()
171
+
172
+ # Only calc SMA_200 if we actually have 200 points
173
+ if len(hist) >= 200:
174
+ hist['SMA_200'] = hist['Close'].rolling(window=200).mean()
175
+ else:
176
+ hist['SMA_200'] = pd.Series([None] * len(hist), index=hist.index)
177
+
178
+ # RSI Calculation (14 periods) - needs at least 15 points
179
+ if len(hist) >= 15:
180
+ delta = hist['Close'].diff()
181
+ gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
182
+ loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
183
+ rs = gain / loss
184
+ hist['RSI_14'] = 100 - (100 / (1 + rs))
185
+ else:
186
+ hist['RSI_14'] = pd.Series([None] * len(hist), index=hist.index)
187
+
188
+ current_data = hist.iloc[-1]
189
+
190
+ # Interpretation RSI
191
+ rsi_val = current_data.get('RSI_14')
192
+ rsi_signal = "Insufficient Data"
193
+ if rsi_val is not None and not pd.isna(rsi_val):
194
+ rsi_val = round(rsi_val, 2)
195
+ if rsi_val > 70: rsi_signal = "Overbought"
196
+ elif rsi_val < 30: rsi_signal = "Oversold"
197
+ else: rsi_signal = "Neutral"
198
+ else:
199
+ rsi_val = "N/A"
200
+
201
+ price = current_data['Close']
202
+ trend = "Neutral"
203
+ sma200 = current_data.get('SMA_200')
204
+
205
+ if sma200 is not None and not pd.isna(sma200):
206
+ if price > sma200:
207
+ trend = "Bullish (Above SMA200)"
208
+ else:
209
+ trend = "Bearish (Below SMA200)"
210
+ else:
211
+ trend = "Unknown (No SMA200 Data)"
212
+
213
+ result = {
214
+ "ticker": symbol,
215
+ "current_price": round(price, 2),
216
+ "sma_20": round(current_data['SMA_20'], 2) if not pd.isna(current_data.get('SMA_20')) else None,
217
+ "sma_50": round(current_data['SMA_50'], 2) if not pd.isna(current_data.get('SMA_50')) else None,
218
+ "sma_200": round(sma200, 2) if sma200 is not None and not pd.isna(sma200) else None,
219
+ "rsi_14": rsi_val,
220
+ "rsi_signal": rsi_signal,
221
+ "trend_signal": trend
222
+ }
223
+ print(f"[DEBUG] Technical Indicators for {symbol}: Price={result['current_price']}, Trend={trend}, RSI={rsi_signal}")
224
+ return result
225
+
226
+ except Exception as e:
227
+ print(f"[DEBUG] Error calculating technical indicators for {symbol}: {str(e)}")
228
+ return {"error": str(e)}
backend/app/agentic_analyst/tools/news_data.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from duckduckgo_search import DDGS
2
+ from transformers import pipeline
3
+ import torch
4
+ import requests
5
+ from bs4 import BeautifulSoup
6
+ from typing import Optional
7
+ from pydantic import BaseModel, Field
8
+ import concurrent.futures
9
+
10
+ # Global variable for lazy loading
11
+ _sentiment_pipeline = None
12
+
13
+ def get_sentiment_pipeline():
14
+ """
15
+ Lazy loads the FinBERT pipeline.
16
+ """
17
+ print(f"[DEBUG] get_sentiment_pipeline called")
18
+ global _sentiment_pipeline
19
+ if _sentiment_pipeline is None:
20
+ try:
21
+ print("Loading FinBERT model...")
22
+ # Use CPU to avoid CUDA complexity if not needed, or let torch decide if fast
23
+ _sentiment_pipeline = pipeline("sentiment-analysis", model="ProsusAI/finbert")
24
+ except Exception as e:
25
+ print(f"Failed to load FinBERT: {e}")
26
+ return None
27
+ return _sentiment_pipeline
28
+
29
+ def _fetch_page_content(url: str, timeout: int = 3) -> Optional[str]:
30
+ """Fetch and extract text content from a web page."""
31
+ print(f"[DEBUG] Fetching content: {url} (timeout: {timeout}s)...")
32
+ try:
33
+ headers = {
34
+ 'User-Agent': (
35
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
36
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
37
+ 'Chrome/91.0.4472.124 Safari/537.36'
38
+ )
39
+ }
40
+ response = requests.get(url, headers=headers, timeout=timeout)
41
+ response.raise_for_status()
42
+
43
+ soup = BeautifulSoup(response.content, 'html.parser')
44
+
45
+ # Remove irrelevant elements
46
+ for tag in soup(["script", "style", "nav", "footer", "header", "aside"]):
47
+ tag.decompose()
48
+
49
+ # Extract text
50
+ text = soup.get_text(separator='\n', strip=True)
51
+
52
+ # Clean whitespace
53
+ lines = (line.strip() for line in text.splitlines())
54
+ chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
55
+ text = '\n'.join(chunk for chunk in chunks if chunk)
56
+
57
+ return text
58
+ except Exception as e:
59
+ print(f"[WARNING] Failed to fetch content from {url}: {str(e)}")
60
+ return None
61
+
62
# Validation Model
class NewsArticle(BaseModel):
    """Validated shape of a single news search result.

    Raw DDGS news dicts are mapped onto this schema before processing;
    items that fail validation are skipped by the caller.
    """

    title: str = Field(..., description="The headline of the news article.")
    link: str = Field(..., description="The direct URL to the full article.")
    snippet: str = Field(..., description="A brief summary or body text.")
    # NOTE: field name shadows the stdlib `datetime` module name within this class.
    datetime: Optional[str] = Field(None, description="Publication date if available.")
68
+
69
def search_news(ticker: str) -> str:
    """
    Searches for the latest news regarding the stock ticker using DuckDuckGo.

    Each article is scored with FinBERT — on the full article text when it can
    be fetched, otherwise on the headline alone.

    Args:
        ticker: Stock ticker symbol (case-insensitive; whitespace tolerated).

    Returns:
        A summary string of the top 5 results with sentiment scores, or a
        human-readable message when there is no ticker, no news, or an error.
    """
    print(f"[DEBUG] search_news called with: {ticker}")
    try:
        if not ticker:
            return "No ticker provided for news search."

        ticker = ticker.upper().strip()
        query = f"{ticker} stock news financial"

        with DDGS() as ddgs:
            # Use 'news' backend
            raw_results = list(ddgs.news(query, max_results=5))

        if not raw_results:
            return f"No recent news found for {ticker}."

        # Load the (heavy) FinBERT model only once we know there is news to
        # score — previously it was loaded even when the search came back empty.
        sentiment_pipe = get_sentiment_pipeline()

        def process_news_item(raw_item):
            """Validate one raw result, optionally fetch full text, score sentiment."""
            try:
                # DDGS returns: 'title', 'url', 'body', 'date', 'source'.
                # Map them onto our validated schema.
                article = NewsArticle(
                    title=raw_item.get('title', 'No Title'),
                    link=raw_item.get('url', ''),
                    snippet=raw_item.get('body', ''),
                    datetime=raw_item.get('date', 'Unknown Date')
                )
            except Exception as validation_err:
                print(f"[WARNING] Skipping invalid news item: {validation_err}")
                return None

            source = raw_item.get('source', 'Unknown Source')  # Keep source for display
            print(f"[DEBUG] Processing article: {article.title[:50]}... from {source}")

            # FinBERT Analysis
            sentiment_tag = ""
            if sentiment_pipe:
                try:
                    # 1. Prefer full article content; fall back to the headline.
                    content_to_analyze = article.title
                    analysis_type = "Headline"

                    if article.link:
                        full_text = _fetch_page_content(article.link)
                        if full_text and len(full_text) > 100:
                            content_to_analyze = full_text
                            analysis_type = "Full Text"

                    # 2. Truncate to stay within FinBERT's input limits.
                    print(f"[DEBUG] Running FinBERT on {analysis_type} for: {article.title[:50]}...")
                    score = sentiment_pipe(content_to_analyze[:2000])[0]
                    label = score['label']
                    conf = round(score['score'], 2)

                    sentiment_tag = f" [FinBERT ({analysis_type}): {label} ({conf})]"
                    print(f"[DEBUG] Sentiment Result for {article.title[:50]}... -> {label} ({conf})")
                except Exception as e:
                    print(f"[DEBUG] FinBERT/Scraping Error for {article.title[:50]}...: {str(e)}")
                    sentiment_tag = f" [FinBERT: Error ({str(e)[:50]})]"

            return f"- [{source} | {article.datetime}] {article.title}{sentiment_tag}"

        # Fetch and score articles in parallel; drop validation failures.
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            results = [r for r in executor.map(process_news_item, raw_results) if r is not None]

        if not results:
            print(f"[DEBUG] No news results processed for {ticker}")
            return f"No recent news found for {ticker}."

        print(f"[DEBUG] Finished news search for {ticker}. Found {len(results)} valid articles.")
        return f"Recent News for {ticker} (with FinBERT Analysis):\n" + "\n".join(results)

    except Exception as e:
        return f"Error fetching news for {ticker}: {str(e)}"
backend/app/api/__init__.py ADDED
File without changes
backend/app/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (142 Bytes). View file
 
backend/app/api/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (145 Bytes). View file
 
backend/app/api/__pycache__/ai_builder.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
backend/app/api/__pycache__/ai_builder.cpython-312.pyc ADDED
Binary file (6.84 kB). View file
 
backend/app/api/__pycache__/auth.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
backend/app/api/__pycache__/auth.cpython-312.pyc ADDED
Binary file (5.01 kB). View file
 
backend/app/api/__pycache__/market.cpython-310.pyc ADDED
Binary file (840 Bytes). View file
 
backend/app/api/__pycache__/market.cpython-312.pyc ADDED
Binary file (14.1 kB). View file
 
backend/app/api/__pycache__/user.cpython-310.pyc ADDED
Binary file (3.73 kB). View file
 
backend/app/api/__pycache__/user.cpython-312.pyc ADDED
Binary file (8.31 kB). View file
 
backend/app/api/ai_builder.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""AI model-builder API routes: universe management, model CRUD, training, publishing."""
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from typing import List, Dict, Any
import json

from app.db.session import get_db
from app.services.user_service import user_service
from app.models.database import UserAIModel, User as DBUser
from app.core.auth import get_user_or_demo

# Import from our new model_builder package
from app.model_builder import (
    ModelTrainer,
    ModelPublisher,
    UniverseItemSchema,
    ModelSaveSchema,
    ModelResponseSchema,
    TrainRequestSchema
)

router = APIRouter()

# --- Universe Management ---

@router.get("/universe", response_model=List[str])
async def get_universe(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Return the ticker symbols in the current user's universe."""
    items = await user_service.get_universe(db, user.id)
    return [item.symbol for item in items]

@router.post("/universe")
async def add_to_universe(item: UniverseItemSchema, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Add one symbol to the current user's universe."""
    await user_service.add_to_universe(db, user.id, item.symbol, item.name)
    return {"status": "success"}

@router.post("/universe/sync-all")
async def sync_all(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Sync historical data for every symbol in the user's universe, sequentially."""
    items = await user_service.get_universe(db, user.id)
    for item in items:
        await user_service.sync_historical_data(db, item.symbol)
    return {"status": "success"}

@router.get("/features-status")
async def get_features_status(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Return feature-computation status for the user's universe (shape defined by user_service)."""
    return await user_service.get_features_status(db, user.id)

# --- AI Model Management ---

@router.get("/models", response_model=List[ModelResponseSchema])
async def get_models(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """List the current user's saved AI models, newest first."""
    result = await db.execute(
        select(UserAIModel)
        .where(UserAIModel.user_id == user.id)
        .order_by(UserAIModel.created_at.desc())
    )
    models = result.scalars().all()

    # metrics is stored as a JSON string column; decode for the response.
    return [
        {
            "id": m.id,
            "name": m.name,
            "model_type": m.model_type,
            "target_symbol": m.target_symbol,
            "created_at": m.created_at.isoformat(),
            "metrics": json.loads(m.metrics) if m.metrics else {}
        }
        for m in models
    ]

@router.post("/models")
async def save_model(data: ModelSaveSchema, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Persist a trained model's metadata (parameters/metrics serialized to JSON)."""
    model = UserAIModel(
        user_id=user.id,
        name=data.name,
        model_type=data.model_type,
        target_symbol=data.target_symbol,
        parameters=json.dumps(data.parameters),
        metrics=json.dumps(data.metrics)
    )
    db.add(model)
    await db.commit()
    return {"status": "success", "id": model.id}

@router.delete("/models/{model_id}")
async def delete_model(model_id: int, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Delete a model; ownership checks are delegated to ModelPublisher."""
    publisher = ModelPublisher(db, user.id)
    return await publisher.delete_model(model_id)

# --- Training & Publishing ---

@router.post("/train")
async def train_model(data: TrainRequestSchema, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Train a model for the given symbol/features via ModelTrainer and return its result."""
    trainer = ModelTrainer(db, user.id)
    result = await trainer.train(
        target_symbol=data.target_symbol,
        model_type=data.model_type,
        features=data.features,
        test_size=data.test_size
    )
    return result

@router.post("/models/{model_id}/upload-to-hf")
async def upload_model_to_hf(model_id: int, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Publish a saved model to Hugging Face via ModelPublisher."""
    publisher = ModelPublisher(db, user.id)
    return await publisher.upload_to_hf(model_id)
backend/app/api/auth.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Authentication routes: Google SSO login/callback and session logout."""
from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi_sso.sso.google import GoogleSSO
from app.core.config import settings
from app.core.auth import create_access_token
from app.db.session import get_db
from app.services.user_service import user_service
from sqlalchemy.ext.asyncio import AsyncSession
from starlette.responses import RedirectResponse

router = APIRouter()

google_sso = GoogleSSO(
    client_id=settings.GOOGLE_CLIENT_ID,
    client_secret=settings.GOOGLE_CLIENT_SECRET,
    redirect_uri=settings.GOOGLE_REDIRECT_URI,
    allow_insecure_http=True  # Set to False in production
)

@router.get("/google/login")
async def google_login():
    """Redirect the browser to Google's OAuth consent screen."""
    with google_sso:
        return await google_sso.get_login_redirect()

@router.get("/google/callback")
async def google_callback(request: Request, db: AsyncSession = Depends(get_db)):
    """Handle the Google OAuth callback: upsert user, record session, redirect with JWT."""
    with google_sso:
        try:
            user_data = await google_sso.verify_and_process(request)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Google Authentication failed: {str(e)}")

    if not user_data:
        raise HTTPException(status_code=400, detail="Failed to get user data from Google")

    # Check if user exists; create a record on first login.
    user = await user_service.get_user_by_email(db, user_data.email)
    if not user:
        user = await user_service.create_user(
            db=db,
            email=user_data.email,
            full_name=user_data.display_name or user_data.email,
            avatar_url=user_data.picture,
            google_id=user_data.id
        )

    # Record Session (IP + user agent for auditing)
    from app.models.database import UserSession
    session_record = UserSession(
        user_id=user.id,
        ip_address=request.client.host if request.client else None,
        user_agent=request.headers.get("user-agent")
    )
    db.add(session_record)
    await db.commit()

    # Create access token (JWT subject is the user's email)
    access_token = create_access_token(data={"sub": user.email})

    # Redirect back to frontend with token
    # In a real app, you might use a more secure way to pass the token
    # NOTE(review): token in the query string can leak via logs/referrers — consider a safer channel.
    import os
    frontend_url = os.getenv("FRONTEND_URL", "http://localhost:5173")
    return RedirectResponse(url=f"{frontend_url}/auth/callback?token={access_token}")



# Fixing imports and implementing logout properly
from app.core.auth import get_user_or_demo
from app.models.database import UserSession, User as DBUser
from sqlalchemy import select
import datetime

@router.post("/logout")
async def logout_endpoint(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Close the user's most recent open session by stamping its logout time."""
    # Find the latest open session for this user
    # (== None is intentional: SQLAlchemy translates it to IS NULL)
    result = await db.execute(
        select(UserSession)
        .where(UserSession.user_id == user.id)
        .where(UserSession.logout_at == None)
        .order_by(UserSession.login_at.desc())
        .limit(1)
    )
    last_session = result.scalar_one_or_none()

    if last_session:
        # NOTE(review): utcnow() is naive/deprecated — presumably the column is
        # naive UTC too; confirm before switching to timezone-aware datetimes.
        last_session.logout_at = datetime.datetime.utcnow()
        await db.commit()
        return {"status": "success", "message": "Logged out successfully"}

    return {"status": "success", "message": "No active session found"}
backend/app/api/market.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Market API routes: indices, ticker search, and agentic options analysis."""
from fastapi import APIRouter, Query, HTTPException, Depends
from app.services.yahoo_finance import yahoo_finance_service
from app.models.market import MarketIndicesResponse, SearchResponse
from app.models.database import User, UserAgenticScan
from app.db.session import get_db
from app.core.auth import get_user_or_demo
from sqlalchemy.ext.asyncio import AsyncSession
from pydantic import BaseModel
import json
from typing import Optional, List, Dict, Any

router = APIRouter()

@router.get("/market-indices", response_model=MarketIndicesResponse)
async def get_market_indices():
    """Return current values for the tracked market indices."""
    results = await yahoo_finance_service.get_market_indices()
    return {"results": results}

@router.get("/search", response_model=SearchResponse)
async def search_ticker(q: str = Query(..., min_length=1)):
    """Search ticker symbols matching the query string."""
    results = await yahoo_finance_service.search_tickers(q)
    return {"results": results}


class OptionsAnalysisRequest(BaseModel):
    """Request body for /options-analysis."""
    symbol: str
    # LLM backend selector; see analyze_options_strategy for the mapping.
    provider: str = "openai"


class AgentLog(BaseModel):
    """One message emitted by an agent during the analysis stream."""
    source: str
    content: str
    avatar: str


class OptionsAnalysisResponse(BaseModel):
    """Full transcript plus the structured final decision (if one was produced)."""
    logs: List[AgentLog]
    result: Optional[Dict[str, Any]] = None
39
+
40
+
41
@router.post("/options-analysis", response_model=OptionsAnalysisResponse)
async def analyze_options_strategy(
    request: OptionsAnalysisRequest,
    db: AsyncSession = Depends(get_db),
    user: User = Depends(get_user_or_demo)
):
    """
    Runs multi-agent analysis for option strategy recommendations.
    Uses the local agentic_analyst module.

    Streams the agent team's messages into `logs`, extracts the RiskManager's
    final JSON verdict into `result`, and persists the scan to the database.
    Falls back to an INCOMPLETE result if the stream ends without a verdict.
    """
    print(f"[DEBUG] POST /api/options-analysis for symbol: {request.symbol} (provider: {request.provider})")
    try:
        # Import required modules from local agentic_analyst
        # (imported lazily so the route fails with a clear 500 if they are absent)
        from app.core.model_factory import AutoGenModelFactory
        from app.agentic_analyst.team import get_trading_team, extract_json

        # Agent icon mapping (avatars shown in the frontend transcript)
        AGENT_ICONS = {
            "MarketAnalyst": "📊",
            "SentimentAnalyst": "📰",
            "StrategyAdvisor": "🧠",
            "RiskManager": "🛡️",
            "System": "🤖",
            "User": "🕵️‍♂️"
        }

        # Setup model based on provider.
        # NOTE: "openai" is deliberately remapped to Gemini here.
        if request.provider == "gemini" or request.provider == "google" or request.provider == "openai":
            # Revert to 2.0-flash as it was confirmed working previously
            model_name = "gemini-2.0-flash"
            family = "gemini"
            request.provider = "google"
        elif request.provider == "groq":
            model_name = "llama-3.3-70b-versatile"
            family = "groq"
        elif request.provider == "ollama":
            model_name = "llama3.2:3b"
            family = "llama"
        else:
            # Default to Gemini
            model_name = "gemini-2.0-flash"
            family = "gemini"
            request.provider = "google"

        print(f"[DEBUG] Using provider: {request.provider}, model: {model_name}")

        try:
            model_client = AutoGenModelFactory.get_model(
                provider=request.provider,
                model_name=model_name,
                temperature=0.2,
                model_info={
                    "family": family,
                    "function_calling": True,
                }
            )
        except Exception as e:
            print(f"[DEBUG] Failed to initialize model client: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error initializing model: {str(e)}")

        # Get trading team
        print("[DEBUG] Getting trading team...")
        team = get_trading_team(model_client)

        # Construct task prompt handed to the agent team.
        task = f"""
        Perform a real-time trade analysis for {request.symbol}.
        1. MarketAnalyst: detailed technicals.
        2. SentimentAnalyst: news sentiment (Top 5 stories).
        3. StrategyAdvisor: recommend a spread with >70% confidence.
        4. RiskManager: validate. Output JSON with "final_decision" (TRADE/WAIT), "confidence", and "actionable_recommendation".
        """

        logs = []
        final_output = {}

        # Add user request log
        logs.append({
            "source": "User",
            "content": f"Analyze {request.symbol} ({request.provider})",
            "avatar": AGENT_ICONS["User"]
        })

        print(f"[DEBUG] Starting team stream for {request.symbol}...")
        print(f"[DEBUG] Task being sent to team: {task[:100]}...")

        try:
            # Run team and collect messages as they stream in.
            async for message in team.run_stream(task=task):
                raw_source = getattr(message, 'source', 'System')
                source = raw_source

                # Normalize source casing for the icon lookup.
                if source.lower() == 'user':
                    source = 'User'

                # Skip echoing the task prompt back into the transcript.
                if source == 'User' and "Perform a real-time trade analysis" in getattr(message, 'content', ''):
                    continue

                # Progress tracking for tool calls
                message_type = type(message).__name__
                # print(f"[DEBUG] Processing message from {source} (Type: {message_type})")

                # Check for tool calls specifically to show progress
                if "ToolCall" in message_type:
                    print(f"[DEBUG] {source} is executing tools...")

                content = getattr(message, 'content', '')

                # Handle non-string content (like tool calls) by stringifying.
                if not isinstance(content, str):
                    if isinstance(content, list):
                        content = "\n".join([str(c) for c in content])
                    else:
                        content = str(content)

                if not content:
                    continue

                print(f"[DEBUG] Agent Message -> Source: {source}, Length: {len(content)}")

                # Add to logs
                avatar_icon = AGENT_ICONS.get(source, "🤖")
                logs.append({
                    "source": source,
                    "content": content,
                    "avatar": avatar_icon
                })

                # Extract final output from RiskManager; later messages overwrite
                # earlier ones, so the last parseable verdict wins.
                if source == "RiskManager":
                    print("[DEBUG] RiskManager produced content, attempting extraction...")
                    parsed = extract_json(content)
                    if parsed:
                        print(f"[DEBUG] Successfully extracted final decision: {parsed.get('final_decision')}")
                        final_output = parsed
        except Exception as stream_err:
            print(f"[ERROR] Stream error during analysis: {stream_err}")
            # Log the error but continue to return whatever we have
            logs.append({
                "source": "System",
                "content": f"ERROR: Analysis interrupted. {str(stream_err)}",
                "avatar": "⚠️"
            })

        # Prepare final output logic with fallback when no verdict was parsed.
        if not final_output and logs:
            print("[DEBUG] No structured final output found. Attempting fallback save.")
            # Try to find any message that looks like a conclusion or just save as incomplete
            final_output = {
                "final_decision": "INCOMPLETE",
                "confidence": 0.0,
                "actionable_recommendation": "Analysis terminated early. Check logs for partial details.",
                "entry_price": "N/A",
                "risk_warning": "Abrupt termination detected."
            }

        if final_output:
            try:
                # Save to database; DB failure must not fail the HTTP response.
                db_scan = UserAgenticScan(
                    user_id=user.id,
                    symbol=request.symbol,
                    decision=final_output.get("final_decision", "WAIT"),
                    confidence=float(final_output.get("confidence", 0.0)),
                    recommendation=final_output.get("actionable_recommendation", "Analysis complete"),
                    entry_price=str(final_output.get("entry_price", "")),
                    raw_logs=json.dumps([{"source": l["source"], "content": l["content"]} for l in logs]),
                    risk_warning=final_output.get("risk_warning", "")
                )
                db.add(db_scan)
                await db.commit()
                print(f"[DEBUG] Saved Agentic Scan to DB: ID {db_scan.id}")
            except Exception as db_err:
                print(f"[ERROR] Failed to save scan to DB: {db_err}")

        print(f"[DEBUG] Analysis results ready for {request.symbol}. Logs: {len(logs)}")
        return {
            "logs": logs,
            "result": final_output if final_output else None
        }

    except ImportError as e:
        raise HTTPException(
            status_code=500,
            detail=f"Market analyst modules not available: {str(e)}"
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Analysis failed: {str(e)}"
        )
+ )
234
+
235
from app.models.database import HistoricalData
from sqlalchemy import select, desc

class FeatureDataPoint(BaseModel):
    """One day's stored price bar plus any precomputed indicators."""
    date: str
    close: float
    volume: float
    # Decoded from the HistoricalData.indicators JSON column; {} when absent.
    indicators: Optional[Dict[str, Any]] = None

class AgenticScanSummary(BaseModel):
    """Condensed view of a UserAgenticScan row for the ticker-detail response."""
    id: int
    decision: str
    confidence: float
    recommendation: str
    created_at: str
    risk_warning: Optional[str] = None
    entry_price: Optional[str] = None

class TickerDetailResponse(BaseModel):
    """Response body for GET /ticker/{symbol}."""
    symbol: str
    price: float
    change: float
    features: List[FeatureDataPoint]
    latest_strategy: Optional[AgenticScanSummary] = None
259
+
260
@router.get("/ticker/{symbol}", response_model=TickerDetailResponse)
async def get_ticker_details(
    symbol: str,
    db: AsyncSession = Depends(get_db),
    user: User = Depends(get_user_or_demo)
):
    """
    Aggregate live quote, stored historical features, and the latest agentic
    scan for one ticker.

    Quote failures are non-fatal: price/change fall back to 0.0 so the
    historical chart and strategy data still render.
    """
    # 1. Get Live Quote
    raw_symbol = symbol.upper()
    price_val = 0.0
    change_val = 0.0
    try:
        quotes = await yahoo_finance_service.get_quotes([raw_symbol])
        quote = quotes[0] if quotes else None

        if quote:
            # Handle string formatting like "+1.23", "1,234.56"
            p = quote.price.replace(',', '')
            if p != "N/A":
                price_val = float(p)

            c = quote.change.replace('+', '').replace(',', '')
            if c != "N/A":
                change_val = float(c)
    except Exception:
        # Quote service failure: keep the zero fallbacks set above.
        price_val = 0.0
        change_val = 0.0

    # 2. Get Historical Data (all records for full trend)
    hist_result = await db.execute(
        select(HistoricalData)
        .where(HistoricalData.symbol == raw_symbol)
        .order_by(desc(HistoricalData.date))
        # .limit(30) removed to show all history
    )
    hist_rows = hist_result.scalars().all()

    features = []
    for row in hist_rows:
        indicators = {}
        if row.indicators:
            try:
                indicators = json.loads(row.indicators)
            except (ValueError, TypeError):
                # Malformed JSON stored in the DB: serve the row without
                # indicators. (Narrowed from a bare `except:` which also
                # swallowed KeyboardInterrupt/SystemExit.)
                pass

        features.append({
            "date": row.date.strftime("%Y-%m-%d"),
            "close": row.close,
            "volume": row.volume,
            "indicators": indicators
        })
    features.reverse()  # Oldest to Newest for charting

    # 3. Get Latest Strategy for this user + symbol
    scan_result = await db.execute(
        select(UserAgenticScan)
        .where(UserAgenticScan.user_id == user.id)
        .where(UserAgenticScan.symbol == raw_symbol)
        .order_by(desc(UserAgenticScan.created_at))
        .limit(1)
    )
    latest_scan = scan_result.scalar_one_or_none()

    scan_summary = None
    if latest_scan:
        scan_summary = {
            "id": latest_scan.id,
            "decision": latest_scan.decision,
            "confidence": latest_scan.confidence,
            "recommendation": latest_scan.recommendation,
            "created_at": latest_scan.created_at.isoformat(),
            "risk_warning": latest_scan.risk_warning,
            "entry_price": latest_scan.entry_price
        }

    return {
        "symbol": raw_symbol,
        "price": price_val,
        "change": change_val,
        "features": features,
        "latest_strategy": scan_summary
    }
backend/app/api/user.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException
2
+ from sqlalchemy.ext.asyncio import AsyncSession
3
+ from app.db.session import get_db
4
+ from app.services.user_service import user_service
5
+ from app.services.yahoo_finance import yahoo_finance_service
6
+ from pydantic import BaseModel
7
+ from typing import List, Optional
8
+ from app.core.auth import get_current_user, get_authenticated_user, get_user_or_demo
9
+ from app.models.database import User as DBUser
10
+
11
+ router = APIRouter()
12
+
13
class WatchlistItemSchema(BaseModel):
    """Request payload identifying a stock to track: symbol, name, exchange only."""
    # Ticker symbol; also the key used to look up live quotes in GET /watchlist.
    symbol: str
    # Human-readable instrument/company name as shown in the UI.
    name: str
    # Exchange the symbol trades on.
    exchange: str
17
+
18
class WatchlistEnrichedSchema(WatchlistItemSchema):
    """Watchlist entry extended with live quote display fields.

    All numeric fields are preformatted strings (GET /watchlist substitutes
    "0.00" / "0.00%" when no quote is available for a symbol).
    """
    price: str
    change: str
    percentage: str
    # True when the change is non-negative — presumably drives up/down styling
    # in the client; defaults to True when no quote exists (see get_watchlist).
    isPositive: bool
23
+
24
class UserProfileSchema(BaseModel):
    """Response shape for GET /profile."""
    username: str
    full_name: str
    # May be absent for users who never uploaded an avatar.
    avatar_url: Optional[str]
    # UI theme preference; "dark" is the server-side default (see get_profile).
    theme: Optional[str] = "dark"
29
+
30
class ThemeUpdateSchema(BaseModel):
    """Request body for PUT /profile/theme; theme value is stored verbatim."""
    theme: str
32
+
33
class IndicatorsUpdateSchema(BaseModel):
    """Request body for POST /indicators: the full replacement list of indicator ids."""
    indicators: List[str]
35
+
36
@router.get("/profile", response_model=UserProfileSchema)
async def get_profile(user: DBUser = Depends(get_user_or_demo)):
    """Return the current (or demo) user's profile with display defaults applied."""
    # No explicit username → fall back to the local part of the email address.
    display_name = user.username or user.email.split('@')[0]
    profile = {
        "username": display_name,
        "full_name": user.full_name,
        "avatar_url": user.avatar_url,
        # Mirrors UserProfileSchema's "dark" default when no theme is stored.
        "theme": user.theme or "dark",
    }
    return profile
44
+
45
@router.put("/profile/theme")
async def update_theme(data: ThemeUpdateSchema, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_authenticated_user)):
    """Persist the authenticated user's UI theme preference.

    Requires a real (non-demo) user since it writes to the database.
    """
    user.theme = data.theme
    await db.commit()
    # Echo back the stored value so the client can confirm the update.
    return {"status": "success", "theme": user.theme}
50
+
51
@router.get("/watchlist", response_model=List[WatchlistEnrichedSchema])
async def get_watchlist(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Return the user's watchlist, each entry enriched with live quote data."""
    watchlist = await user_service.get_watchlist(db, user.id)
    if not watchlist:
        return []

    # One batched quote request for every symbol, then index quotes by symbol.
    quotes = await yahoo_finance_service.get_quotes([entry.symbol for entry in watchlist])
    by_symbol = {quote.symbol: quote for quote in quotes}

    def enrich(entry):
        # Symbols with no quote (fetch failure / unknown symbol) degrade to
        # zeroed display strings instead of failing the whole endpoint.
        quote = by_symbol.get(entry.symbol)
        return {
            "symbol": entry.symbol,
            "name": entry.name,
            "exchange": entry.exchange,
            "price": quote.price if quote else "0.00",
            "change": quote.change if quote else "0.00",
            "percentage": quote.percentage if quote else "0.00%",
            "isPositive": quote.isPositive if quote else True,
        }

    return [enrich(entry) for entry in watchlist]
76
+
77
@router.post("/watchlist")
async def add_to_watchlist(item: WatchlistItemSchema, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_authenticated_user)):
    """Add one symbol to the authenticated user's watchlist."""
    # Persistence (and any de-duplication) is delegated to the service layer.
    await user_service.add_to_watchlist(db, user.id, item.symbol, item.name, item.exchange)
    return {"status": "success"}
83
+
84
@router.delete("/watchlist/{symbol}")
async def remove_from_watchlist(symbol: str, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_authenticated_user)):
    """Remove the given symbol from the authenticated user's watchlist."""
    # Deletion is best-effort at this layer; the service decides how a
    # missing symbol is handled.
    await user_service.remove_from_watchlist(db, user.id, symbol)
    return {"status": "success"}
88
+
89
@router.get("/indicators", response_model=List[str])
async def get_indicators(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """List the indicator identifiers the user has enabled."""
    enabled = await user_service.get_indicators(db, user.id)
    return enabled
92
+
93
@router.post("/indicators")
async def update_indicators(data: IndicatorsUpdateSchema, db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_authenticated_user)):
    """Replace the authenticated user's enabled-indicator list wholesale."""
    await user_service.update_indicators(db, user.id, data.indicators)
    return {"status": "success"}
97
+
98
+ from app.models.database import UserAgenticScan
99
+ from sqlalchemy import select, desc
100
+
101
class AgenticScanSchema(BaseModel):
    """Response shape for one agentic-scan record (GET /scans)."""
    id: int
    symbol: str
    # Scan verdict string as stored on UserAgenticScan.decision.
    decision: str
    confidence: float
    recommendation: str
    # ISO-8601 timestamp (serialized via created_at.isoformat() in get_scans).
    created_at: str
    risk_warning: Optional[str] = None
    # Suggested entry price when the scan produced one; stored as a string.
    entry_price: Optional[str] = None
112
@router.get("/scans", response_model=List[AgenticScanSchema])
async def get_scans(db: AsyncSession = Depends(get_db), user: DBUser = Depends(get_user_or_demo)):
    """Return every agentic scan belonging to the user, newest first."""
    stmt = (
        select(UserAgenticScan)
        .where(UserAgenticScan.user_id == user.id)
        .order_by(desc(UserAgenticScan.created_at))
    )
    rows = (await db.execute(stmt)).scalars().all()

    payload = []
    for scan in rows:
        payload.append(
            AgenticScanSchema(
                id=scan.id,
                symbol=scan.symbol,
                decision=scan.decision,
                confidence=scan.confidence,
                recommendation=scan.recommendation,
                # Serialize the DB datetime to an ISO-8601 string for the API.
                created_at=scan.created_at.isoformat(),
                risk_warning=scan.risk_warning,
                entry_price=scan.entry_price,
            )
        )
    return payload
backend/app/core/__init__.py ADDED
File without changes
backend/app/core/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (143 Bytes). View file
 
backend/app/core/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (146 Bytes). View file