Spaces:
Build error
Build error
Deploy hAPI-face2 mediator 2025-10-07T05:24:34Z
Browse files- .DS_Store +0 -0
- .env.example +13 -0
- .github/workflows/sync_to_hf_space.yml +0 -18
- .gitignore +4 -2
- Dockerfile +15 -8
- README.md +93 -151
- __init__.py +0 -0
- apis/__init__.py +0 -0
- apis/chat_api.py +0 -189
- app.yaml +3 -0
- constants/__init__.py +0 -0
- constants/models.py +0 -75
- examples/__init__.py +0 -0
- examples/chat_with_openai.py +0 -25
- examples/chat_with_post.py +0 -55
- gem.md +24 -0
- messagers/__init__.py +0 -0
- messagers/message_composer.py +0 -190
- messagers/message_outputer.py +0 -65
- mocks/__init__.py +0 -0
- mocks/stream_chat_mocker.py +0 -13
- networks/__init__.py +0 -0
- networks/message_streamer.py +0 -201
- package.json +30 -0
- requirements.txt +0 -14
- src/.DS_Store +0 -0
- src/routes/bookEmpire.ts +69 -0
- src/routes/flows.ts +118 -0
- src/routes/money.ts +78 -0
- src/routes/river.ts +73 -0
- src/server.ts +83 -0
- src/supabase.ts +34 -0
- tsconfig.json +15 -0
- utils/__init__.py +0 -69
- utils/enver.py +0 -60
- utils/logger.py +0 -269
- vercel.json +0 -17
.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
.env.example
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Supabase credentials
|
| 2 |
+
SUPABASE_URL=https://your-project.supabase.co
|
| 3 |
+
SUPABASE_SERVICE_ROLE_KEY=service-role-key
|
| 4 |
+
|
| 5 |
+
# Optional: override Supabase function names
|
| 6 |
+
SUPABASE_FLOW_EXECUTOR=flow-executor
|
| 7 |
+
SUPABASE_AUTOMATION_FUNCTION=automated-revenue-generator
|
| 8 |
+
SUPABASE_BOOK_ORCHESTRATOR=wealth-river-orchestrator
|
| 9 |
+
SUPABASE_MARKET_RESEARCH=market-research
|
| 10 |
+
SUPABASE_BOOK_GENERATOR=product-generation
|
| 11 |
+
|
| 12 |
+
# API security key expected in Authorization header (Bearer <key>)
|
| 13 |
+
AIS3_API_KEY=set-a-strong-shared-key
|
.github/workflows/sync_to_hf_space.yml
DELETED
|
@@ -1,18 +0,0 @@
|
|
| 1 |
-
name: Sync to Hugging Face hub
|
| 2 |
-
on:
|
| 3 |
-
push:
|
| 4 |
-
branches: [main]
|
| 5 |
-
workflow_dispatch:
|
| 6 |
-
|
| 7 |
-
jobs:
|
| 8 |
-
sync-to-hub:
|
| 9 |
-
runs-on: ubuntu-latest
|
| 10 |
-
steps:
|
| 11 |
-
- uses: actions/checkout@v3
|
| 12 |
-
with:
|
| 13 |
-
fetch-depth: 0
|
| 14 |
-
lfs: true
|
| 15 |
-
- name: Push to hub
|
| 16 |
-
env:
|
| 17 |
-
HF_TOKEN: ${{ secrets.HF_TOKEN }}
|
| 18 |
-
run: git push -f https://Hansimov:$HF_TOKEN@huggingface.co/spaces/Hansimov/hf-llm-api main
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
CHANGED
|
@@ -1,2 +1,4 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
|
|
|
|
|
| 1 |
+
node_modules
|
| 2 |
+
npm-debug.log
|
| 3 |
+
dist
|
| 4 |
+
.env
|
Dockerfile
CHANGED
|
@@ -1,8 +1,15 @@
|
|
| 1 |
-
FROM
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM node:20-alpine
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
|
| 5 |
+
COPY package.json package-lock.json* ./
|
| 6 |
+
RUN npm install --omit=dev
|
| 7 |
+
|
| 8 |
+
COPY tsconfig.json ./
|
| 9 |
+
COPY src ./src
|
| 10 |
+
|
| 11 |
+
RUN npm run build
|
| 12 |
+
|
| 13 |
+
ENV NODE_ENV=production
|
| 14 |
+
EXPOSE 7860
|
| 15 |
+
CMD ["node", "dist/server.js"]
|
README.md
CHANGED
|
@@ -1,164 +1,106 @@
|
|
| 1 |
-
|
| 2 |
-
title: HF LLM API
|
| 3 |
-
emoji: ☯️
|
| 4 |
-
colorFrom: gray
|
| 5 |
-
colorTo: gray
|
| 6 |
-
sdk: docker
|
| 7 |
-
app_port: 23333
|
| 8 |
-
---
|
| 9 |
|
| 10 |
-
|
| 11 |
-
Huggingface LLM Inference API in OpenAI message format.
|
| 12 |
-
|
| 13 |
-
Project link: https://github.com/Hansimov/hf-llm-api
|
| 14 |
|
| 15 |
## Features
|
| 16 |
|
| 17 |
-
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
-
|
| 21 |
-
|
| 22 |
-
- Support both stream and no-stream response
|
| 23 |
-
- Support API Key via both HTTP auth header and env varible [#4](https://github.com/Hansimov/hf-llm-api/issues/4)
|
| 24 |
-
- Docker deployment
|
| 25 |
-
|
| 26 |
-
## Run API service
|
| 27 |
-
|
| 28 |
-
### Run in Command Line
|
| 29 |
|
| 30 |
-
|
| 31 |
|
| 32 |
-
```bash
|
| 33 |
-
# pipreqs . --force --mode no-pin
|
| 34 |
-
pip install -r requirements.txt
|
| 35 |
```
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
```
|
| 42 |
|
| 43 |
-
##
|
| 44 |
-
|
| 45 |
-
**Docker build:**
|
| 46 |
|
| 47 |
```bash
|
| 48 |
-
|
|
|
|
|
|
|
| 49 |
```
|
| 50 |
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
#
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
``
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
)
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
```
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
``
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
#
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
"model": "mixtral-8x7b",
|
| 119 |
-
"messages": [
|
| 120 |
-
{
|
| 121 |
-
"role": "user",
|
| 122 |
-
"content": "what is your model",
|
| 123 |
-
}
|
| 124 |
-
],
|
| 125 |
-
"stream": True,
|
| 126 |
-
}
|
| 127 |
-
|
| 128 |
-
with httpx.stream(
|
| 129 |
-
"POST",
|
| 130 |
-
chat_api + "/chat/completions",
|
| 131 |
-
headers=requests_headers,
|
| 132 |
-
json=requests_payload,
|
| 133 |
-
timeout=httpx.Timeout(connect=20, read=60, write=20, pool=None),
|
| 134 |
-
) as response:
|
| 135 |
-
# https://docs.aiohttp.org/en/stable/streams.html
|
| 136 |
-
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
|
| 137 |
-
response_content = ""
|
| 138 |
-
for line in response.iter_lines():
|
| 139 |
-
remove_patterns = [r"^\s*data:\s*", r"^\s*\[DONE\]\s*"]
|
| 140 |
-
for pattern in remove_patterns:
|
| 141 |
-
line = re.sub(pattern, "", line).strip()
|
| 142 |
-
|
| 143 |
-
if line:
|
| 144 |
-
try:
|
| 145 |
-
line_data = json.loads(line)
|
| 146 |
-
except Exception as e:
|
| 147 |
-
try:
|
| 148 |
-
line_data = ast.literal_eval(line)
|
| 149 |
-
except:
|
| 150 |
-
print(f"Error: {line}")
|
| 151 |
-
raise e
|
| 152 |
-
# print(f"line: {line_data}")
|
| 153 |
-
delta_data = line_data["choices"][0]["delta"]
|
| 154 |
-
finish_reason = line_data["choices"][0]["finish_reason"]
|
| 155 |
-
if "role" in delta_data:
|
| 156 |
-
role = delta_data["role"]
|
| 157 |
-
if "content" in delta_data:
|
| 158 |
-
delta_content = delta_data["content"]
|
| 159 |
-
response_content += delta_content
|
| 160 |
-
print(delta_content, end="", flush=True)
|
| 161 |
-
if finish_reason == "stop":
|
| 162 |
-
print()
|
| 163 |
-
|
| 164 |
-
```
|
|
|
|
| 1 |
+
# hAPI face² — Hugging Face Mediator API
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
+
This Space exposes a secure Express API that bridges Aura Venture, Supabase edge functions, and the Terragon automation stack. Host it on Hugging Face Spaces (Docker runtime) and point the front-end `ais3-bridge` edge function at the Space URL to unlock live River of Wealth + AIS3 data in the UI.
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
## Features
|
| 6 |
|
| 7 |
+
- **River catalog & analytics** – reads Supabase `river_tools`, `revenue_streams`, and metrics tables.
|
| 8 |
+
- **Automation flows** – lists and manages automation flow records via Supabase tables/functions.
|
| 9 |
+
- **Revenue telemetry** – aggregates live revenue metrics + opportunities by querying Supabase.
|
| 10 |
+
- **Book empire triggers** – proxies to Supabase functions for autonomous book pipeline and market research.
|
| 11 |
+
- **Simple API key auth** – protect every endpoint (except `/health`) with a shared `AIS3_API_KEY`.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
+
## Directory Structure
|
| 14 |
|
|
|
|
|
|
|
|
|
|
| 15 |
```
|
| 16 |
+
hAPI_face2/
|
| 17 |
+
├─ Dockerfile
|
| 18 |
+
├─ package.json
|
| 19 |
+
├─ tsconfig.json
|
| 20 |
+
├─ .env.example
|
| 21 |
+
├─ README.md
|
| 22 |
+
└─ src/
|
| 23 |
+
├─ server.ts # Express bootstrap + middleware
|
| 24 |
+
├─ routes/
|
| 25 |
+
│ ├─ river.ts # /api/river/* endpoints
|
| 26 |
+
│ ├─ flows.ts # /api/flows/* endpoints
|
| 27 |
+
│ ├─ money.ts # /api/money/* endpoints
|
| 28 |
+
│ └─ bookEmpire.ts # /api/book-empire/* endpoints
|
| 29 |
+
└─ supabase.ts # Supabase client helpers
|
| 30 |
```
|
| 31 |
|
| 32 |
+
## Quick Start (local)
|
|
|
|
|
|
|
| 33 |
|
| 34 |
```bash
|
| 35 |
+
cp .env.example .env # fill with Supabase + API key values
|
| 36 |
+
npm install
|
| 37 |
+
npm run dev # hot reload with tsx
|
| 38 |
```
|
| 39 |
|
| 40 |
+
- Health check: `GET http://localhost:7860/health`
|
| 41 |
+
- Authenticated call: `curl -H "Authorization: Bearer $AIS3_API_KEY" http://localhost:7860/api/river/tools`
|
| 42 |
+
|
| 43 |
+
## Deploy to Hugging Face Space
|
| 44 |
+
|
| 45 |
+
1. Create a **Docker** Space (e.g., `acecalisto3/hAPI_face2`). Via CLI:
|
| 46 |
+
```bash
|
| 47 |
+
huggingface-cli repo create acecalisto3/hAPI_face2 \
|
| 48 |
+
--type space \
|
| 49 |
+
--sdk docker \
|
| 50 |
+
--private
|
| 51 |
+
```
|
| 52 |
+
2. Push this repo to the Space (or mirror via `huggingface-cli`).
|
| 53 |
+
3. Configure secrets in the Space Settings → *New secret*:
|
| 54 |
+
- `SUPABASE_URL`
|
| 55 |
+
- `SUPABASE_SERVICE_ROLE_KEY`
|
| 56 |
+
- `AIS3_API_KEY`
|
| 57 |
+
- Optional function overrides (`SUPABASE_FLOW_EXECUTOR`, ...)
|
| 58 |
+
4. The Space will build automatically; once running, copy the public URL (e.g. `https://acecalisto3-hapi-face2.hf.space`).
|
| 59 |
+
5. Update the Supabase `ais3-bridge` function env:
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
supabase secrets set AIS3_API_URL=https://acecalisto3-hapi-face2.hf.space/api
|
| 63 |
+
supabase secrets set AIS3_API_KEY=<same-key>
|
| 64 |
+
supabase functions deploy ais3-bridge
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
## Environment Variables
|
| 68 |
+
|
| 69 |
+
| Name | Description |
|
| 70 |
+
|------|-------------|
|
| 71 |
+
| `SUPABASE_URL` | Supabase project URL |
|
| 72 |
+
| `SUPABASE_SERVICE_ROLE_KEY` | Service role key used for trusted server-side access |
|
| 73 |
+
| `AIS3_API_KEY` | Shared bearer token expected from consumers |
|
| 74 |
+
| `SUPABASE_FLOW_EXECUTOR` | (optional) Supabase function that executes flows (default `flow-executor`) |
|
| 75 |
+
| `SUPABASE_AUTOMATION_FUNCTION` | (optional) Function that runs revenue automation (default `automated-revenue-generator`) |
|
| 76 |
+
| `SUPABASE_BOOK_ORCHESTRATOR` | (optional) Function that orchestrates book pipeline (default `wealth-river-orchestrator`) |
|
| 77 |
+
| `SUPABASE_MARKET_RESEARCH` | (optional) Function for book idea discovery (default `market-research`) |
|
| 78 |
+
| `SUPABASE_BOOK_GENERATOR` | (optional) Function that drafts a book manually (default `product-generation`) |
|
| 79 |
+
|
| 80 |
+
## API Overview
|
| 81 |
+
|
| 82 |
+
- `GET /health` – readiness probe (no auth)
|
| 83 |
+
- `GET /api/river/tools` – list monetized tools
|
| 84 |
+
- `GET /api/river/analytics/:toolId` – aggregate revenue + subscriber metrics for a tool
|
| 85 |
+
- `GET /api/flows` – list automation flows (supports `status`, `type` query params)
|
| 86 |
+
- `POST /api/flows/:id/execute` – execute flow via Supabase function
|
| 87 |
+
- `POST /api/flows/:id/publish` – mark flow live
|
| 88 |
+
- `POST /api/flows/:id/duplicate` – duplicate flow
|
| 89 |
+
- `POST /api/flows/:id/archive` – archive flow
|
| 90 |
+
- `GET /api/money/revenue` – revenue metrics suite
|
| 91 |
+
- `GET /api/money/opportunities` – list revenue opportunities
|
| 92 |
+
- `POST /api/money/automate` – trigger revenue automation function
|
| 93 |
+
- `POST /api/book-empire/autonomous/start` – start autonomous book pipeline
|
| 94 |
+
- `GET /api/book-empire/autonomous/status` – latest pipeline stats
|
| 95 |
+
- `POST /api/book-empire/idea/generate` – generate market-validated book idea
|
| 96 |
+
- `POST /api/book-empire/book/generate` – kick off manual book generation flow
|
| 97 |
+
|
| 98 |
+
All authenticated endpoints must receive `Authorization: Bearer <AIS3_API_KEY>`.
|
| 99 |
+
|
| 100 |
+
## Notes
|
| 101 |
+
|
| 102 |
+
- The server gracefully handles missing Supabase tables by returning empty responses instead of crashing. Adjust queries if your schema differs.
|
| 103 |
+
- Extend `src/routes/*` to add more orchestrator endpoints as your backend grows.
|
| 104 |
+
- Hugging Face Spaces provide ephemeral storage; all state should live in Supabase.
|
| 105 |
+
|
| 106 |
+
Enjoy the flow! 🚀
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
__init__.py
DELETED
|
File without changes
|
apis/__init__.py
DELETED
|
File without changes
|
apis/chat_api.py
DELETED
|
@@ -1,189 +0,0 @@
|
|
| 1 |
-
import argparse
|
| 2 |
-
import markdown2
|
| 3 |
-
import os
|
| 4 |
-
import sys
|
| 5 |
-
import uvicorn
|
| 6 |
-
|
| 7 |
-
from pathlib import Path
|
| 8 |
-
from typing import Union
|
| 9 |
-
|
| 10 |
-
from fastapi import FastAPI, Depends
|
| 11 |
-
from fastapi.responses import HTMLResponse
|
| 12 |
-
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
| 13 |
-
from pydantic import BaseModel, Field
|
| 14 |
-
from sse_starlette.sse import EventSourceResponse, ServerSentEvent
|
| 15 |
-
|
| 16 |
-
from messagers.message_composer import MessageComposer
|
| 17 |
-
from mocks.stream_chat_mocker import stream_chat_mock
|
| 18 |
-
from networks.message_streamer import MessageStreamer
|
| 19 |
-
from utils.logger import logger
|
| 20 |
-
from constants.models import AVAILABLE_MODELS_DICTS
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
class ChatAPIApp:
|
| 24 |
-
def __init__(self):
|
| 25 |
-
self.app = FastAPI(
|
| 26 |
-
docs_url="/",
|
| 27 |
-
title="HuggingFace LLM API",
|
| 28 |
-
swagger_ui_parameters={"defaultModelsExpandDepth": -1},
|
| 29 |
-
version="1.0",
|
| 30 |
-
)
|
| 31 |
-
self.setup_routes()
|
| 32 |
-
|
| 33 |
-
def get_available_models(self):
|
| 34 |
-
return {"object": "list", "data": AVAILABLE_MODELS_DICTS}
|
| 35 |
-
|
| 36 |
-
def extract_api_key(
|
| 37 |
-
credentials: HTTPAuthorizationCredentials = Depends(
|
| 38 |
-
HTTPBearer(auto_error=False)
|
| 39 |
-
),
|
| 40 |
-
):
|
| 41 |
-
api_key = None
|
| 42 |
-
if credentials:
|
| 43 |
-
api_key = credentials.credentials
|
| 44 |
-
else:
|
| 45 |
-
api_key = os.getenv("HF_TOKEN")
|
| 46 |
-
|
| 47 |
-
if api_key:
|
| 48 |
-
if api_key.startswith("hf_"):
|
| 49 |
-
return api_key
|
| 50 |
-
else:
|
| 51 |
-
logger.warn(f"Invalid HF Token!")
|
| 52 |
-
else:
|
| 53 |
-
logger.warn("Not provide HF Token!")
|
| 54 |
-
return None
|
| 55 |
-
|
| 56 |
-
class ChatCompletionsPostItem(BaseModel):
|
| 57 |
-
model: str = Field(
|
| 58 |
-
default="mixtral-8x7b",
|
| 59 |
-
description="(str) `mixtral-8x7b`",
|
| 60 |
-
)
|
| 61 |
-
messages: list = Field(
|
| 62 |
-
default=[{"role": "user", "content": "Hello, who are you?"}],
|
| 63 |
-
description="(list) Messages",
|
| 64 |
-
)
|
| 65 |
-
temperature: Union[float, None] = Field(
|
| 66 |
-
default=0.5,
|
| 67 |
-
description="(float) Temperature",
|
| 68 |
-
)
|
| 69 |
-
top_p: Union[float, None] = Field(
|
| 70 |
-
default=0.95,
|
| 71 |
-
description="(float) top p",
|
| 72 |
-
)
|
| 73 |
-
max_tokens: Union[int, None] = Field(
|
| 74 |
-
default=-1,
|
| 75 |
-
description="(int) Max tokens",
|
| 76 |
-
)
|
| 77 |
-
use_cache: bool = Field(
|
| 78 |
-
default=False,
|
| 79 |
-
description="(bool) Use cache",
|
| 80 |
-
)
|
| 81 |
-
stream: bool = Field(
|
| 82 |
-
default=True,
|
| 83 |
-
description="(bool) Stream",
|
| 84 |
-
)
|
| 85 |
-
|
| 86 |
-
def chat_completions(
|
| 87 |
-
self, item: ChatCompletionsPostItem, api_key: str = Depends(extract_api_key)
|
| 88 |
-
):
|
| 89 |
-
streamer = MessageStreamer(model=item.model)
|
| 90 |
-
composer = MessageComposer(model=item.model)
|
| 91 |
-
composer.merge(messages=item.messages)
|
| 92 |
-
# streamer.chat = stream_chat_mock
|
| 93 |
-
|
| 94 |
-
stream_response = streamer.chat_response(
|
| 95 |
-
prompt=composer.merged_str,
|
| 96 |
-
temperature=item.temperature,
|
| 97 |
-
top_p=item.top_p,
|
| 98 |
-
max_new_tokens=item.max_tokens,
|
| 99 |
-
api_key=api_key,
|
| 100 |
-
use_cache=item.use_cache,
|
| 101 |
-
)
|
| 102 |
-
if item.stream:
|
| 103 |
-
event_source_response = EventSourceResponse(
|
| 104 |
-
streamer.chat_return_generator(stream_response),
|
| 105 |
-
media_type="text/event-stream",
|
| 106 |
-
ping=2000,
|
| 107 |
-
ping_message_factory=lambda: ServerSentEvent(**{"comment": ""}),
|
| 108 |
-
)
|
| 109 |
-
return event_source_response
|
| 110 |
-
else:
|
| 111 |
-
data_response = streamer.chat_return_dict(stream_response)
|
| 112 |
-
return data_response
|
| 113 |
-
|
| 114 |
-
def get_readme(self):
|
| 115 |
-
readme_path = Path(__file__).parents[1] / "README.md"
|
| 116 |
-
with open(readme_path, "r", encoding="utf-8") as rf:
|
| 117 |
-
readme_str = rf.read()
|
| 118 |
-
readme_html = markdown2.markdown(
|
| 119 |
-
readme_str, extras=["table", "fenced-code-blocks", "highlightjs-lang"]
|
| 120 |
-
)
|
| 121 |
-
return readme_html
|
| 122 |
-
|
| 123 |
-
def setup_routes(self):
|
| 124 |
-
for prefix in ["", "/v1", "/api", "/api/v1"]:
|
| 125 |
-
if prefix in ["/api/v1"]:
|
| 126 |
-
include_in_schema = True
|
| 127 |
-
else:
|
| 128 |
-
include_in_schema = False
|
| 129 |
-
|
| 130 |
-
self.app.get(
|
| 131 |
-
prefix + "/models",
|
| 132 |
-
summary="Get available models",
|
| 133 |
-
include_in_schema=include_in_schema,
|
| 134 |
-
)(self.get_available_models)
|
| 135 |
-
|
| 136 |
-
self.app.post(
|
| 137 |
-
prefix + "/chat/completions",
|
| 138 |
-
summary="Chat completions in conversation session",
|
| 139 |
-
include_in_schema=include_in_schema,
|
| 140 |
-
)(self.chat_completions)
|
| 141 |
-
self.app.get(
|
| 142 |
-
"/readme",
|
| 143 |
-
summary="README of HF LLM API",
|
| 144 |
-
response_class=HTMLResponse,
|
| 145 |
-
include_in_schema=False,
|
| 146 |
-
)(self.get_readme)
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
class ArgParser(argparse.ArgumentParser):
|
| 150 |
-
def __init__(self, *args, **kwargs):
|
| 151 |
-
super(ArgParser, self).__init__(*args, **kwargs)
|
| 152 |
-
|
| 153 |
-
self.add_argument(
|
| 154 |
-
"-s",
|
| 155 |
-
"--server",
|
| 156 |
-
type=str,
|
| 157 |
-
default="0.0.0.0",
|
| 158 |
-
help="Server IP for HF LLM Chat API",
|
| 159 |
-
)
|
| 160 |
-
self.add_argument(
|
| 161 |
-
"-p",
|
| 162 |
-
"--port",
|
| 163 |
-
type=int,
|
| 164 |
-
default=23333,
|
| 165 |
-
help="Server Port for HF LLM Chat API",
|
| 166 |
-
)
|
| 167 |
-
|
| 168 |
-
self.add_argument(
|
| 169 |
-
"-d",
|
| 170 |
-
"--dev",
|
| 171 |
-
default=False,
|
| 172 |
-
action="store_true",
|
| 173 |
-
help="Run in dev mode",
|
| 174 |
-
)
|
| 175 |
-
|
| 176 |
-
self.args = self.parse_args(sys.argv[1:])
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
app = ChatAPIApp().app
|
| 180 |
-
|
| 181 |
-
if __name__ == "__main__":
|
| 182 |
-
args = ArgParser().args
|
| 183 |
-
if args.dev:
|
| 184 |
-
uvicorn.run("__main__:app", host=args.server, port=args.port, reload=True)
|
| 185 |
-
else:
|
| 186 |
-
uvicorn.run("__main__:app", host=args.server, port=args.port, reload=False)
|
| 187 |
-
|
| 188 |
-
# python -m apis.chat_api # [Docker] on product mode
|
| 189 |
-
# python -m apis.chat_api -d # [Dev] on develop mode
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
sdk: docker
|
| 2 |
+
authors:
|
| 3 |
+
- acecalisto3
|
constants/__init__.py
DELETED
|
File without changes
|
constants/models.py
DELETED
|
@@ -1,75 +0,0 @@
|
|
| 1 |
-
MODEL_MAP = {
|
| 2 |
-
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", # [Recommended]
|
| 3 |
-
"nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
|
| 4 |
-
"mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
|
| 5 |
-
"openchat-3.5": "openchat/openchat-3.5-0106",
|
| 6 |
-
"gemma-7b": "google/gemma-7b-it",
|
| 7 |
-
"default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
| 8 |
-
}
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
STOP_SEQUENCES_MAP = {
|
| 12 |
-
"mixtral-8x7b": "</s>",
|
| 13 |
-
"nous-mixtral-8x7b": "<|im_end|>",
|
| 14 |
-
"mistral-7b": "</s>",
|
| 15 |
-
"openchat-3.5": "<|end_of_turn|>",
|
| 16 |
-
"gemma-7b": "<eos>",
|
| 17 |
-
}
|
| 18 |
-
|
| 19 |
-
TOKEN_LIMIT_MAP = {
|
| 20 |
-
"mixtral-8x7b": 32768,
|
| 21 |
-
"nous-mixtral-8x7b": 32768,
|
| 22 |
-
"mistral-7b": 32768,
|
| 23 |
-
"openchat-3.5": 8192,
|
| 24 |
-
"gemma-7b": 8192,
|
| 25 |
-
}
|
| 26 |
-
|
| 27 |
-
TOKEN_RESERVED = 20
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
AVAILABLE_MODELS = [
|
| 31 |
-
"mixtral-8x7b",
|
| 32 |
-
"nous-mixtral-8x7b",
|
| 33 |
-
"mistral-7b",
|
| 34 |
-
"openchat-3.5",
|
| 35 |
-
"gemma-7b",
|
| 36 |
-
]
|
| 37 |
-
|
| 38 |
-
# https://platform.openai.com/docs/api-reference/models/list
|
| 39 |
-
AVAILABLE_MODELS_DICTS = [
|
| 40 |
-
{
|
| 41 |
-
"id": "mixtral-8x7b",
|
| 42 |
-
"description": "[mistralai/Mixtral-8x7B-Instruct-v0.1]: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
|
| 43 |
-
"object": "model",
|
| 44 |
-
"created": 1700000000,
|
| 45 |
-
"owned_by": "mistralai",
|
| 46 |
-
},
|
| 47 |
-
{
|
| 48 |
-
"id": "nous-mixtral-8x7b",
|
| 49 |
-
"description": "[NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO]: https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
|
| 50 |
-
"object": "model",
|
| 51 |
-
"created": 1700000000,
|
| 52 |
-
"owned_by": "NousResearch",
|
| 53 |
-
},
|
| 54 |
-
{
|
| 55 |
-
"id": "mistral-7b",
|
| 56 |
-
"description": "[mistralai/Mistral-7B-Instruct-v0.2]: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
|
| 57 |
-
"object": "model",
|
| 58 |
-
"created": 1700000000,
|
| 59 |
-
"owned_by": "mistralai",
|
| 60 |
-
},
|
| 61 |
-
{
|
| 62 |
-
"id": "openchat-3.5",
|
| 63 |
-
"description": "[openchat/openchat-3.5-0106]: https://huggingface.co/openchat/openchat-3.5-0106",
|
| 64 |
-
"object": "model",
|
| 65 |
-
"created": 1700000000,
|
| 66 |
-
"owned_by": "openchat",
|
| 67 |
-
},
|
| 68 |
-
{
|
| 69 |
-
"id": "gemma-7b",
|
| 70 |
-
"description": "[google/gemma-7b-it]: https://huggingface.co/google/gemma-7b-it",
|
| 71 |
-
"object": "model",
|
| 72 |
-
"created": 1700000000,
|
| 73 |
-
"owned_by": "Google",
|
| 74 |
-
},
|
| 75 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
examples/__init__.py
DELETED
|
File without changes
|
examples/chat_with_openai.py
DELETED
|
@@ -1,25 +0,0 @@
|
|
| 1 |
-
from openai import OpenAI
|
| 2 |
-
|
| 3 |
-
# If runnning this service with proxy, you might need to unset `http(s)_proxy`.
|
| 4 |
-
base_url = "http://127.0.0.1:23333"
|
| 5 |
-
api_key = "sk-xxxxx"
|
| 6 |
-
|
| 7 |
-
client = OpenAI(base_url=base_url, api_key=api_key)
|
| 8 |
-
response = client.chat.completions.create(
|
| 9 |
-
model="mixtral-8x7b",
|
| 10 |
-
messages=[
|
| 11 |
-
{
|
| 12 |
-
"role": "user",
|
| 13 |
-
"content": "what is your model",
|
| 14 |
-
}
|
| 15 |
-
],
|
| 16 |
-
stream=True,
|
| 17 |
-
)
|
| 18 |
-
|
| 19 |
-
for chunk in response:
|
| 20 |
-
if chunk.choices[0].delta.content is not None:
|
| 21 |
-
print(chunk.choices[0].delta.content, end="", flush=True)
|
| 22 |
-
elif chunk.choices[0].finish_reason == "stop":
|
| 23 |
-
print()
|
| 24 |
-
else:
|
| 25 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
examples/chat_with_post.py
DELETED
|
@@ -1,55 +0,0 @@
|
|
| 1 |
-
import ast
|
| 2 |
-
import httpx
|
| 3 |
-
import json
|
| 4 |
-
import re
|
| 5 |
-
|
| 6 |
-
# If runnning this service with proxy, you might need to unset `http(s)_proxy`.
|
| 7 |
-
chat_api = "http://127.0.0.1:23333"
|
| 8 |
-
api_key = "sk-xxxxx"
|
| 9 |
-
requests_headers = {}
|
| 10 |
-
requests_payload = {
|
| 11 |
-
"model": "mixtral-8x7b",
|
| 12 |
-
"messages": [
|
| 13 |
-
{
|
| 14 |
-
"role": "user",
|
| 15 |
-
"content": "what is your model",
|
| 16 |
-
}
|
| 17 |
-
],
|
| 18 |
-
"stream": True,
|
| 19 |
-
}
|
| 20 |
-
|
| 21 |
-
with httpx.stream(
|
| 22 |
-
"POST",
|
| 23 |
-
chat_api + "/chat/completions",
|
| 24 |
-
headers=requests_headers,
|
| 25 |
-
json=requests_payload,
|
| 26 |
-
timeout=httpx.Timeout(connect=20, read=60, write=20, pool=None),
|
| 27 |
-
) as response:
|
| 28 |
-
# https://docs.aiohttp.org/en/stable/streams.html
|
| 29 |
-
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
|
| 30 |
-
response_content = ""
|
| 31 |
-
for line in response.iter_lines():
|
| 32 |
-
remove_patterns = [r"^\s*data:\s*", r"^\s*\[DONE\]\s*"]
|
| 33 |
-
for pattern in remove_patterns:
|
| 34 |
-
line = re.sub(pattern, "", line).strip()
|
| 35 |
-
|
| 36 |
-
if line:
|
| 37 |
-
try:
|
| 38 |
-
line_data = json.loads(line)
|
| 39 |
-
except Exception as e:
|
| 40 |
-
try:
|
| 41 |
-
line_data = ast.literal_eval(line)
|
| 42 |
-
except:
|
| 43 |
-
print(f"Error: {line}")
|
| 44 |
-
raise e
|
| 45 |
-
# print(f"line: {line_data}")
|
| 46 |
-
delta_data = line_data["choices"][0]["delta"]
|
| 47 |
-
finish_reason = line_data["choices"][0]["finish_reason"]
|
| 48 |
-
if "role" in delta_data:
|
| 49 |
-
role = delta_data["role"]
|
| 50 |
-
if "content" in delta_data:
|
| 51 |
-
delta_content = delta_data["content"]
|
| 52 |
-
response_content += delta_content
|
| 53 |
-
print(delta_content, end="", flush=True)
|
| 54 |
-
if finish_reason == "stop":
|
| 55 |
-
print()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gem.md
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# gem.md: State & Quality Charter for ./hAPI_face2
|
| 2 |
+
|
| 3 |
+
## 1. Directory Scope & Purpose
|
| 4 |
+
Docker-friendly Express mediator API deployed to Hugging Face Spaces. Proxies Aura Venture requests into Supabase functions/tables and enforces server-side auth.
|
| 5 |
+
|
| 6 |
+
## 2. Architectural & Quality Mandate
|
| 7 |
+
- Keep the codebase self-contained; no relative imports from parent repo.
|
| 8 |
+
- All outbound data access routes through Supabase service client with least privilege secrets held in env.
|
| 9 |
+
- Every route except `/health` must verify `AIS3_API_KEY` bearer token.
|
| 10 |
+
- Return JSON envelopes (`{ success, ... }`) and never crash on missing tables—fallback to safe defaults.
|
| 11 |
+
- Maintain TypeScript source compiled to JS for production.
|
| 12 |
+
|
| 13 |
+
## 3. Content Manifest
|
| 14 |
+
- Dockerfile — container build for Hugging Face.
|
| 15 |
+
- app.yaml — Space metadata.
|
| 16 |
+
- package.json / tsconfig.json — Node + TypeScript config.
|
| 17 |
+
- src/server.ts — Express bootstrap and middleware.
|
| 18 |
+
- src/routes/*.ts — Modular route handlers for river, flows, money, book empire.
|
| 19 |
+
- src/supabase.ts — Supabase admin client helper.
|
| 20 |
+
- .env.example — Reference for required secrets.
|
| 21 |
+
|
| 22 |
+
## 4. State Change Log (Newest First)
|
| 23 |
+
- 2025-10-07 | Documented Docker CLI setup | README.md | Added explicit huggingface-cli example for Docker SDK Spaces.
|
| 24 |
+
- 2025-10-07 | Initial mediator scaffolding | * | Created Express/Supabase bridge tailored for Hugging Face Spaces.
|
messagers/__init__.py
DELETED
|
File without changes
|
messagers/message_composer.py
DELETED
|
@@ -1,190 +0,0 @@
|
|
| 1 |
-
import re
|
| 2 |
-
from pprint import pprint
|
| 3 |
-
|
| 4 |
-
from transformers import AutoTokenizer
|
| 5 |
-
|
| 6 |
-
from constants.models import AVAILABLE_MODELS, MODEL_MAP
|
| 7 |
-
from utils.logger import logger
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
class MessageComposer:
|
| 11 |
-
def __init__(self, model: str = None):
|
| 12 |
-
if model in AVAILABLE_MODELS:
|
| 13 |
-
self.model = model
|
| 14 |
-
else:
|
| 15 |
-
self.model = "mixtral-8x7b"
|
| 16 |
-
self.model_fullname = MODEL_MAP[self.model]
|
| 17 |
-
self.system_roles = ["system"]
|
| 18 |
-
self.inst_roles = ["user", "system", "inst"]
|
| 19 |
-
self.answer_roles = ["assistant", "bot", "answer", "model"]
|
| 20 |
-
self.default_role = "user"
|
| 21 |
-
|
| 22 |
-
def concat_messages_by_role(self, messages):
|
| 23 |
-
def is_same_role(role1, role2):
|
| 24 |
-
if (
|
| 25 |
-
(role1 == role2)
|
| 26 |
-
or (role1 in self.inst_roles and role2 in self.inst_roles)
|
| 27 |
-
or (role1 in self.answer_roles and role2 in self.answer_roles)
|
| 28 |
-
):
|
| 29 |
-
return True
|
| 30 |
-
else:
|
| 31 |
-
return False
|
| 32 |
-
|
| 33 |
-
concat_messages = []
|
| 34 |
-
for message in messages:
|
| 35 |
-
role = message["role"]
|
| 36 |
-
content = message["content"]
|
| 37 |
-
if concat_messages and is_same_role(role, concat_messages[-1]["role"]):
|
| 38 |
-
concat_messages[-1]["content"] += "\n" + content
|
| 39 |
-
else:
|
| 40 |
-
if role in self.inst_roles:
|
| 41 |
-
message["role"] = "inst"
|
| 42 |
-
elif role in self.answer_roles:
|
| 43 |
-
message["role"] = "answer"
|
| 44 |
-
else:
|
| 45 |
-
message["role"] = "inst"
|
| 46 |
-
concat_messages.append(message)
|
| 47 |
-
return concat_messages
|
| 48 |
-
|
| 49 |
-
def merge(self, messages) -> str:
|
| 50 |
-
# Templates for Chat Models
|
| 51 |
-
# - https://huggingface.co/docs/transformers/main/en/chat_templating
|
| 52 |
-
# - https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format
|
| 53 |
-
# - https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
|
| 54 |
-
# - https://huggingface.co/openchat/openchat-3.5-0106
|
| 55 |
-
# - https://huggingface.co/google/gemma-7b-it#chat-template
|
| 56 |
-
|
| 57 |
-
# Mistral and Mixtral:
|
| 58 |
-
# <s> [INST] Instruction [/INST] Model answer </s> [INST] Follow-up instruction [/INST]
|
| 59 |
-
|
| 60 |
-
# Nous Mixtral:
|
| 61 |
-
# <|im_start|>system
|
| 62 |
-
# You are "Hermes 2".<|im_end|>
|
| 63 |
-
# <|im_start|>user
|
| 64 |
-
# Hello, who are you?<|im_end|>
|
| 65 |
-
# <|im_start|>assistant
|
| 66 |
-
|
| 67 |
-
# OpenChat:
|
| 68 |
-
# GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant: Hi<|end_of_turn|>GPT4 Correct User: How are you today?<|end_of_turn|>GPT4 Correct Assistant:
|
| 69 |
-
|
| 70 |
-
# Google Gemma-it
|
| 71 |
-
# <start_of_turn>user
|
| 72 |
-
# How does the brain work?<end_of_turn>
|
| 73 |
-
# <start_of_turn>model
|
| 74 |
-
|
| 75 |
-
self.messages = messages
|
| 76 |
-
self.merged_str = ""
|
| 77 |
-
|
| 78 |
-
# https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format
|
| 79 |
-
if self.model in ["mixtral-8x7b", "mistral-7b"]:
|
| 80 |
-
self.messages = self.concat_messages_by_role(messages)
|
| 81 |
-
self.cached_str = ""
|
| 82 |
-
for message in self.messages:
|
| 83 |
-
role = message["role"]
|
| 84 |
-
content = message["content"]
|
| 85 |
-
if role in self.inst_roles:
|
| 86 |
-
self.cached_str = f"[INST] {content} [/INST]"
|
| 87 |
-
elif role in self.answer_roles:
|
| 88 |
-
self.merged_str += f"<s> {self.cached_str} {content} </s>\n"
|
| 89 |
-
self.cached_str = ""
|
| 90 |
-
else:
|
| 91 |
-
self.cached_str = f"[INST] {content} [/INST]"
|
| 92 |
-
if self.cached_str:
|
| 93 |
-
self.merged_str += f"{self.cached_str}"
|
| 94 |
-
# https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
|
| 95 |
-
elif self.model in ["nous-mixtral-8x7b"]:
|
| 96 |
-
self.merged_str_list = []
|
| 97 |
-
for message in self.messages:
|
| 98 |
-
role = message["role"]
|
| 99 |
-
content = message["content"]
|
| 100 |
-
if role not in ["system", "user", "assistant"]:
|
| 101 |
-
role = self.default_role
|
| 102 |
-
message_line = f"<|im_start|>{role}\n{content}<|im_end|>"
|
| 103 |
-
self.merged_str_list.append(message_line)
|
| 104 |
-
self.merged_str_list.append("<|im_start|>assistant")
|
| 105 |
-
self.merged_str = "\n".join(self.merged_str_list)
|
| 106 |
-
# https://huggingface.co/openchat/openchat-3.5-0106
|
| 107 |
-
elif self.model in ["openchat-3.5"]:
|
| 108 |
-
self.messages = self.concat_messages_by_role(messages)
|
| 109 |
-
self.merged_str_list = []
|
| 110 |
-
self.end_of_turn = "<|end_of_turn|>"
|
| 111 |
-
for message in self.messages:
|
| 112 |
-
role = message["role"]
|
| 113 |
-
content = message["content"]
|
| 114 |
-
if role in self.inst_roles:
|
| 115 |
-
self.merged_str_list.append(
|
| 116 |
-
f"GPT4 Correct User:\n{content}{self.end_of_turn}"
|
| 117 |
-
)
|
| 118 |
-
elif role in self.answer_roles:
|
| 119 |
-
self.merged_str_list.append(
|
| 120 |
-
f"GPT4 Correct Assistant:\n{content}{self.end_of_turn}"
|
| 121 |
-
)
|
| 122 |
-
else:
|
| 123 |
-
self.merged_str_list.append(
|
| 124 |
-
f"GPT4 Correct User: {content}{self.end_of_turn}"
|
| 125 |
-
)
|
| 126 |
-
self.merged_str_list.append(f"GPT4 Correct Assistant:\n")
|
| 127 |
-
self.merged_str = "\n".join(self.merged_str_list)
|
| 128 |
-
# https://huggingface.co/google/gemma-7b-it#chat-template
|
| 129 |
-
elif self.model in ["gemma-7b"]:
|
| 130 |
-
self.messages = self.concat_messages_by_role(messages)
|
| 131 |
-
self.merged_str_list = []
|
| 132 |
-
self.end_of_turn = "<end_of_turn>"
|
| 133 |
-
self.start_of_turn = "<start_of_turn>"
|
| 134 |
-
for message in self.messages:
|
| 135 |
-
role = message["role"]
|
| 136 |
-
content = message["content"]
|
| 137 |
-
if role in self.inst_roles:
|
| 138 |
-
self.merged_str_list.append(
|
| 139 |
-
f"{self.start_of_turn}user\n{content}{self.end_of_turn}"
|
| 140 |
-
)
|
| 141 |
-
elif role in self.answer_roles:
|
| 142 |
-
self.merged_str_list.append(
|
| 143 |
-
f"{self.start_of_turn}model\n{content}{self.end_of_turn}"
|
| 144 |
-
)
|
| 145 |
-
else:
|
| 146 |
-
self.merged_str_list.append(
|
| 147 |
-
f"{self.start_of_turn}user\n{content}{self.end_of_turn}"
|
| 148 |
-
)
|
| 149 |
-
self.merged_str_list.append(f"{self.start_of_turn}model\n")
|
| 150 |
-
self.merged_str = "\n".join(self.merged_str_list)
|
| 151 |
-
# https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
|
| 152 |
-
# https://huggingface.co/openchat/openchat-3.5-0106
|
| 153 |
-
# elif self.model in ["openchat-3.5", "nous-mixtral-8x7b"]:
|
| 154 |
-
else:
|
| 155 |
-
tokenizer = AutoTokenizer.from_pretrained("openchat/openchat-3.5-0106")
|
| 156 |
-
self.merged_str = tokenizer.apply_chat_template(
|
| 157 |
-
messages, tokenize=False, add_generation_prompt=True
|
| 158 |
-
)
|
| 159 |
-
|
| 160 |
-
return self.merged_str
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
if __name__ == "__main__":
|
| 164 |
-
# model = "mixtral-8x7b"
|
| 165 |
-
# model = "nous-mixtral-8x7b"
|
| 166 |
-
# model = "gemma-7b"
|
| 167 |
-
model = "openchat-3.5"
|
| 168 |
-
composer = MessageComposer(model)
|
| 169 |
-
messages = [
|
| 170 |
-
{
|
| 171 |
-
"role": "system",
|
| 172 |
-
"content": "You are a LLM developed by OpenAI.\nYour name is GPT-4.",
|
| 173 |
-
},
|
| 174 |
-
{"role": "user", "content": "Hello, who are you?"},
|
| 175 |
-
{"role": "assistant", "content": "I am a bot."},
|
| 176 |
-
{"role": "user", "content": "What is your name?"},
|
| 177 |
-
# {"role": "assistant", "content": "My name is Bing."},
|
| 178 |
-
# {"role": "user", "content": "Tell me a joke."},
|
| 179 |
-
# {"role": "assistant", "content": "What is a robot's favorite type of music?"},
|
| 180 |
-
# {
|
| 181 |
-
# "role": "user",
|
| 182 |
-
# "content": "How many questions have I asked? Please list them.",
|
| 183 |
-
# },
|
| 184 |
-
]
|
| 185 |
-
logger.note(f"model: {composer.model}")
|
| 186 |
-
merged_str = composer.merge(messages)
|
| 187 |
-
logger.note("merged_str:")
|
| 188 |
-
logger.mesg(merged_str)
|
| 189 |
-
|
| 190 |
-
# python -m messagers.message_composer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
messagers/message_outputer.py
DELETED
|
@@ -1,65 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
class OpenaiStreamOutputer:
|
| 5 |
-
"""
|
| 6 |
-
Create chat completion - OpenAI API Documentation
|
| 7 |
-
* https://platform.openai.com/docs/api-reference/chat/create
|
| 8 |
-
"""
|
| 9 |
-
|
| 10 |
-
def __init__(self):
|
| 11 |
-
self.default_data = {
|
| 12 |
-
"created": 1700000000,
|
| 13 |
-
"id": "chatcmpl-hugginface",
|
| 14 |
-
"object": "chat.completion.chunk",
|
| 15 |
-
# "content_type": "Completions",
|
| 16 |
-
"model": "hugginface",
|
| 17 |
-
"choices": [],
|
| 18 |
-
}
|
| 19 |
-
|
| 20 |
-
def data_to_string(self, data={}, content_type=""):
|
| 21 |
-
data_str = f"{json.dumps(data)}"
|
| 22 |
-
return data_str
|
| 23 |
-
|
| 24 |
-
def output(self, content=None, content_type="Completions") -> str:
|
| 25 |
-
data = self.default_data.copy()
|
| 26 |
-
if content_type == "Role":
|
| 27 |
-
data["choices"] = [
|
| 28 |
-
{
|
| 29 |
-
"index": 0,
|
| 30 |
-
"delta": {"role": "assistant"},
|
| 31 |
-
"finish_reason": None,
|
| 32 |
-
}
|
| 33 |
-
]
|
| 34 |
-
elif content_type in [
|
| 35 |
-
"Completions",
|
| 36 |
-
"InternalSearchQuery",
|
| 37 |
-
"InternalSearchResult",
|
| 38 |
-
"SuggestedResponses",
|
| 39 |
-
]:
|
| 40 |
-
if content_type in ["InternalSearchQuery", "InternalSearchResult"]:
|
| 41 |
-
content += "\n"
|
| 42 |
-
data["choices"] = [
|
| 43 |
-
{
|
| 44 |
-
"index": 0,
|
| 45 |
-
"delta": {"content": content},
|
| 46 |
-
"finish_reason": None,
|
| 47 |
-
}
|
| 48 |
-
]
|
| 49 |
-
elif content_type == "Finished":
|
| 50 |
-
data["choices"] = [
|
| 51 |
-
{
|
| 52 |
-
"index": 0,
|
| 53 |
-
"delta": {},
|
| 54 |
-
"finish_reason": "stop",
|
| 55 |
-
}
|
| 56 |
-
]
|
| 57 |
-
else:
|
| 58 |
-
data["choices"] = [
|
| 59 |
-
{
|
| 60 |
-
"index": 0,
|
| 61 |
-
"delta": {},
|
| 62 |
-
"finish_reason": None,
|
| 63 |
-
}
|
| 64 |
-
]
|
| 65 |
-
return self.data_to_string(data, content_type)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
mocks/__init__.py
DELETED
|
File without changes
|
mocks/stream_chat_mocker.py
DELETED
|
@@ -1,13 +0,0 @@
|
|
| 1 |
-
import time
|
| 2 |
-
from utils.logger import logger
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
def stream_chat_mock(*args, **kwargs):
|
| 6 |
-
logger.note(msg=str(args) + str(kwargs))
|
| 7 |
-
for i in range(10):
|
| 8 |
-
content = f"W{i+1} "
|
| 9 |
-
time.sleep(0.1)
|
| 10 |
-
logger.mesg(content, end="")
|
| 11 |
-
yield content
|
| 12 |
-
logger.mesg("")
|
| 13 |
-
yield ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
networks/__init__.py
DELETED
|
File without changes
|
networks/message_streamer.py
DELETED
|
@@ -1,201 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import re
|
| 3 |
-
import requests
|
| 4 |
-
|
| 5 |
-
from tiktoken import get_encoding as tiktoken_get_encoding
|
| 6 |
-
from transformers import AutoTokenizer
|
| 7 |
-
|
| 8 |
-
from constants.models import (
|
| 9 |
-
MODEL_MAP,
|
| 10 |
-
STOP_SEQUENCES_MAP,
|
| 11 |
-
TOKEN_LIMIT_MAP,
|
| 12 |
-
TOKEN_RESERVED,
|
| 13 |
-
)
|
| 14 |
-
from messagers.message_outputer import OpenaiStreamOutputer
|
| 15 |
-
from utils.logger import logger
|
| 16 |
-
from utils.enver import enver
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
class MessageStreamer:
|
| 20 |
-
|
| 21 |
-
def __init__(self, model: str):
|
| 22 |
-
if model in MODEL_MAP.keys():
|
| 23 |
-
self.model = model
|
| 24 |
-
else:
|
| 25 |
-
self.model = "default"
|
| 26 |
-
self.model_fullname = MODEL_MAP[self.model]
|
| 27 |
-
self.message_outputer = OpenaiStreamOutputer()
|
| 28 |
-
|
| 29 |
-
if self.model == "gemma-7b":
|
| 30 |
-
# this is not wrong, as repo `google/gemma-7b-it` is gated and must authenticate to access it
|
| 31 |
-
# so I use mistral-7b as a fallback
|
| 32 |
-
self.tokenizer = AutoTokenizer.from_pretrained(MODEL_MAP["mistral-7b"])
|
| 33 |
-
else:
|
| 34 |
-
self.tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
|
| 35 |
-
|
| 36 |
-
def parse_line(self, line):
|
| 37 |
-
line = line.decode("utf-8")
|
| 38 |
-
line = re.sub(r"data:\s*", "", line)
|
| 39 |
-
data = json.loads(line)
|
| 40 |
-
try:
|
| 41 |
-
content = data["token"]["text"]
|
| 42 |
-
except:
|
| 43 |
-
logger.err(data)
|
| 44 |
-
return content
|
| 45 |
-
|
| 46 |
-
def count_tokens(self, text):
|
| 47 |
-
tokens = self.tokenizer.encode(text)
|
| 48 |
-
token_count = len(tokens)
|
| 49 |
-
logger.note(f"Prompt Token Count: {token_count}")
|
| 50 |
-
return token_count
|
| 51 |
-
|
| 52 |
-
def chat_response(
|
| 53 |
-
self,
|
| 54 |
-
prompt: str = None,
|
| 55 |
-
temperature: float = 0.5,
|
| 56 |
-
top_p: float = 0.95,
|
| 57 |
-
max_new_tokens: int = None,
|
| 58 |
-
api_key: str = None,
|
| 59 |
-
use_cache: bool = False,
|
| 60 |
-
):
|
| 61 |
-
# https://huggingface.co/docs/api-inference/detailed_parameters?code=curl
|
| 62 |
-
# curl --proxy http://<server>:<port> https://api-inference.huggingface.co/models/<org>/<model_name> -X POST -d '{"inputs":"who are you?","parameters":{"max_new_token":64}}' -H 'Content-Type: application/json' -H 'Authorization: Bearer <HF_TOKEN>'
|
| 63 |
-
self.request_url = (
|
| 64 |
-
f"https://api-inference.huggingface.co/models/{self.model_fullname}"
|
| 65 |
-
)
|
| 66 |
-
self.request_headers = {
|
| 67 |
-
"Content-Type": "application/json",
|
| 68 |
-
}
|
| 69 |
-
|
| 70 |
-
if api_key:
|
| 71 |
-
logger.note(
|
| 72 |
-
f"Using API Key: {api_key[:3]}{(len(api_key)-7)*'*'}{api_key[-4:]}"
|
| 73 |
-
)
|
| 74 |
-
self.request_headers["Authorization"] = f"Bearer {api_key}"
|
| 75 |
-
|
| 76 |
-
if temperature is None or temperature < 0:
|
| 77 |
-
temperature = 0.0
|
| 78 |
-
# temperature must 0 < and < 1 for HF LLM models
|
| 79 |
-
temperature = max(temperature, 0.01)
|
| 80 |
-
temperature = min(temperature, 0.99)
|
| 81 |
-
top_p = max(top_p, 0.01)
|
| 82 |
-
top_p = min(top_p, 0.99)
|
| 83 |
-
|
| 84 |
-
token_limit = int(
|
| 85 |
-
TOKEN_LIMIT_MAP[self.model] - TOKEN_RESERVED - self.count_tokens(prompt)
|
| 86 |
-
)
|
| 87 |
-
if token_limit <= 0:
|
| 88 |
-
raise ValueError("Prompt exceeded token limit!")
|
| 89 |
-
|
| 90 |
-
if max_new_tokens is None or max_new_tokens <= 0:
|
| 91 |
-
max_new_tokens = token_limit
|
| 92 |
-
else:
|
| 93 |
-
max_new_tokens = min(max_new_tokens, token_limit)
|
| 94 |
-
|
| 95 |
-
# References:
|
| 96 |
-
# huggingface_hub/inference/_client.py:
|
| 97 |
-
# class InferenceClient > def text_generation()
|
| 98 |
-
# huggingface_hub/inference/_text_generation.py:
|
| 99 |
-
# class TextGenerationRequest > param `stream`
|
| 100 |
-
# https://huggingface.co/docs/text-generation-inference/conceptual/streaming#streaming-with-curl
|
| 101 |
-
# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task
|
| 102 |
-
self.request_body = {
|
| 103 |
-
"inputs": prompt,
|
| 104 |
-
"parameters": {
|
| 105 |
-
"temperature": temperature,
|
| 106 |
-
"top_p": top_p,
|
| 107 |
-
"max_new_tokens": max_new_tokens,
|
| 108 |
-
"return_full_text": False,
|
| 109 |
-
},
|
| 110 |
-
"options": {
|
| 111 |
-
"use_cache": use_cache,
|
| 112 |
-
},
|
| 113 |
-
"stream": True,
|
| 114 |
-
}
|
| 115 |
-
|
| 116 |
-
if self.model in STOP_SEQUENCES_MAP.keys():
|
| 117 |
-
self.stop_sequences = STOP_SEQUENCES_MAP[self.model]
|
| 118 |
-
# self.request_body["parameters"]["stop_sequences"] = [
|
| 119 |
-
# self.STOP_SEQUENCES[self.model]
|
| 120 |
-
# ]
|
| 121 |
-
|
| 122 |
-
logger.back(self.request_url)
|
| 123 |
-
enver.set_envs(proxies=True)
|
| 124 |
-
stream_response = requests.post(
|
| 125 |
-
self.request_url,
|
| 126 |
-
headers=self.request_headers,
|
| 127 |
-
json=self.request_body,
|
| 128 |
-
proxies=enver.requests_proxies,
|
| 129 |
-
stream=True,
|
| 130 |
-
)
|
| 131 |
-
status_code = stream_response.status_code
|
| 132 |
-
if status_code == 200:
|
| 133 |
-
logger.success(status_code)
|
| 134 |
-
else:
|
| 135 |
-
logger.err(status_code)
|
| 136 |
-
|
| 137 |
-
return stream_response
|
| 138 |
-
|
| 139 |
-
def chat_return_dict(self, stream_response):
|
| 140 |
-
# https://platform.openai.com/docs/guides/text-generation/chat-completions-response-format
|
| 141 |
-
final_output = self.message_outputer.default_data.copy()
|
| 142 |
-
final_output["choices"] = [
|
| 143 |
-
{
|
| 144 |
-
"index": 0,
|
| 145 |
-
"finish_reason": "stop",
|
| 146 |
-
"message": {
|
| 147 |
-
"role": "assistant",
|
| 148 |
-
"content": "",
|
| 149 |
-
},
|
| 150 |
-
}
|
| 151 |
-
]
|
| 152 |
-
logger.back(final_output)
|
| 153 |
-
|
| 154 |
-
final_content = ""
|
| 155 |
-
for line in stream_response.iter_lines():
|
| 156 |
-
if not line:
|
| 157 |
-
continue
|
| 158 |
-
content = self.parse_line(line)
|
| 159 |
-
|
| 160 |
-
if content.strip() == self.stop_sequences:
|
| 161 |
-
logger.success("\n[Finished]")
|
| 162 |
-
break
|
| 163 |
-
else:
|
| 164 |
-
logger.back(content, end="")
|
| 165 |
-
final_content += content
|
| 166 |
-
|
| 167 |
-
if self.model in STOP_SEQUENCES_MAP.keys():
|
| 168 |
-
final_content = final_content.replace(self.stop_sequences, "")
|
| 169 |
-
|
| 170 |
-
final_content = final_content.strip()
|
| 171 |
-
final_output["choices"][0]["message"]["content"] = final_content
|
| 172 |
-
return final_output
|
| 173 |
-
|
| 174 |
-
def chat_return_generator(self, stream_response):
|
| 175 |
-
is_finished = False
|
| 176 |
-
line_count = 0
|
| 177 |
-
for line in stream_response.iter_lines():
|
| 178 |
-
if line:
|
| 179 |
-
line_count += 1
|
| 180 |
-
else:
|
| 181 |
-
continue
|
| 182 |
-
|
| 183 |
-
content = self.parse_line(line)
|
| 184 |
-
|
| 185 |
-
if content.strip() == self.stop_sequences:
|
| 186 |
-
content_type = "Finished"
|
| 187 |
-
logger.success("\n[Finished]")
|
| 188 |
-
is_finished = True
|
| 189 |
-
else:
|
| 190 |
-
content_type = "Completions"
|
| 191 |
-
if line_count == 1:
|
| 192 |
-
content = content.lstrip()
|
| 193 |
-
logger.back(content, end="")
|
| 194 |
-
|
| 195 |
-
output = self.message_outputer.output(
|
| 196 |
-
content=content, content_type=content_type
|
| 197 |
-
)
|
| 198 |
-
yield output
|
| 199 |
-
|
| 200 |
-
if not is_finished:
|
| 201 |
-
yield self.message_outputer.output(content="", content_type="Finished")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
package.json
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "hapi-face2",
|
| 3 |
+
"version": "1.0.0",
|
| 4 |
+
"type": "module",
|
| 5 |
+
"scripts": {
|
| 6 |
+
"dev": "tsx watch src/server.ts",
|
| 7 |
+
"build": "tsc --project tsconfig.json",
|
| 8 |
+
"start": "node dist/server.js"
|
| 9 |
+
},
|
| 10 |
+
"dependencies": {
|
| 11 |
+
"@supabase/supabase-js": "^2.45.0",
|
| 12 |
+
"compression": "^1.7.4",
|
| 13 |
+
"cors": "^2.8.5",
|
| 14 |
+
"express": "^4.19.2",
|
| 15 |
+
"helmet": "^7.0.0",
|
| 16 |
+
"morgan": "^1.10.0",
|
| 17 |
+
"zod": "^3.23.8"
|
| 18 |
+
},
|
| 19 |
+
"devDependencies": {
|
| 20 |
+
"@types/compression": "^1.7.5",
|
| 21 |
+
"@types/cors": "^2.8.17",
|
| 22 |
+
"@types/express": "^4.17.21",
|
| 23 |
+
"@types/morgan": "^1.9.7",
|
| 24 |
+
"@types/node": "^20.12.12",
|
| 25 |
+
"ts-node": "^10.9.2",
|
| 26 |
+
"tslib": "^2.6.3",
|
| 27 |
+
"tsx": "^4.7.1",
|
| 28 |
+
"typescript": "^5.4.5"
|
| 29 |
+
}
|
| 30 |
+
}
|
requirements.txt
DELETED
|
@@ -1,14 +0,0 @@
|
|
| 1 |
-
aiohttp
|
| 2 |
-
fastapi
|
| 3 |
-
httpx
|
| 4 |
-
jinja2
|
| 5 |
-
markdown2[all]
|
| 6 |
-
openai
|
| 7 |
-
pydantic
|
| 8 |
-
requests
|
| 9 |
-
sse_starlette
|
| 10 |
-
termcolor
|
| 11 |
-
tiktoken
|
| 12 |
-
transformers
|
| 13 |
-
uvicorn
|
| 14 |
-
websockets
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
src/routes/bookEmpire.ts
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { Router } from 'express';
|
| 2 |
+
import { supabase, tableMissing } from '../supabase.js';
|
| 3 |
+
|
| 4 |
+
const ORCHESTRATOR_FUNCTION = process.env.SUPABASE_BOOK_ORCHESTRATOR || 'wealth-river-orchestrator';
|
| 5 |
+
const MARKET_RESEARCH_FUNCTION = process.env.SUPABASE_MARKET_RESEARCH || 'market-research';
|
| 6 |
+
const BOOK_GENERATOR_FUNCTION = process.env.SUPABASE_BOOK_GENERATOR || 'product-generation';
|
| 7 |
+
|
| 8 |
+
export function registerBookEmpireRoutes(router: Router) {
|
| 9 |
+
router.post('/book-empire/autonomous/start', async (req, res, next) => {
|
| 10 |
+
try {
|
| 11 |
+
const { userId, action = 'orchestrate' } = req.body ?? {};
|
| 12 |
+
const { data, error } = await supabase.functions.invoke(ORCHESTRATOR_FUNCTION, {
|
| 13 |
+
body: { action, userId },
|
| 14 |
+
});
|
| 15 |
+
|
| 16 |
+
if (error) {
|
| 17 |
+
return res.status(502).json({ success: false, error: error.message });
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
res.json({ success: true, response: data });
|
| 21 |
+
} catch (error) {
|
| 22 |
+
next(error);
|
| 23 |
+
}
|
| 24 |
+
});
|
| 25 |
+
|
| 26 |
+
router.get('/book-empire/autonomous/status', async (_req, res, next) => {
|
| 27 |
+
try {
|
| 28 |
+
const { data, error } = await supabase
|
| 29 |
+
.from('system_metrics')
|
| 30 |
+
.select('*')
|
| 31 |
+
.eq('metric_type', 'book_pipeline')
|
| 32 |
+
.order('recorded_at', { ascending: false })
|
| 33 |
+
.limit(1)
|
| 34 |
+
.single();
|
| 35 |
+
|
| 36 |
+
if (error && !tableMissing(error)) throw error;
|
| 37 |
+
|
| 38 |
+
res.json({ success: true, status: data ?? null });
|
| 39 |
+
} catch (error) {
|
| 40 |
+
next(error);
|
| 41 |
+
}
|
| 42 |
+
});
|
| 43 |
+
|
| 44 |
+
router.post('/book-empire/idea/generate', async (_req, res, next) => {
|
| 45 |
+
try {
|
| 46 |
+
const { data, error } = await supabase.functions.invoke(MARKET_RESEARCH_FUNCTION, {});
|
| 47 |
+
if (error) {
|
| 48 |
+
return res.status(502).json({ success: false, error: error.message });
|
| 49 |
+
}
|
| 50 |
+
res.json({ success: true, idea: data });
|
| 51 |
+
} catch (error) {
|
| 52 |
+
next(error);
|
| 53 |
+
}
|
| 54 |
+
});
|
| 55 |
+
|
| 56 |
+
router.post('/book-empire/book/generate', async (req, res, next) => {
|
| 57 |
+
try {
|
| 58 |
+
const { data, error } = await supabase.functions.invoke(BOOK_GENERATOR_FUNCTION, {
|
| 59 |
+
body: req.body ?? {},
|
| 60 |
+
});
|
| 61 |
+
if (error) {
|
| 62 |
+
return res.status(502).json({ success: false, error: error.message });
|
| 63 |
+
}
|
| 64 |
+
res.json({ success: true, book: data });
|
| 65 |
+
} catch (error) {
|
| 66 |
+
next(error);
|
| 67 |
+
}
|
| 68 |
+
});
|
| 69 |
+
}
|
src/routes/flows.ts
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { Router } from 'express';
|
| 2 |
+
import { supabase, tableMissing } from '../supabase.js';
|
| 3 |
+
|
| 4 |
+
const FLOW_EXECUTOR = process.env.SUPABASE_FLOW_EXECUTOR || 'flow-executor';
|
| 5 |
+
|
| 6 |
+
export function registerFlowRoutes(router: Router) {
|
| 7 |
+
router.get('/flows', async (req, res, next) => {
|
| 8 |
+
const { status, type } = req.query;
|
| 9 |
+
try {
|
| 10 |
+
let query = supabase.from('flows').select('*').order('created_at', { ascending: false });
|
| 11 |
+
|
| 12 |
+
if (status && typeof status === 'string') {
|
| 13 |
+
query = query.eq('status', status);
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
if (type && typeof type === 'string') {
|
| 17 |
+
query = query.eq('type', type);
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
const { data, error } = await query;
|
| 21 |
+
if (error && !tableMissing(error)) throw error;
|
| 22 |
+
|
| 23 |
+
res.json({ flows: data ?? [] });
|
| 24 |
+
} catch (error) {
|
| 25 |
+
next(error);
|
| 26 |
+
}
|
| 27 |
+
});
|
| 28 |
+
|
| 29 |
+
router.post('/flows/:id/execute', async (req, res, next) => {
|
| 30 |
+
const { id } = req.params;
|
| 31 |
+
try {
|
| 32 |
+
const { data, error } = await supabase.functions.invoke(FLOW_EXECUTOR, {
|
| 33 |
+
body: { flowId: id, parameters: req.body?.parameters ?? {} },
|
| 34 |
+
});
|
| 35 |
+
|
| 36 |
+
if (error) {
|
| 37 |
+
// bubble up but mark accepted so caller can inspect details
|
| 38 |
+
return res.status(502).json({ success: false, error: error.message });
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
res.status(202).json({ success: true, response: data });
|
| 42 |
+
} catch (error) {
|
| 43 |
+
next(error);
|
| 44 |
+
}
|
| 45 |
+
});
|
| 46 |
+
|
| 47 |
+
router.post('/flows/:id/publish', async (req, res, next) => {
|
| 48 |
+
const { id } = req.params;
|
| 49 |
+
try {
|
| 50 |
+
const { data, error } = await supabase
|
| 51 |
+
.from('flows')
|
| 52 |
+
.update({ status: 'live', published_at: new Date().toISOString() })
|
| 53 |
+
.eq('id', id)
|
| 54 |
+
.select()
|
| 55 |
+
.single();
|
| 56 |
+
|
| 57 |
+
if (error) throw error;
|
| 58 |
+
|
| 59 |
+
res.json({ success: true, flow: data });
|
| 60 |
+
} catch (error) {
|
| 61 |
+
next(error);
|
| 62 |
+
}
|
| 63 |
+
});
|
| 64 |
+
|
| 65 |
+
router.post('/flows/:id/duplicate', async (req, res, next) => {
|
| 66 |
+
const { id } = req.params;
|
| 67 |
+
try {
|
| 68 |
+
const { data: original, error: fetchError } = await supabase
|
| 69 |
+
.from('flows')
|
| 70 |
+
.select('*')
|
| 71 |
+
.eq('id', id)
|
| 72 |
+
.single();
|
| 73 |
+
|
| 74 |
+
if (fetchError) throw fetchError;
|
| 75 |
+
|
| 76 |
+
const payload = {
|
| 77 |
+
...original,
|
| 78 |
+
id: undefined,
|
| 79 |
+
name: `${original.name} Copy`,
|
| 80 |
+
status: 'draft',
|
| 81 |
+
created_at: undefined,
|
| 82 |
+
updated_at: undefined,
|
| 83 |
+
published_at: null,
|
| 84 |
+
last_run_at: null,
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
const { data: inserted, error: insertError } = await supabase
|
| 88 |
+
.from('flows')
|
| 89 |
+
.insert(payload)
|
| 90 |
+
.select()
|
| 91 |
+
.single();
|
| 92 |
+
|
| 93 |
+
if (insertError) throw insertError;
|
| 94 |
+
|
| 95 |
+
res.json({ success: true, flow: inserted });
|
| 96 |
+
} catch (error) {
|
| 97 |
+
next(error);
|
| 98 |
+
}
|
| 99 |
+
});
|
| 100 |
+
|
| 101 |
+
router.post('/flows/:id/archive', async (req, res, next) => {
|
| 102 |
+
const { id } = req.params;
|
| 103 |
+
try {
|
| 104 |
+
const { data, error } = await supabase
|
| 105 |
+
.from('flows')
|
| 106 |
+
.update({ status: 'archived', archived_at: new Date().toISOString() })
|
| 107 |
+
.eq('id', id)
|
| 108 |
+
.select()
|
| 109 |
+
.single();
|
| 110 |
+
|
| 111 |
+
if (error) throw error;
|
| 112 |
+
|
| 113 |
+
res.json({ success: true, flow: data });
|
| 114 |
+
} catch (error) {
|
| 115 |
+
next(error);
|
| 116 |
+
}
|
| 117 |
+
});
|
| 118 |
+
}
|
src/routes/money.ts
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { Router } from 'express';
|
| 2 |
+
import { supabase, tableMissing } from '../supabase.js';
|
| 3 |
+
|
| 4 |
+
const AUTOMATION_FUNCTION = process.env.SUPABASE_AUTOMATION_FUNCTION || 'automated-revenue-generator';
|
| 5 |
+
|
| 6 |
+
export function registerMoneyRoutes(router: Router) {
|
| 7 |
+
router.get('/money/revenue', async (_req, res, next) => {
|
| 8 |
+
try {
|
| 9 |
+
const { data, error } = await supabase
|
| 10 |
+
.from('revenue_streams')
|
| 11 |
+
.select('amount, status, stream_type, platform, currency, received_at')
|
| 12 |
+
.order('received_at', { ascending: false });
|
| 13 |
+
|
| 14 |
+
if (error && !tableMissing(error)) throw error;
|
| 15 |
+
|
| 16 |
+
const streams = data ?? [];
|
| 17 |
+
const totals = streams.reduce(
|
| 18 |
+
(acc, stream) => {
|
| 19 |
+
if (stream.status === 'confirmed') {
|
| 20 |
+
acc.totalRevenue += stream.amount;
|
| 21 |
+
acc.monthlyRevenue += isWithinHours(stream.received_at, 24 * 30) ? stream.amount : 0;
|
| 22 |
+
acc.weeklyRevenue += isWithinHours(stream.received_at, 24 * 7) ? stream.amount : 0;
|
| 23 |
+
}
|
| 24 |
+
acc.automationRuns += 1;
|
| 25 |
+
return acc;
|
| 26 |
+
},
|
| 27 |
+
{ totalRevenue: 0, monthlyRevenue: 0, weeklyRevenue: 0, automationRuns: 0 }
|
| 28 |
+
);
|
| 29 |
+
|
| 30 |
+
res.json({ success: true, data: totals, streams });
|
| 31 |
+
} catch (error) {
|
| 32 |
+
next(error);
|
| 33 |
+
}
|
| 34 |
+
});
|
| 35 |
+
|
| 36 |
+
router.get('/money/opportunities', async (_req, res, next) => {
|
| 37 |
+
try {
|
| 38 |
+
const { data, error } = await supabase
|
| 39 |
+
.from('revenue_opportunities')
|
| 40 |
+
.select('*')
|
| 41 |
+
.order('posted_at', { ascending: false })
|
| 42 |
+
.limit(25);
|
| 43 |
+
|
| 44 |
+
if (error) {
|
| 45 |
+
if (tableMissing(error)) {
|
| 46 |
+
return res.json({ success: true, opportunities: [] });
|
| 47 |
+
}
|
| 48 |
+
throw error;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
res.json({ success: true, opportunities: data ?? [] });
|
| 52 |
+
} catch (error) {
|
| 53 |
+
next(error);
|
| 54 |
+
}
|
| 55 |
+
});
|
| 56 |
+
|
| 57 |
+
router.post('/money/automate', async (_req, res, next) => {
|
| 58 |
+
try {
|
| 59 |
+
const { data, error } = await supabase.functions.invoke(AUTOMATION_FUNCTION, {
|
| 60 |
+
body: { trigger: 'mediator_api' },
|
| 61 |
+
});
|
| 62 |
+
|
| 63 |
+
if (error) {
|
| 64 |
+
return res.status(502).json({ success: false, error: error.message });
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
res.json({ success: true, response: data });
|
| 68 |
+
} catch (error) {
|
| 69 |
+
next(error);
|
| 70 |
+
}
|
| 71 |
+
});
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
function isWithinHours(timestamp: string, hours: number): boolean {
|
| 75 |
+
if (!timestamp) return false;
|
| 76 |
+
const cutoff = Date.now() - hours * 60 * 60 * 1000;
|
| 77 |
+
return new Date(timestamp).getTime() >= cutoff;
|
| 78 |
+
}
|
src/routes/river.ts
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { Router } from 'express';
|
| 2 |
+
import { supabase, tableMissing } from '../supabase.js';
|
| 3 |
+
|
| 4 |
+
interface RevenueStreamRecord {
|
| 5 |
+
amount: number;
|
| 6 |
+
status: string;
|
| 7 |
+
metadata: Record<string, any> | null;
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
export function registerRiverRoutes(router: Router) {
|
| 11 |
+
router.get('/river/tools', async (_req, res, next) => {
|
| 12 |
+
try {
|
| 13 |
+
const { data, error } = await supabase
|
| 14 |
+
.from('river_tools')
|
| 15 |
+
.select('*')
|
| 16 |
+
.order('created_at', { ascending: false });
|
| 17 |
+
|
| 18 |
+
if (error && !tableMissing(error)) throw error;
|
| 19 |
+
|
| 20 |
+
res.json({ tools: data ?? [] });
|
| 21 |
+
} catch (error) {
|
| 22 |
+
next(error);
|
| 23 |
+
}
|
| 24 |
+
});
|
| 25 |
+
|
| 26 |
+
router.get('/river/analytics/:toolId', async (req, res, next) => {
|
| 27 |
+
const { toolId } = req.params;
|
| 28 |
+
try {
|
| 29 |
+
const { data, error } = await supabase
|
| 30 |
+
.from('river_tool_metrics')
|
| 31 |
+
.select('*')
|
| 32 |
+
.eq('tool_id', toolId)
|
| 33 |
+
.order('recorded_at', { ascending: false })
|
| 34 |
+
.limit(1)
|
| 35 |
+
.single();
|
| 36 |
+
|
| 37 |
+
if (!error && data) {
|
| 38 |
+
return res.json({ toolId, ...data });
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
if (error && !tableMissing(error)) {
|
| 42 |
+
throw error;
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
// Fallback to aggregate revenue streams
|
| 46 |
+
const { data: revenueData, error: revenueError } = await supabase
|
| 47 |
+
.from('revenue_streams')
|
| 48 |
+
.select('amount,status,metadata');
|
| 49 |
+
|
| 50 |
+
if (revenueError && !tableMissing(revenueError)) throw revenueError;
|
| 51 |
+
|
| 52 |
+
const streams = (revenueData ?? []) as RevenueStreamRecord[];
|
| 53 |
+
const relevant = streams.filter(stream => {
|
| 54 |
+
const meta = stream.metadata || {};
|
| 55 |
+
return meta.tool_id === toolId || meta.toolId === toolId;
|
| 56 |
+
});
|
| 57 |
+
|
| 58 |
+
const revenue = relevant.reduce((sum, stream) => sum + (stream.status === 'confirmed' ? stream.amount : 0), 0);
|
| 59 |
+
const conversions = relevant.length;
|
| 60 |
+
|
| 61 |
+
res.json({
|
| 62 |
+
toolId,
|
| 63 |
+
revenue,
|
| 64 |
+
conversions,
|
| 65 |
+
subscribers: conversions, // placeholder until distinct subscribers tracked
|
| 66 |
+
mrr: revenue,
|
| 67 |
+
timestamp: new Date().toISOString(),
|
| 68 |
+
});
|
| 69 |
+
} catch (error) {
|
| 70 |
+
next(error);
|
| 71 |
+
}
|
| 72 |
+
});
|
| 73 |
+
}
|
src/server.ts
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import 'dotenv/config';
|
| 2 |
+
import express from 'express';
|
| 3 |
+
import helmet from 'helmet';
|
| 4 |
+
import compression from 'compression';
|
| 5 |
+
import cors from 'cors';
|
| 6 |
+
import morgan from 'morgan';
|
| 7 |
+
import { z } from 'zod';
|
| 8 |
+
import { registerRiverRoutes } from './routes/river.js';
|
| 9 |
+
import { registerFlowRoutes } from './routes/flows.js';
|
| 10 |
+
import { registerMoneyRoutes } from './routes/money.js';
|
| 11 |
+
import { registerBookEmpireRoutes } from './routes/bookEmpire.js';
|
| 12 |
+
|
| 13 |
+
const app = express();
|
| 14 |
+
const PORT = Number(process.env.PORT || process.env.HTTP_PORT || 7860);
|
| 15 |
+
const AIS3_API_KEY = process.env.AIS3_API_KEY;
|
| 16 |
+
|
| 17 |
+
const requiredEnvSchema = z.object({
|
| 18 |
+
SUPABASE_URL: z.string().url(),
|
| 19 |
+
SUPABASE_SERVICE_ROLE_KEY: z.string().min(10),
|
| 20 |
+
});
|
| 21 |
+
|
| 22 |
+
requiredEnvSchema.parse({
|
| 23 |
+
SUPABASE_URL: process.env.SUPABASE_URL,
|
| 24 |
+
SUPABASE_SERVICE_ROLE_KEY: process.env.SUPABASE_SERVICE_ROLE_KEY,
|
| 25 |
+
});
|
| 26 |
+
|
| 27 |
+
app.use(helmet());
|
| 28 |
+
app.use(cors());
|
| 29 |
+
app.use(express.json({ limit: '2mb' }));
|
| 30 |
+
app.use(express.urlencoded({ extended: true }));
|
| 31 |
+
app.use(compression());
|
| 32 |
+
app.use(morgan('combined'));
|
| 33 |
+
|
| 34 |
+
// Health endpoint (no auth)
|
| 35 |
+
app.get('/health', (_req, res) => {
|
| 36 |
+
res.json({
|
| 37 |
+
status: 'ok',
|
| 38 |
+
service: 'hAPI-face2',
|
| 39 |
+
timestamp: new Date().toISOString(),
|
| 40 |
+
});
|
| 41 |
+
});
|
| 42 |
+
|
| 43 |
+
// Simple Bearer auth for everything else
|
| 44 |
+
app.use((req, res, next) => {
|
| 45 |
+
if (!AIS3_API_KEY) {
|
| 46 |
+
return next(); // auth disabled when key missing
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
const header = req.get('authorization');
|
| 50 |
+
if (!header || !header.toLowerCase().startsWith('bearer ')) {
|
| 51 |
+
return res.status(401).json({ error: 'Unauthorized' });
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
const token = header.slice(7);
|
| 55 |
+
if (token !== AIS3_API_KEY) {
|
| 56 |
+
return res.status(403).json({ error: 'Forbidden' });
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
next();
|
| 60 |
+
});
|
| 61 |
+
|
| 62 |
+
// Register API routes under /api
|
| 63 |
+
const apiRouter = express.Router();
|
| 64 |
+
registerRiverRoutes(apiRouter);
|
| 65 |
+
registerFlowRoutes(apiRouter);
|
| 66 |
+
registerMoneyRoutes(apiRouter);
|
| 67 |
+
registerBookEmpireRoutes(apiRouter);
|
| 68 |
+
|
| 69 |
+
app.use('/api', apiRouter);
|
| 70 |
+
|
| 71 |
+
// Error handler
|
| 72 |
+
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
| 73 |
+
app.use((error: any, _req: express.Request, res: express.Response, _next: express.NextFunction) => {
|
| 74 |
+
console.error('API error:', error);
|
| 75 |
+
res.status(500).json({
|
| 76 |
+
error: 'Internal Server Error',
|
| 77 |
+
message: error?.message ?? 'Unknown error',
|
| 78 |
+
});
|
| 79 |
+
});
|
| 80 |
+
|
| 81 |
+
app.listen(PORT, '0.0.0.0', () => {
|
| 82 |
+
console.log(`hAPI-face2 mediator listening on port ${PORT}`);
|
| 83 |
+
});
|
src/supabase.ts
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { createClient, SupabaseClient } from '@supabase/supabase-js';
|
| 2 |
+
|
| 3 |
+
const { SUPABASE_URL, SUPABASE_SERVICE_ROLE_KEY } = process.env;
|
| 4 |
+
|
| 5 |
+
if (!SUPABASE_URL) {
|
| 6 |
+
throw new Error('SUPABASE_URL is required');
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
if (!SUPABASE_SERVICE_ROLE_KEY) {
|
| 10 |
+
throw new Error('SUPABASE_SERVICE_ROLE_KEY is required');
|
| 11 |
+
}
|
| 12 |
+
|
| 13 |
+
export const supabase: SupabaseClient = createClient(SUPABASE_URL, SUPABASE_SERVICE_ROLE_KEY, {
|
| 14 |
+
auth: {
|
| 15 |
+
autoRefreshToken: false,
|
| 16 |
+
persistSession: false,
|
| 17 |
+
},
|
| 18 |
+
global: {
|
| 19 |
+
headers: {
|
| 20 |
+
'X-Client-Info': 'hAPI-face2/1.0.0',
|
| 21 |
+
},
|
| 22 |
+
},
|
| 23 |
+
});
|
| 24 |
+
|
| 25 |
+
export function tableMissing(error: unknown): boolean {
|
| 26 |
+
if (!error || typeof error !== 'object') return false;
|
| 27 |
+
if ('code' in error && typeof error.code === 'string') {
|
| 28 |
+
return error.code === '42P01'; // Postgres undefined table
|
| 29 |
+
}
|
| 30 |
+
if ('message' in error && typeof error.message === 'string') {
|
| 31 |
+
return error.message.includes('does not exist');
|
| 32 |
+
}
|
| 33 |
+
return false;
|
| 34 |
+
}
|
tsconfig.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"compilerOptions": {
|
| 3 |
+
"target": "ES2021",
|
| 4 |
+
"module": "Node16",
|
| 5 |
+
"outDir": "dist",
|
| 6 |
+
"rootDir": "src",
|
| 7 |
+
"moduleResolution": "node16",
|
| 8 |
+
"esModuleInterop": true,
|
| 9 |
+
"forceConsistentCasingInFileNames": true,
|
| 10 |
+
"resolveJsonModule": true,
|
| 11 |
+
"skipLibCheck": true,
|
| 12 |
+
"strict": true
|
| 13 |
+
},
|
| 14 |
+
"include": ["src/**/*"]
|
| 15 |
+
}
|
utils/__init__.py
DELETED
|
@@ -1,69 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import requests
|
| 3 |
-
import os
|
| 4 |
-
|
| 5 |
-
from pathlib import Path
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
class OSEnver:
|
| 9 |
-
def __init__(self):
|
| 10 |
-
self.envs_stack = []
|
| 11 |
-
self.envs = os.environ.copy()
|
| 12 |
-
|
| 13 |
-
def store_envs(self):
|
| 14 |
-
self.envs_stack.append(self.envs)
|
| 15 |
-
|
| 16 |
-
def restore_envs(self):
|
| 17 |
-
self.envs = self.envs_stack.pop()
|
| 18 |
-
if self.global_scope:
|
| 19 |
-
os.environ = self.envs
|
| 20 |
-
|
| 21 |
-
def set_envs(self, secrets=True, proxies=None, store_envs=True):
|
| 22 |
-
# caller_info = inspect.stack()[1]
|
| 23 |
-
# logger.back(f"OS Envs is set by: {caller_info.filename}")
|
| 24 |
-
|
| 25 |
-
if store_envs:
|
| 26 |
-
self.store_envs()
|
| 27 |
-
|
| 28 |
-
if secrets:
|
| 29 |
-
secrets_path = Path(__file__).parents[1] / "secrets.json"
|
| 30 |
-
if secrets_path.exists():
|
| 31 |
-
with open(secrets_path, "r") as rf:
|
| 32 |
-
secrets = json.load(rf)
|
| 33 |
-
else:
|
| 34 |
-
secrets = {}
|
| 35 |
-
|
| 36 |
-
if proxies:
|
| 37 |
-
for proxy_env in ["http_proxy", "https_proxy"]:
|
| 38 |
-
if isinstance(proxies, str):
|
| 39 |
-
self.envs[proxy_env] = proxies
|
| 40 |
-
elif "http_proxy" in secrets.keys():
|
| 41 |
-
self.envs[proxy_env] = secrets["http_proxy"]
|
| 42 |
-
elif os.getenv("http_proxy"):
|
| 43 |
-
self.envs[proxy_env] = os.getenv("http_proxy")
|
| 44 |
-
else:
|
| 45 |
-
continue
|
| 46 |
-
|
| 47 |
-
self.proxy = (
|
| 48 |
-
self.envs.get("all_proxy")
|
| 49 |
-
or self.envs.get("http_proxy")
|
| 50 |
-
or self.envs.get("https_proxy")
|
| 51 |
-
or None
|
| 52 |
-
)
|
| 53 |
-
self.requests_proxies = {
|
| 54 |
-
"http": self.proxy,
|
| 55 |
-
"https": self.proxy,
|
| 56 |
-
}
|
| 57 |
-
|
| 58 |
-
# https://www.proxynova.com/proxy-server-list/country-us/
|
| 59 |
-
|
| 60 |
-
print(f"Using proxy: [{self.proxy}]")
|
| 61 |
-
# r = requests.get(
|
| 62 |
-
# "http://ifconfig.me/ip",
|
| 63 |
-
# proxies=self.requests_proxies,
|
| 64 |
-
# timeout=10,
|
| 65 |
-
# )
|
| 66 |
-
# print(f"[r.status_code] r.text")
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
enver = OSEnver()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
utils/enver.py
DELETED
|
@@ -1,60 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import os
|
| 3 |
-
|
| 4 |
-
from pathlib import Path
|
| 5 |
-
from utils.logger import logger
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
class OSEnver:
|
| 9 |
-
def __init__(self):
|
| 10 |
-
self.envs_stack = []
|
| 11 |
-
self.envs = os.environ.copy()
|
| 12 |
-
|
| 13 |
-
def store_envs(self):
|
| 14 |
-
self.envs_stack.append(self.envs)
|
| 15 |
-
|
| 16 |
-
def restore_envs(self):
|
| 17 |
-
self.envs = self.envs_stack.pop()
|
| 18 |
-
|
| 19 |
-
def set_envs(self, secrets=True, proxies=None, store_envs=True):
|
| 20 |
-
# caller_info = inspect.stack()[1]
|
| 21 |
-
# logger.back(f"OS Envs is set by: {caller_info.filename}")
|
| 22 |
-
|
| 23 |
-
if store_envs:
|
| 24 |
-
self.store_envs()
|
| 25 |
-
|
| 26 |
-
if secrets:
|
| 27 |
-
secrets_path = Path(__file__).parents[1] / "secrets.json"
|
| 28 |
-
if secrets_path.exists():
|
| 29 |
-
with open(secrets_path, "r") as rf:
|
| 30 |
-
secrets = json.load(rf)
|
| 31 |
-
else:
|
| 32 |
-
secrets = {}
|
| 33 |
-
|
| 34 |
-
if proxies:
|
| 35 |
-
for proxy_env in ["http_proxy", "https_proxy"]:
|
| 36 |
-
if isinstance(proxies, str):
|
| 37 |
-
self.envs[proxy_env] = proxies
|
| 38 |
-
elif "http_proxy" in secrets.keys():
|
| 39 |
-
self.envs[proxy_env] = secrets["http_proxy"]
|
| 40 |
-
elif os.getenv("http_proxy"):
|
| 41 |
-
self.envs[proxy_env] = os.getenv("http_proxy")
|
| 42 |
-
else:
|
| 43 |
-
continue
|
| 44 |
-
|
| 45 |
-
self.proxy = (
|
| 46 |
-
self.envs.get("all_proxy")
|
| 47 |
-
or self.envs.get("http_proxy")
|
| 48 |
-
or self.envs.get("https_proxy")
|
| 49 |
-
or None
|
| 50 |
-
)
|
| 51 |
-
self.requests_proxies = {
|
| 52 |
-
"http": self.proxy,
|
| 53 |
-
"https": self.proxy,
|
| 54 |
-
}
|
| 55 |
-
|
| 56 |
-
if self.proxy:
|
| 57 |
-
logger.note(f"Using proxy: [{self.proxy}]")
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
enver = OSEnver()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
utils/logger.py
DELETED
|
@@ -1,269 +0,0 @@
|
|
| 1 |
-
import datetime
|
| 2 |
-
import functools
|
| 3 |
-
import inspect
|
| 4 |
-
import logging
|
| 5 |
-
import os
|
| 6 |
-
import shutil
|
| 7 |
-
import subprocess
|
| 8 |
-
from termcolor import colored
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
def add_fillers(text, filler="=", fill_side="both"):
|
| 12 |
-
terminal_width = shutil.get_terminal_size().columns
|
| 13 |
-
text = text.strip()
|
| 14 |
-
text_width = len(text)
|
| 15 |
-
if text_width >= terminal_width:
|
| 16 |
-
return text
|
| 17 |
-
|
| 18 |
-
if fill_side[0].lower() == "b":
|
| 19 |
-
leading_fill_str = filler * ((terminal_width - text_width) // 2 - 1) + " "
|
| 20 |
-
trailing_fill_str = " " + filler * (
|
| 21 |
-
terminal_width - text_width - len(leading_fill_str) - 1
|
| 22 |
-
)
|
| 23 |
-
elif fill_side[0].lower() == "l":
|
| 24 |
-
leading_fill_str = filler * (terminal_width - text_width - 1) + " "
|
| 25 |
-
trailing_fill_str = ""
|
| 26 |
-
elif fill_side[0].lower() == "r":
|
| 27 |
-
leading_fill_str = ""
|
| 28 |
-
trailing_fill_str = " " + filler * (terminal_width - text_width - 1)
|
| 29 |
-
else:
|
| 30 |
-
raise ValueError("Invalid fill_side")
|
| 31 |
-
|
| 32 |
-
filled_str = f"{leading_fill_str}{text}{trailing_fill_str}"
|
| 33 |
-
return filled_str
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
class OSLogger(logging.Logger):
|
| 37 |
-
LOG_METHODS = {
|
| 38 |
-
"err": ("error", "red"),
|
| 39 |
-
"warn": ("warning", "light_red"),
|
| 40 |
-
"note": ("info", "light_magenta"),
|
| 41 |
-
"mesg": ("info", "light_cyan"),
|
| 42 |
-
"file": ("info", "light_blue"),
|
| 43 |
-
"line": ("info", "white"),
|
| 44 |
-
"success": ("info", "light_green"),
|
| 45 |
-
"fail": ("info", "light_red"),
|
| 46 |
-
"back": ("debug", "light_cyan"),
|
| 47 |
-
}
|
| 48 |
-
INDENT_METHODS = [
|
| 49 |
-
"indent",
|
| 50 |
-
"set_indent",
|
| 51 |
-
"reset_indent",
|
| 52 |
-
"store_indent",
|
| 53 |
-
"restore_indent",
|
| 54 |
-
"log_indent",
|
| 55 |
-
]
|
| 56 |
-
LEVEL_METHODS = [
|
| 57 |
-
"set_level",
|
| 58 |
-
"store_level",
|
| 59 |
-
"restore_level",
|
| 60 |
-
"quiet",
|
| 61 |
-
"enter_quiet",
|
| 62 |
-
"exit_quiet",
|
| 63 |
-
]
|
| 64 |
-
LEVEL_NAMES = {
|
| 65 |
-
"critical": logging.CRITICAL,
|
| 66 |
-
"error": logging.ERROR,
|
| 67 |
-
"warning": logging.WARNING,
|
| 68 |
-
"info": logging.INFO,
|
| 69 |
-
"debug": logging.DEBUG,
|
| 70 |
-
}
|
| 71 |
-
|
| 72 |
-
def __init__(self, name=None, prefix=False):
|
| 73 |
-
if not name:
|
| 74 |
-
frame = inspect.stack()[1]
|
| 75 |
-
module = inspect.getmodule(frame[0])
|
| 76 |
-
name = module.__name__
|
| 77 |
-
|
| 78 |
-
super().__init__(name)
|
| 79 |
-
self.setLevel(logging.INFO)
|
| 80 |
-
|
| 81 |
-
if prefix:
|
| 82 |
-
formatter_prefix = "[%(asctime)s] - [%(name)s] - [%(levelname)s]\n"
|
| 83 |
-
else:
|
| 84 |
-
formatter_prefix = ""
|
| 85 |
-
|
| 86 |
-
self.formatter = logging.Formatter(formatter_prefix + "%(message)s")
|
| 87 |
-
|
| 88 |
-
stream_handler = logging.StreamHandler()
|
| 89 |
-
stream_handler.setLevel(logging.INFO)
|
| 90 |
-
stream_handler.setFormatter(self.formatter)
|
| 91 |
-
self.addHandler(stream_handler)
|
| 92 |
-
|
| 93 |
-
self.log_indent = 0
|
| 94 |
-
self.log_indents = []
|
| 95 |
-
|
| 96 |
-
self.log_level = "info"
|
| 97 |
-
self.log_levels = []
|
| 98 |
-
|
| 99 |
-
def indent(self, indent=2):
|
| 100 |
-
self.log_indent += indent
|
| 101 |
-
|
| 102 |
-
def set_indent(self, indent=2):
|
| 103 |
-
self.log_indent = indent
|
| 104 |
-
|
| 105 |
-
def reset_indent(self):
|
| 106 |
-
self.log_indent = 0
|
| 107 |
-
|
| 108 |
-
def store_indent(self):
|
| 109 |
-
self.log_indents.append(self.log_indent)
|
| 110 |
-
|
| 111 |
-
def restore_indent(self):
|
| 112 |
-
self.log_indent = self.log_indents.pop(-1)
|
| 113 |
-
|
| 114 |
-
def set_level(self, level):
|
| 115 |
-
self.log_level = level
|
| 116 |
-
self.setLevel(self.LEVEL_NAMES[level])
|
| 117 |
-
|
| 118 |
-
def store_level(self):
|
| 119 |
-
self.log_levels.append(self.log_level)
|
| 120 |
-
|
| 121 |
-
def restore_level(self):
|
| 122 |
-
self.log_level = self.log_levels.pop(-1)
|
| 123 |
-
self.set_level(self.log_level)
|
| 124 |
-
|
| 125 |
-
def quiet(self):
|
| 126 |
-
self.set_level("critical")
|
| 127 |
-
|
| 128 |
-
def enter_quiet(self, quiet=False):
|
| 129 |
-
if quiet:
|
| 130 |
-
self.store_level()
|
| 131 |
-
self.quiet()
|
| 132 |
-
|
| 133 |
-
def exit_quiet(self, quiet=False):
|
| 134 |
-
if quiet:
|
| 135 |
-
self.restore_level()
|
| 136 |
-
|
| 137 |
-
def log(
|
| 138 |
-
self,
|
| 139 |
-
level,
|
| 140 |
-
color,
|
| 141 |
-
msg,
|
| 142 |
-
indent=0,
|
| 143 |
-
fill=False,
|
| 144 |
-
fill_side="both",
|
| 145 |
-
end="\n",
|
| 146 |
-
*args,
|
| 147 |
-
**kwargs,
|
| 148 |
-
):
|
| 149 |
-
if type(msg) == str:
|
| 150 |
-
msg_str = msg
|
| 151 |
-
else:
|
| 152 |
-
msg_str = repr(msg)
|
| 153 |
-
quotes = ["'", '"']
|
| 154 |
-
if msg_str[0] in quotes and msg_str[-1] in quotes:
|
| 155 |
-
msg_str = msg_str[1:-1]
|
| 156 |
-
|
| 157 |
-
indent_str = " " * (self.log_indent + indent)
|
| 158 |
-
indented_msg = "\n".join([indent_str + line for line in msg_str.split("\n")])
|
| 159 |
-
|
| 160 |
-
if fill:
|
| 161 |
-
indented_msg = add_fillers(indented_msg, fill_side=fill_side)
|
| 162 |
-
|
| 163 |
-
handler = self.handlers[0]
|
| 164 |
-
handler.terminator = end
|
| 165 |
-
|
| 166 |
-
getattr(self, level)(colored(indented_msg, color), *args, **kwargs)
|
| 167 |
-
|
| 168 |
-
def route_log(self, method, msg, *args, **kwargs):
|
| 169 |
-
level, method = method
|
| 170 |
-
functools.partial(self.log, level, method, msg)(*args, **kwargs)
|
| 171 |
-
|
| 172 |
-
def err(self, msg: str = "", *args, **kwargs):
|
| 173 |
-
self.route_log(("error", "red"), msg, *args, **kwargs)
|
| 174 |
-
|
| 175 |
-
def warn(self, msg: str = "", *args, **kwargs):
|
| 176 |
-
self.route_log(("warning", "light_red"), msg, *args, **kwargs)
|
| 177 |
-
|
| 178 |
-
def note(self, msg: str = "", *args, **kwargs):
|
| 179 |
-
self.route_log(("info", "light_magenta"), msg, *args, **kwargs)
|
| 180 |
-
|
| 181 |
-
def mesg(self, msg: str = "", *args, **kwargs):
|
| 182 |
-
self.route_log(("info", "light_cyan"), msg, *args, **kwargs)
|
| 183 |
-
|
| 184 |
-
def file(self, msg: str = "", *args, **kwargs):
|
| 185 |
-
self.route_log(("info", "light_blue"), msg, *args, **kwargs)
|
| 186 |
-
|
| 187 |
-
def line(self, msg: str = "", *args, **kwargs):
|
| 188 |
-
self.route_log(("info", "white"), msg, *args, **kwargs)
|
| 189 |
-
|
| 190 |
-
def success(self, msg: str = "", *args, **kwargs):
|
| 191 |
-
self.route_log(("info", "light_green"), msg, *args, **kwargs)
|
| 192 |
-
|
| 193 |
-
def fail(self, msg: str = "", *args, **kwargs):
|
| 194 |
-
self.route_log(("info", "light_red"), msg, *args, **kwargs)
|
| 195 |
-
|
| 196 |
-
def back(self, msg: str = "", *args, **kwargs):
|
| 197 |
-
self.route_log(("debug", "light_cyan"), msg, *args, **kwargs)
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
logger = OSLogger()
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
def shell_cmd(cmd, getoutput=False, showcmd=True, env=None):
|
| 204 |
-
if showcmd:
|
| 205 |
-
logger.info(colored(f"\n$ [{os.getcwd()}]", "light_blue"))
|
| 206 |
-
logger.info(colored(f" $ {cmd}\n", "light_cyan"))
|
| 207 |
-
if getoutput:
|
| 208 |
-
output = subprocess.getoutput(cmd, env=env)
|
| 209 |
-
return output
|
| 210 |
-
else:
|
| 211 |
-
subprocess.run(cmd, shell=True, env=env)
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
class Runtimer:
|
| 215 |
-
def __enter__(self):
|
| 216 |
-
self.t1, _ = self.start_time()
|
| 217 |
-
return self
|
| 218 |
-
|
| 219 |
-
def __exit__(self, exc_type, exc_value, traceback):
|
| 220 |
-
self.t2, _ = self.end_time()
|
| 221 |
-
self.elapsed_time(self.t2 - self.t1)
|
| 222 |
-
|
| 223 |
-
def start_time(self):
|
| 224 |
-
t1 = datetime.datetime.now()
|
| 225 |
-
self.logger_time("start", t1)
|
| 226 |
-
return t1, self.time2str(t1)
|
| 227 |
-
|
| 228 |
-
def end_time(self):
|
| 229 |
-
t2 = datetime.datetime.now()
|
| 230 |
-
self.logger_time("end", t2)
|
| 231 |
-
return t2, self.time2str(t2)
|
| 232 |
-
|
| 233 |
-
def elapsed_time(self, dt=None):
|
| 234 |
-
if dt is None:
|
| 235 |
-
dt = self.t2 - self.t1
|
| 236 |
-
self.logger_time("elapsed", dt)
|
| 237 |
-
return dt, self.time2str(dt)
|
| 238 |
-
|
| 239 |
-
def logger_time(self, time_type, t):
|
| 240 |
-
time_types = {
|
| 241 |
-
"start": "Start",
|
| 242 |
-
"end": "End",
|
| 243 |
-
"elapsed": "Elapsed",
|
| 244 |
-
}
|
| 245 |
-
time_str = add_fillers(
|
| 246 |
-
colored(
|
| 247 |
-
f"{time_types[time_type]} time: [ {self.time2str(t)} ]",
|
| 248 |
-
"light_magenta",
|
| 249 |
-
),
|
| 250 |
-
fill_side="both",
|
| 251 |
-
)
|
| 252 |
-
logger.line(time_str)
|
| 253 |
-
|
| 254 |
-
# Convert time to string
|
| 255 |
-
def time2str(self, t):
|
| 256 |
-
datetime_str_format = "%Y-%m-%d %H:%M:%S"
|
| 257 |
-
if isinstance(t, datetime.datetime):
|
| 258 |
-
return t.strftime(datetime_str_format)
|
| 259 |
-
elif isinstance(t, datetime.timedelta):
|
| 260 |
-
hours = t.seconds // 3600
|
| 261 |
-
hour_str = f"{hours} hr" if hours > 0 else ""
|
| 262 |
-
minutes = (t.seconds // 60) % 60
|
| 263 |
-
minute_str = f"{minutes:>2} min" if minutes > 0 else ""
|
| 264 |
-
seconds = t.seconds % 60
|
| 265 |
-
second_str = f"{seconds:>2} s"
|
| 266 |
-
time_str = " ".join([hour_str, minute_str, second_str]).strip()
|
| 267 |
-
return time_str
|
| 268 |
-
else:
|
| 269 |
-
return str(t)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
vercel.json
DELETED
|
@@ -1,17 +0,0 @@
|
|
| 1 |
-
{
|
| 2 |
-
"builds": [
|
| 3 |
-
{
|
| 4 |
-
"src": "apis/chat_api.py",
|
| 5 |
-
"use": "@vercel/python"
|
| 6 |
-
}
|
| 7 |
-
],
|
| 8 |
-
"routes": [
|
| 9 |
-
{
|
| 10 |
-
"src": "/(.*)",
|
| 11 |
-
"dest": "/apis/chat_api.py"
|
| 12 |
-
}
|
| 13 |
-
],
|
| 14 |
-
"env": {
|
| 15 |
-
"APP_MODULE": "apis.chat_api:app"
|
| 16 |
-
}
|
| 17 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|