first
Browse files
- .dockerignore +9 -0
- .gitignore +1 -0
- Dockerfile +14 -0
- README.md +5 -8
- app.py +471 -0
- restapi.js +438 -0
- tunner.py +67 -0
.dockerignore
ADDED
@@ -0,0 +1,9 @@
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+.Python
+env
+venv
+.env
+.git
.gitignore
ADDED
@@ -0,0 +1 @@
+*.txt
Dockerfile
ADDED
@@ -0,0 +1,14 @@
+FROM python:3.9-slim
+
+WORKDIR /app
+
+COPY ./requirements.txt /app/requirements.txt
+COPY ./app.py /app/app.py
+
+
+RUN pip install --no-cache-dir -r requirements.txt
+
+
+EXPOSE 7860
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
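
Note: the Dockerfile copies a requirements.txt that is not part of this commit, and the .gitignore above excludes `*.txt`, so that file would also be missing from version control. A plausible reconstruction from app.py's imports (an assumption, not shipped content):

```
# Hypothetical requirements.txt, inferred from app.py's imports
fastapi
uvicorn
requests
psutil
aiohttp
python-multipart  # required by FastAPI for UploadFile/Form endpoints
```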
README.md
CHANGED
@@ -1,12 +1,9 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Flash API Forwarder
+emoji: 🚀
+colorFrom: blue
+colorTo: red
 sdk: docker
-pinned: false
-license: apache-2.0
-short_description: test rest api
 ---
 
-
+FastAPI service for forwarding AI API requests
app.py
ADDED
@@ -0,0 +1,471 @@
+from fastapi import FastAPI, HTTPException, UploadFile, File, Form, Request
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import HTMLResponse
+from pydantic import BaseModel
+from asyncio import TimeoutError
+import asyncio
+from typing import List, Dict, Optional
+import requests
+import uvicorn
+import shutil
+import datetime
+import logging
+from logging.handlers import RotatingFileHandler
+import time
+import json
+import os
+import psutil
+import sys
+import tempfile
+import re
+import random
+import aiohttp
+
+app = FastAPI()
+
+# Pool of browser user agents for proxied requests
+USER_AGENTS = [
+    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
+]
+
+# Configure detailed logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Request counter
+request_counter = {
+    "analyze": 0,
+    "compareAnalyze": 0,
+    "total": 0
+}
+
+# Add CORS middleware for both HTTP and HTTPS origins.
+# Note: CORSMiddleware matches allow_origins entries literally, so wildcard
+# strings like "http://*" would never match; a regex is needed instead.
+app.add_middleware(
+    CORSMiddleware,
+    allow_origin_regex=r"https?://.*",
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+class AskRequest(BaseModel):
+    prompt: str
+    model: str = "GEMINI"
+
+
+@app.get("/")
+async def health_check():
+    return {
+        "health": "ok",
+        "timestamp": datetime.datetime.now().isoformat(),
+        "service": "AI API Forwarding Service",
+        "version": "1.0"
+    }
+
+
+@app.post("/ask")
+async def forward_ask(request: AskRequest):
+    request_counter["total"] += 1
+    try:
+        response = requests.post(
+            "http://s5.serv00.com:9081/ask",
+            headers={'Content-Type': 'application/json'},
+            json=request.dict()
+        )
+        return response.json()
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
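
The forwarding handlers call the blocking `requests` library from inside `async def`, which stalls the event loop while the upstream responds. A minimal non-blocking alternative using the already-imported aiohttp (a sketch, not part of the commit; `/ask_async` is a hypothetical route name):

```python
# Sketch only: an async variant of forward_ask built on aiohttp.
UPSTREAM = "http://s5.serv00.com:9081"  # same upstream host as above

@app.post("/ask_async")
async def forward_ask_async(request: AskRequest):
    request_counter["total"] += 1
    try:
        async with aiohttp.ClientSession() as session:
            # session.post with json= serializes the body and sets the header
            async with session.post(f"{UPSTREAM}/ask", json=request.dict()) as resp:
                return await resp.json()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
```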
+
+@app.post("/analyze")
+async def forward_analyze(image: UploadFile = File(...), model: str = Form(...)):
+    request_counter["analyze"] += 1
+    request_counter["total"] += 1
+    try:
+        files = {'image': (image.filename, image.file, image.content_type)}
+        data = {'model': model}
+        response = requests.post(
+            "http://s5.serv00.com:9081/analyze",
+            files=files,
+            data=data
+        )
+        return response.json()
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+@app.post("/compareAnalyze")
+async def forward_compare_analyze(image: UploadFile = File(...)):
+    request_counter["compareAnalyze"] += 1
+    request_counter["total"] += 1
+    try:
+        files = {'image': (image.filename, image.file, image.content_type)}
+        response = requests.post(
+            "http://s5.serv00.com:9081/compareAnalyze",
+            files=files
+        )
+        return response.json()
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+@app.get("/status")
+async def forward_status():
+    start_time = time.time()
+    logger.info(f"Received status request at {datetime.datetime.now()}")
+    logger.info(f"Current request counter: {request_counter}")
+
+    try:
+        logger.info("Attempting to contact upstream server...")
+        response = requests.get("http://s5.serv00.com:9081/status")
+        elapsed_time = time.time() - start_time
+
+        logger.info(f"Upstream server responded in {elapsed_time:.2f} seconds")
+        logger.info(f"Response status code: {response.status_code}")
+        logger.info(f"Response content: {response.text[:200]}...")
+
+        return response.json()
+    except Exception as e:
+        logger.error(f"Error occurred: {str(e)}")
+        logger.error(f"Error type: {type(e).__name__}")
+        return {
+            "status": "running",
+            "requests": request_counter,
+            "error": str(e),
+            "timestamp": datetime.datetime.now().isoformat()
+        }
+
+@app.get("/check", response_class=HTMLResponse)
+async def forward_check():
+    try:
+        response = requests.get("http://s5.serv00.com:9081/check")
+        return response.text
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
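
For reference, exercising the /analyze forwarder from a client (a hedged sketch; the multipart field names match the handler above, and the host/port follow the Dockerfile's EXPOSE 7860):

```python
# Sketch: calling the forwarder's /analyze endpoint with a local image.
import requests

with open("label.jpg", "rb") as f:  # hypothetical image file
    r = requests.post(
        "http://localhost:7860/analyze",
        files={"image": ("label.jpg", f, "image/jpeg")},
        data={"model": "GEMINI"},
    )
print(r.json())
```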
+
+
+# Models for the bundled word lists
+class Translation(BaseModel):
+    translation: str
+    type: str
+
+class Phrase(BaseModel):
+    phrase: str
+    translation: str
+
+class Word(BaseModel):
+    word: str
+    translations: List[dict]  # accepts dictionary entries
+    phrases: List[Phrase] = []  # optional, defaults to empty list
+    level: str = ""  # level tag, filled in from the source filename
+
+# Global word map
+word_map: Dict[str, Word] = {}
+
+def get_level_from_filename(filename: str) -> str:
+    # Extract level from filenames like "1-初中-顺序.json"
+    match = re.match(r'\d+-(.+?)-顺序\.json', filename)
+    return match.group(1) if match else "unknown"
+
+# Initialization function
+def init_word_map():
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    json_dir = os.path.join(current_dir, "json")
+    stats = {
+        "total_words": 0,
+        "total_files": 0,
+        "file_stats": {}
+    }
+
+    try:
+        for filename in os.listdir(json_dir):
+            if filename.endswith('.json'):
+                try:
+                    level = get_level_from_filename(filename)
+                    with open(os.path.join(json_dir, filename), 'r', encoding='utf-8') as f:
+                        words = json.load(f)
+                    word_count = len(words)
+                    stats["total_words"] += word_count
+                    stats["total_files"] += 1
+                    stats["file_stats"][filename] = word_count
+                    for word_data in words:
+                        # Convert legacy format to new format
+                        if 'translations' not in word_data:
+                            word_data['translations'] = [{
+                                'translation': word_data.get('translation', ''),
+                                'type': word_data.get('type', '')
+                            }]
+                        if 'phrases' not in word_data:
+                            word_data['phrases'] = []
+
+                        word_data['level'] = level
+                        word = Word(**word_data)
+                        word_map[word.word.lower()] = word
+                    logger.info(f"Loaded {filename}: {word_count} words")
+                except Exception as e:
+                    logger.error(f"Error loading {filename}: {str(e)}")
+                    continue
+
+        logger.info("Dictionary initialization complete:")
+        logger.info(f"Total files processed: {stats['total_files']}")
+        logger.info(f"Total words loaded: {stats['total_words']}")
+        return stats
+    except Exception as e:
+        logger.error(f"Fatal error in init_word_map: {str(e)}")
+        return stats
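
init_word_map reads a json/ directory that is not part of this commit. From the loader's conversion logic, an entry in a file such as 1-初中-顺序.json would plausibly look like this (a reconstruction, not shipped data; a legacy flat form with top-level "translation"/"type" is also accepted and converted):

```json
[
  {
    "word": "hello",
    "translations": [
      { "translation": "你好", "type": "int." }
    ],
    "phrases": [
      { "phrase": "say hello to", "translation": "向……问好" }
    ]
  }
]
```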
+
+
+# Cache configuration (stored under the system temp directory)
+CACHE_DIR = os.path.join(tempfile.gettempdir(), "flash_api_cache")
+CACHE_FILE = os.path.join(CACHE_DIR, "ai_translation_cache.json")
+ai_cache: Dict[str, dict] = {}
+
+def save_cache():
+    try:
+        # Create cache directory if it doesn't exist
+        os.makedirs(CACHE_DIR, exist_ok=True)
+
+        with open(CACHE_FILE, 'w', encoding='utf-8') as f:
+            json.dump(ai_cache, f, ensure_ascii=False, indent=2)
+        logger.info(f"Cache saved to: {CACHE_FILE}")
+    except PermissionError as pe:
+        logger.error(f"Permission denied writing to cache: {pe}")
+    except Exception as e:
+        logger.error(f"Error saving cache: {e}")
+
+def load_cache():
+    global ai_cache
+    try:
+        if os.path.exists(CACHE_FILE):
+            with open(CACHE_FILE, 'r', encoding='utf-8') as f:
+                ai_cache = json.load(f)
+            logger.info(f"Loaded {len(ai_cache)} cached translations from: {CACHE_FILE}")
+    except Exception as e:
+        logger.error(f"Error loading cache: {e}")
+        ai_cache = {}
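
save_cache rewrites the whole file in place, so a crash mid-write can leave a corrupt cache. A common hardening is write-then-rename (a sketch under the same CACHE_FILE layout, not part of the commit):

```python
# Sketch: atomic cache save via a temp file and os.replace.
import json, os, tempfile

def save_cache_atomic(cache: dict, cache_file: str) -> None:
    cache_dir = os.path.dirname(cache_file)
    os.makedirs(cache_dir, exist_ok=True)
    # Write to a temp file in the same directory so the rename stays on one filesystem
    fd, tmp_path = tempfile.mkstemp(dir=cache_dir, suffix=".tmp")
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(cache, f, ensure_ascii=False, indent=2)
        os.replace(tmp_path, cache_file)  # atomic swap into place
    except BaseException:
        os.unlink(tmp_path)  # clean up the partial file on any failure
        raise
```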
+
+# Translate endpoint
+@app.get("/translate/{word}")
+async def translate_word(word: str):
+    start_time = time.time()
+    logger.info(f"Translation request received for word: {word}")
+
+    try:
+        word = word.lower().strip()
+        logger.debug(f"Processed word: {word}")
+
+        # Local word-map lookup, currently disabled:
+        # if word in word_map:
+        #     logger.info(f"Word found in map: {word}")
+        #     word_data = word_map[word]
+        #     logger.debug(f"Word data: {word_data}")
+
+        #     # Format all translations
+        #     translations_text = []
+        #     for trans in word_data.translations:
+        #         translation = trans['translation']
+        #         type_info = trans['type']
+        #         translations_text.append(f"({type_info}) {translation}")
+
+        #     # Join translations with separators
+        #     translations_combined = " | ".join(translations_text)
+        #     logger.debug(f"Combined translations: {translations_combined}")
+
+        #     # Handle examples
+        #     examples = []
+        #     if word_data.phrases:
+        #         examples = [f"{p.phrase}: {p.translation}" for p in word_data.phrases[:3]]
+        #     logger.debug(f"Examples found: {examples}")
+
+        #     # Build response with proper formatting
+        #     formatted_response = f"{word} [{word_data.level}]: {translations_combined}"
+        #     if examples:
+        #         formatted_response += f"\n\n例句:\n{chr(10).join(examples)}"
+
+        #     elapsed = time.time() - start_time
+        #     logger.info(f"Word map translation completed in {elapsed:.2f}s")
+
+        #     return {
+        #         "status": 200,
+        #         "data": {
+        #             "response": formatted_response,
+        #             "word": word,
+        #             "level": word_data.level,
+        #             "translations": word_data.translations,
+        #             "examples": examples
+        #         }
+        #     }
+
+        # AI cache lookup, currently disabled:
+        # if word in ai_cache:
+        #     logger.info(f"Word found in AI cache: {word}")
+        #     elapsed = time.time() - start_time
+        #     logger.info(f"Cache hit completed in {elapsed:.2f}s")
+        #     return ai_cache[word]
+
+        # Fallback to AI translation
+        logger.info("Word not found locally, falling back to AI translation")
+        try:
+            # The prompt (in Chinese) asks for: phonetic transcription, part of
+            # speech, Chinese translation, English explanation, example sentence,
+            # synonyms and antonyms -- one entry per line, no markdown formatting.
+            request = AskRequest(
+                prompt=f'''翻译以下英文
+{word}
+每行一个 格式参考,不要任何md格式,分别要有音标,单词属性(名词,动词,形容词),中文翻译,英文解析,例句,近义词,反义词,词性
+格式参考:
+hello:/həˈləʊ/| n. vt. int.|你好,问候语,|例句:Hello, how are you? 你好,你好吗?|近义词:hi, hey, |反义词:sick, bad.''',
+                model="GEMINI"
+            )
+            logger.debug(f"AI Request: {request}")
+
+            result = await forward_ask(request)
+
+            # Caching of the result is currently disabled:
+            # ai_cache[word] = result
+            # save_cache()
+            logger.debug(f"AI Response: {result}")
+
+            elapsed = time.time() - start_time
+            logger.info(f"AI translation completed in {elapsed:.2f}s")
+            return result
+
+        except Exception as e:
+            logger.error(f"AI translation error: {str(e)}", exc_info=True)
+            raise HTTPException(status_code=500, detail=str(e))
+
+    except Exception as e:
+        logger.error(f"Translation error: {str(e)}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+
+# Cleanup helpers
+def cleanup_temp_files():
+    try:
+        # Clean temp directory
+        temp_dir = os.path.join(tempfile.gettempdir(), "flash_api_cache")
+        if os.path.exists(temp_dir):
+            shutil.rmtree(temp_dir)
+            logger.info(f"Cleaned up temp directory: {temp_dir}")
+    except Exception as e:
+        logger.error(f"Error cleaning temp files: {e}")
+
+def cleanup_cache():
+    global ai_cache
+    ai_cache = {}
+    logger.info("Cache cleared")
+
+
+# Startup/shutdown hooks. Note: @app.on_event still works but is deprecated
+# in newer FastAPI releases in favor of lifespan handlers (see sketch below).
+@app.on_event("startup")
+async def startup_event():
+    # init_word_map()  # skipped: loading every word list wastes memory
+    # load_cache()
+    cleanup_temp_files()
+    cleanup_cache()
+    logger.info(f"Memory usage after init: {get_memory_usage()}")
+
+@app.on_event("shutdown")
+async def shutdown_event():
+    # Cleanup on shutdown
+    cleanup_temp_files()
+    cleanup_cache()
+    logger.info("Application shutdown cleanup complete")
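
As noted in the comment above, the equivalent lifespan form would look like this (a sketch, not part of the commit):

```python
# Sketch: lifespan-based replacement for the startup/shutdown hooks.
from contextlib import asynccontextmanager
from fastapi import FastAPI

@asynccontextmanager
async def lifespan(app: FastAPI):
    cleanup_temp_files()   # startup work
    cleanup_cache()
    yield                  # application serves requests here
    cleanup_temp_files()   # shutdown work
    cleanup_cache()

# app = FastAPI(lifespan=lifespan)  # would replace the bare FastAPI() above
```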
+def get_memory_usage():
+    process = psutil.Process()
+    memory_info = process.memory_info()
+
+    # Get system memory info
+    system = psutil.virtual_memory()
+
+    return {
+        "process": {
+            "rss": f"{memory_info.rss / 1024 / 1024:.2f} MB",
+            "rss_percent": f"{memory_info.rss / system.total * 100:.2f}%",
+            "vms": f"{memory_info.vms / 1024 / 1024:.2f} MB",
+            "vms_percent": f"{memory_info.vms / system.total * 100:.2f}%"
+        },
+        "system": {
+            "total": f"{system.total / 1024 / 1024:.2f} MB",
+            "available": f"{system.available / 1024 / 1024:.2f} MB",
+            "used_percent": f"{system.percent:.2f}%"
+        },
+        "word_map": {
+            "entries": len(word_map),
+            "memory": f"{sys.getsizeof(word_map) / 1024 / 1024:.2f} MB",
+            "memory_percent": f"{sys.getsizeof(word_map) / system.total * 100:.4f}%"
+        }
+    }
+
+@app.get("/memory")
+async def memory_status():
+    return get_memory_usage()
+
+# Proxy endpoint (used to fetch YouTube subtitle JSON)
+@app.get("/proxy")
+async def proxy_request(url: str, request: Request):
+    try:
+        # Pick a random user agent
+        user_agent = random.choice(USER_AGENTS)
+
+        logger.info(f"Proxy request received for: {url}")
+
+        # Prepare browser-like headers
+        headers = {
+            'User-Agent': user_agent,
+            'Accept': 'application/json, text/plain, */*',
+            'Accept-Language': 'en-US,en;q=0.9',
+            'Origin': 'https://www.youtube.com',
+            'Referer': 'https://www.youtube.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'Connection': 'keep-alive'
+        }
+
+        # Set a 10-second total timeout
+        timeout = aiohttp.ClientTimeout(total=10)
+
+        # Caution: this fetches any caller-supplied URL; restricting it to
+        # known subtitle hosts would avoid acting as an open proxy.
+        async with aiohttp.ClientSession(timeout=timeout) as session:
+            async with session.get(url, headers=headers) as response:
+                # Check HTTP status
+                if response.status != 200:
+                    raise HTTPException(
+                        status_code=response.status,
+                        detail=f"HTTP error: {response.status}"
+                    )
+
+                # Parse JSON response
+                data = await response.json()
+
+                logger.info(f"Received YouTube subtitle data: {len(data)} top-level keys")
+
+                # Validate data format
+                if not data or 'events' not in data:
+                    raise HTTPException(
+                        status_code=400,
+                        detail="Invalid subtitle data format"
+                    )
+
+                return data
+
+    except TimeoutError:
+        raise HTTPException(status_code=408, detail="Request timeout")
+    except HTTPException:
+        raise  # preserve the deliberate HTTP errors raised above
+    except Exception as e:
+        logger.error(f"Proxy error: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=7860)
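
A quick way to exercise /proxy (a sketch; the timedtext URL is a hypothetical placeholder, not from the commit):

```python
# Sketch: calling the /proxy endpoint with a subtitle URL.
import requests

subtitle_url = "https://www.youtube.com/api/timedtext?v=VIDEO_ID&fmt=json3"  # hypothetical
r = requests.get("http://localhost:7860/proxy", params={"url": subtitle_url})
events = r.json().get("events", [])
print(f"{len(events)} subtitle events")
```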
restapi.js
ADDED
@@ -0,0 +1,438 @@
+import express from 'express';
+import multer from 'multer';
+import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
+import { Mistral } from "@mistralai/mistralai";
+import dotenv from "dotenv";
+import sharp from 'sharp';
+import rateLimit from 'express-rate-limit';
+dotenv.config();
+
+// Configure rate limiter
+const limiter = rateLimit({
+    windowMs: 60 * 1000, // 1 minute
+    max: 10, // 10 requests per window
+    message: {
+        status: 429,
+        error: "Too many requests, please try again after 1 minute"
+    },
+    standardHeaders: true,
+    legacyHeaders: false
+});
+
+const app = express();
+const upload = multer({ storage: multer.memoryStorage() });
+const port = 9081;
+
+// Request counters
+let requestCounter = {
+    analyze: 0,
+    compareAnalyze: 0,
+    total: 0
+};
+// Model type enum
+const ModelType = {
+    GEMINI: 'GEMINI',
+    MIXTRAL: 'MIXTRAL',
+    GEMINI_THINKING: 'GEMINI_THINKING'
+};
+
+class ImageAnalysisClient {
+    constructor() {
+        this.init();
+    }
+
+    init() {
+        // Initialize Gemini models
+        const geminiApiKey = process.env.API_KEY6;
+        const geminiThinkingApiKey = process.env.API_KEY5;
+        if (!geminiApiKey) throw new Error("Gemini API_KEY not found");
+        if (!geminiThinkingApiKey) throw new Error("Gemini Thinking API_KEY not found");
+
+        this.genAI = new GoogleGenerativeAI(geminiApiKey);
+        this.genAIThinking = new GoogleGenerativeAI(geminiThinkingApiKey);
+
+        // Initialize Mixtral
+        const mixtralApiKey = process.env.API_KEY_MIXTRAL12;
+        if (!mixtralApiKey) throw new Error("Mixtral API_KEY not found");
+        this.mistral = new Mistral({ apiKey: mixtralApiKey });
+    }
+
+    async analyzeImage(imageBuffer, modelType) {
+        const processedImageBuffer = await sharp(imageBuffer)
+            .grayscale()
+            .jpeg({ quality: 100, progressive: true })
+            .toBuffer();
+        const base64Image = processedImageBuffer.toString('base64');
+
+        const prompt = `Analyze the image for production date and expiration date. Return in JSON format.
+
+Rules:
+- Only extract dates that are explicitly labeled or clearly marked
+- If no clear production date or manufacturing date is found, set production_date to null
+- If no clear expiration date or 保质期 (shelf life) label is found, set expiration_date to null
+- Do not make assumptions or guess dates EXCEPT:
+  * If only one date is found with no label:
+    - If date is future (after ${new Date().toISOString().split('T')[0]}), set as expiration_date
+    - If date is past, set as production_date
+- Date format must be YYYY.MM.DD when found
+- Production date and expiration date cannot be the same day
+
+Example responses:
+Case 1 - Labeled dates:
+{
+    "production_date": "2024.08.20",
+    "expiration_date": "2026.08.20",
+    "production_id": null,
+    "additional_info": null
+}
+
+Case 2 - Single unlabeled future date:
+{
+    "production_date": null,
+    "expiration_date": "2025.04.01", // Future date assumed as expiration
+    "production_id": null,
+    "additional_info": "Single unlabeled date found"
+}
+
+Case 3 - Single unlabeled past date:
+{
+    "production_date": "2023.04.01", // Past date assumed as production
+    "expiration_date": null,
+    "production_id": null,
+    "additional_info": "Single unlabeled date found"
+}
+
+Important: Return null for any field where the information is not explicitly visible in the image.`;
+
+        try {
+            if (modelType === ModelType.GEMINI) {
+                return await this.analyzeWithGemini(base64Image, prompt);
+            } else if (modelType === ModelType.GEMINI_THINKING) {
+                return await this.analyzeWithGeminiThinking(base64Image, prompt);
+            } else {
+                return await this.analyzeWithMixtral(base64Image, prompt);
+            }
+        } catch (error) {
+            console.error(`Error analyzing with ${modelType}:`, error);
+            throw error;
+        }
+    }
+
+    async analyzeWithGemini(base64Image, prompt) {
+        const model = this.genAI.getGenerativeModel({ model: "gemini-2.0-flash-exp" });
+        const result = await model.generateContent([
+            { text: prompt },
+            {
+                inlineData: {
+                    data: base64Image,
+                    mimeType: "image/jpeg"
+                }
+            }
+        ]);
+
+        const text = result.response.text();
+        const jsonMatch = text.match(/```json\s*([\s\S]*?)\s*```/);
+        if (jsonMatch) {
+            return JSON.parse(jsonMatch[1]);
+        }
+        throw new Error("No JSON content found in Gemini response");
+    }
+
+    async analyzeWithMixtral(base64Image, prompt) {
+        try {
+            const result = await this.mistral.chat.stream({
+                model: "pixtral-large-latest",
+                messages: [
+                    {
+                        role: "user",
+                        content: [
+                            { type: "text", text: prompt },
+                            {
+                                type: "image_url",
+                                imageUrl: `data:image/jpeg;base64,${base64Image}`,
+                            },
+                        ]
+                    }
+                ],
+                max_tokens: 1024,
+                temperature: 0.8,
+            });
+
+            let response = "";
+            for await (const chunk of result) {
+                response += chunk.data.choices[0].delta.content;
+            }
+
+            const jsonMatch = response.match(/```json\s*([\s\S]*?)\s*```/);
+            if (jsonMatch) {
+                return JSON.parse(jsonMatch[1]);
+            }
+            return {
+                production_date: null,
+                expiration_date: null,
+                production_id: null,
+                additional_info: "Error: No valid JSON found in Mixtral response"
+            };
+        } catch (error) {
+            console.error("Mixtral API error:", error);
+            return {
+                production_date: null,
+                expiration_date: null,
+                production_id: null,
+                additional_info: `Mixtral Error: ${error.message}`
+            };
+        }
+    }
+
+    // Same flow as analyzeWithGemini, but on the "thinking" model and key
+    async analyzeWithGeminiThinking(base64Image, prompt) {
+        const model = this.genAIThinking.getGenerativeModel({ model: "gemini-2.0-flash-thinking-exp-1219" });
+        const result = await model.generateContent([
+            { text: prompt },
+            {
+                inlineData: {
+                    data: base64Image,
+                    mimeType: "image/jpeg"
+                }
+            }
+        ]);
+
+        const text = result.response.text();
+        const jsonMatch = text.match(/```json\s*([\s\S]*?)\s*```/);
+        if (jsonMatch) {
+            return JSON.parse(jsonMatch[1]);
+        }
+        throw new Error("No JSON content found in Gemini Thinking response");
+    }
+
+    async ask(prompt, modelType) {
+        try {
+            if (modelType === ModelType.GEMINI || modelType === ModelType.GEMINI_THINKING) {
+                const genAI = modelType === ModelType.GEMINI ? this.genAI : this.genAIThinking;
+                const modelName = modelType === ModelType.GEMINI ? "gemini-2.0-flash-exp" : "gemini-2.0-flash-thinking-exp-1219";
+                const model = genAI.getGenerativeModel({ model: modelName });
+
+                const chat = model.startChat({
+                    generationConfig: {
+                        maxOutputTokens: 8192,
+                        temperature: 1,
+                    },
+                    safetySettings: [
+                        { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: HarmBlockThreshold.BLOCK_NONE },
+                        { category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold: HarmBlockThreshold.BLOCK_NONE },
+                        { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
+                        { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE },
+                    ]
+                });
+
+                let totalResponse = "";
+                const result = await chat.sendMessageStream(prompt);
+                for await (const chunk of result.stream) {
+                    const chunkText = chunk.text();
+                    totalResponse += chunkText;
+                }
+                return { response: totalResponse };
+            } else {
+                // Mixtral handling
+                const result = await this.mistral.chat.stream({
+                    model: "mistral-large-latest",
+                    messages: [{ role: "user", content: prompt }],
+                    max_tokens: 1024 * 128,
+                    temperature: 0.8,
+                });
+
+                let response = "";
+                for await (const chunk of result) {
+                    response += chunk.data.choices[0].delta.content;
+                }
+                return { response };
+            }
+        } catch (error) {
+            if (error.toString().includes("Too Many Requests") ||
+                error.toString().includes("Please try again later")) {
+                throw new Error("Rate limit exceeded, please try again later");
+            }
+            console.error(`Error in ${modelType} ask:`, error);
+            throw error;
+        }
+    }
+}
+
+const client = new ImageAnalysisClient();
+
+app.post('/analyze', limiter, upload.single('image'), async (req, res) => {
+    requestCounter.analyze++;
+    requestCounter.total++;
+    try {
+        if (!req.file) {
+            return res.status(400).json({ status: 400, error: "No image file provided" });
+        }
+
+        const modelType = req.body.model?.toUpperCase();
+        if (!ModelType[modelType]) {
+            return res.status(400).json({ status: 400, error: "Invalid model type. Use GEMINI, MIXTRAL, or GEMINI_THINKING" });
+        }
+
+        const result = await client.analyzeImage(req.file.buffer, modelType);
+        res.json({ status: 200, data: result });
+    } catch (error) {
+        console.error("Analysis error:", error);
+        res.status(500).json({ status: 500, error: error.message });
+    }
+});
+
+
+// HTML form for manual testing
+app.get('/check', limiter, (req, res) => {
+    res.send(`
+    <html>
+    <body>
+        <h1>AI API Service</h1>
+        <h2>API Endpoints:</h2>
+        <ul>
+            <li>POST /analyze - Upload image for analysis</li>
+            <li>POST /ask - Ask AI a question</li>
+            <li>GET /status - Check API status</li>
+        </ul>
+
+        <h2>Image Analysis Form:</h2>
+        <form action="/analyze" method="post" enctype="multipart/form-data">
+            <p>Select image file: <input type="file" name="image" accept="image/*" required></p>
+            <p>Select model:
+                <select name="model" required>
+                    <option value="GEMINI">GEMINI</option>
+                    <option value="MIXTRAL">MIXTRAL</option>
+                    <option value="GEMINI_THINKING">GEMINI THINKING</option>
+                </select>
+            </p>
+            <input type="submit" value="Analyze">
+        </form>
+
+        <h2>Ask AI Form:</h2>
+        <form id="askForm">
+            <p>Question: <input type="text" id="prompt" required style="width:300px"></p>
+            <p>Select model:
+                <select id="model" required>
+                    <option value="GEMINI">GEMINI</option>
+                    <option value="MIXTRAL">MIXTRAL</option>
+                    <option value="GEMINI_THINKING">GEMINI THINKING</option>
+                </select>
+            </p>
+            <button type="submit">Ask</button>
+            <pre id="result"></pre>
+        </form>
+
+        <script>
+        document.getElementById('askForm').onsubmit = async (e) => {
+            e.preventDefault();
+            const response = await fetch('/ask', {
+                method: 'POST',
+                headers: {'Content-Type': 'application/json'},
+                body: JSON.stringify({
+                    prompt: document.getElementById('prompt').value,
+                    model: document.getElementById('model').value
+                })
+            });
+            const data = await response.json();
+            document.getElementById('result').textContent =
+                JSON.stringify(data, null, 2);
+        };
+        </script>
+    </body>
+    </html>
+    `);
+});
+
+app.post('/compareAnalyze', limiter, upload.single('image'), async (req, res) => {
+    requestCounter.compareAnalyze++;
+    requestCounter.total++;
+    try {
+        if (!req.file) {
+            return res.status(400).json({ status: 400, error: "No image file provided" });
+        }
+
+        const [geminiResult, mixtralResult, geminiThinkingResult] = await Promise.all([
+            client.analyzeImage(req.file.buffer, ModelType.GEMINI)
+                .catch(error => ({
+                    production_date: null,
+                    expiration_date: null,
+                    production_id: null,
+                    additional_info: null
+                })),
+            client.analyzeImage(req.file.buffer, ModelType.MIXTRAL)
+                .catch(error => ({
+                    production_date: null,
+                    expiration_date: null,
+                    production_id: null,
+                    additional_info: null
+                })),
+            client.analyzeImage(req.file.buffer, ModelType.GEMINI_THINKING)
+                .catch(error => ({
+                    production_date: null,
+                    expiration_date: null,
+                    production_id: null,
+                    additional_info: null
+                }))
+        ]);
+
+        res.json({
+            status: 200,
+            datas: [geminiResult, mixtralResult, geminiThinkingResult]
+        });
+    } catch (error) {
+        console.error("Comparison analysis error:", error);
+        res.status(500).json({
+            status: 500,
+            error: 'Unknown error, please contact the administrator'
+        });
+    }
+});
+
+// Status endpoint
+app.get('/status', (req, res) => {
+    res.json({
+        status: "running",
+        models: "model",
+        version: "1.0.0",
+        copyright: "sonygod",
+        requests: {
+            f1: requestCounter.analyze,
+            f2: requestCounter.compareAnalyze,
+            total: requestCounter.total
+        }
+    });
+});
+
+app.post('/ask', limiter, express.json(), async (req, res) => {
+    requestCounter.total++;
+    try {
+        const { prompt, model } = req.body;
+
+        if (!prompt) {
+            return res.status(400).json({
+                status: 400,
+                error: "No prompt provided"
+            });
+        }
+
+        const modelType = model?.toUpperCase();
+        if (!ModelType[modelType]) {
+            return res.status(400).json({
+                status: 400,
+                error: "Invalid model type. Use GEMINI, MIXTRAL, or GEMINI_THINKING"
+            });
+        }
+
+        const result = await client.ask(prompt, modelType);
+        res.json({ status: 200, data: result });
+    } catch (error) {
+        console.error("Ask error:", error);
+        res.status(500).json({ status: 500, error: error.message });
+    }
+});
+
+// Bind on all interfaces
+app.listen(port, '0.0.0.0', () => {
+    console.log(`Server running on port ${port} (0.0.0.0)`);
+});
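
restapi.js reads its keys from a .env file that is not committed. From the init() code above, it would need entries like the following (variable names are taken from the code; values are placeholders):

```
# .env for restapi.js (placeholders, not real keys)
API_KEY6=your-gemini-api-key
API_KEY5=your-gemini-thinking-api-key
API_KEY_MIXTRAL12=your-mistral-api-key
```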
tunner.py
ADDED
@@ -0,0 +1,67 @@
+from pathlib import Path
+import subprocess
+import time
+
+message = """
+- Ready!
+- Open VSCode on your laptop and open the command prompt
+- Select: 'Remote-Tunnels: Connect to Tunnel' to connect to colab
+""".strip()
+
+dir_path = "/etc/noteable/project"
+local_folder = '/etc/noteable/project'
+
+def start_tunnel() -> None:
+    command = f"cd {dir_path} && ./code tunnel --accept-server-license-terms --random-name"
+    p = subprocess.Popen(
+        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+    )
+
+    show_outputs = False
+    while True:
+        line = p.stdout.readline().decode("utf-8")
+        if show_outputs:
+            print(line.strip())
+        if "To grant access to the server" in line:
+            print(line.strip())
+        if "Open this link" in line:
+            print("Starting the tunnel")
+            time.sleep(5)
+            print(message)
+            print("Logs:")
+            show_outputs = True
+            line = ""
+        if line == "" and p.poll() is not None:
+            break
+    return None
+
+
+def run(command: str) -> None:
+    # No shell here: command.split() keeps the URL (including '&') as one argument
+    process = subprocess.run(command.split())
+    if process.returncode == 0:
+        print(f"Ran: {command}")
+
+def connect() -> None:
+    # Create a folder to store all the code files
+    Path(local_folder).mkdir(parents=True, exist_ok=True)
+
+    print("Installing python libraries...")
+    run("pip3 install --user flake8 black ipywidgets twine")
+    run("pip3 install -U ipykernel")
+    run("apt install htop -y")
+
+    print("Installing vscode-cli...")
+    run(
+        f"curl -Lk https://code.visualstudio.com/sha/download?build=stable&os=cli-alpine-x64 -o {dir_path}/vscode_cli.tar.gz"
+    )
+    run(f"tar -xf {dir_path}/vscode_cli.tar.gz -C {dir_path}")
+
+    print("Starting the tunnel")
+    start_tunnel()
+
+
+# To run the function, simply call: connect()
+connect()