Update app.py
app.py (CHANGED)
```diff
@@ -9,21 +9,17 @@ import os
 import logging
 import time
 from datetime import datetime
-from concurrent.futures import ThreadPoolExecutor
 from html.parser import HTMLParser
-from fastapi import FastAPI, Request
+from fastapi import FastAPI, Request, Query
 from fastapi.responses import JSONResponse
 import uvicorn
 
-# Setup logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
 logger = logging.getLogger(__name__)
 
-#
-logger.info("Loading
+# Models
+logger.info("Loading models...")
 whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
-
-logger.info("Loading SmolLM2-360M-Instruct...")
 model_name = "HuggingFaceTB/SmolLM2-360M-Instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(
```
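The hunk starts below line 9, so the imports backing `WhisperModel`, `AutoTokenizer`, and `AutoModelForCausalLM` never appear in the diff. A minimal sketch of what presumably sits above, assuming faster-whisper and transformers (the `os`, `tempfile`, `requests`, and `gradio` usages all show up later in the diff):

```python
# Presumed header above line 9; not shown in the hunk, inferred from usage below.
import os                 # os.getenv / os.unlink in the removed code
import tempfile           # the removed STT helper wrote temporary .wav files
import requests           # search_parallel issues HTTP requests
import gradio as gr       # gr.Blocks UI at the bottom of the file
from faster_whisper import WhisperModel
from transformers import AutoTokenizer, AutoModelForCausalLM
```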
```diff
@@ -32,14 +28,10 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="cpu",
     low_cpu_mem_usage=True
 )
-
-logger.info("All models loaded!")
-
-TAVILY_API_KEY = os.getenv('TAVILY_API_KEY', '')
-BRAVE_API_KEY = os.getenv('BRAVE_API_KEY', '')
+logger.info("Models loaded!")
 
 def search_parallel(query):
-    """
+    """DuckDuckGo search"""
     logger.info("[SEARCH] Starting...")
     try:
         response = requests.get(
```
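The body of `search_parallel` is cut off at `response = requests.get(`, and the request target never appears in the diff. Going only by the new docstring ("DuckDuckGo search") and the `HTMLParser` import, the pattern is presumably something like the sketch below; the endpoint, the parser class, and the result formatting are all assumptions, not the file's actual code:

```python
from html.parser import HTMLParser
import requests

class _TextCollector(HTMLParser):
    """Hypothetical helper: accumulates visible text from the results page."""
    def __init__(self):
        super().__init__()
        self.chunks = []

    def handle_data(self, data):
        if data.strip():
            self.chunks.append(data.strip())

def duckduckgo_search(query: str, timeout: float = 5.0):
    # Assumed endpoint: DuckDuckGo's HTML (JavaScript-free) results page.
    response = requests.get(
        "https://html.duckduckgo.com/html/",
        params={"q": query},
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=timeout,
    )
    parser = _TextCollector()
    parser.feed(response.text)
    # Mirror search_parallel's (results, engine) return shape.
    return " ".join(parser.chunks[:40]), "DuckDuckGo"
```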
```diff
@@ -81,7 +73,7 @@ def search_parallel(query):
     return "No search results", "None"
 
 def generate_answer(text_input):
-    """
+    """Generate answer"""
    logger.info(f"[AI] Question: {text_input[:60]}...")
 
     try:
```
```diff
@@ -90,12 +82,10 @@ def generate_answer(text_input):
 
         current_date = datetime.now().strftime("%B %d, %Y")
 
-        # Search
         search_start = time.time()
         search_results, search_engine = search_parallel(text_input)
         logger.info(f"[AI] Search: {time.time()-search_start:.2f}s")
 
-        # Generate
         messages = [
             {"role": "system", "content": f"Today is {current_date}. Answer briefly using search results (60-80 words)."},
             {"role": "user", "content": f"Search:\n{search_results}\n\nQ: {text_input}\nA:"}
```
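The code that consumes `messages` (new lines 92-116) is elided by the hunk boundary. With SmolLM2-360M-Instruct loaded through transformers, the generation step would conventionally look like the following; the token count and sampling settings are illustrative, not taken from the file:

```python
# Sketch of the elided generation step, using standard transformers APIs.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=120,  # roughly the 60-80 words the system prompt asks for
    do_sample=False,     # deterministic decoding for Q&A
)
# Decode only the newly generated tokens, skipping the prompt.
answer = tokenizer.decode(
    outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
```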
````diff
@@ -127,101 +117,86 @@ def generate_answer(text_input):
         logger.error(f"[AI] Error: {str(e)}")
         return f"Error: {str(e)}"
 
-
-
-
+# FastAPI app
+app = FastAPI()
+
+@app.post("/api/ai")
+async def api_ai_post(request: Request):
+    """AI endpoint - POST with JSON body"""
     try:
-
-
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
-            temp_audio.write(audio_bytes)
-            temp_path = temp_audio.name
+        body = await request.body()
+        logger.info(f"[API AI POST] Raw body: {body}")
 
-
-
-        os.unlink(temp_path)
+        if not body:
+            return JSONResponse({"error": "Empty request body"}, status_code=400)
 
-
-
+        try:
+            data = await request.json()
+        except Exception as e:
+            logger.error(f"[API AI POST] JSON parse error: {str(e)}")
+            return JSONResponse({"error": f"Invalid JSON: {str(e)}"}, status_code=400)
 
-
-        logger.error(f"[STT] Error: {str(e)}")
-        return ""
-
-# Create FastAPI app for Pluely endpoints
-app = FastAPI()
-
-@app.post("/api/stt")
-async def api_stt(request: Request):
-    """Direct STT endpoint for Pluely"""
-    try:
-        body = await request.json()
-        logger.info(f"[API STT] Received: {body}")
+        logger.info(f"[API AI POST] Parsed data: {data}")
 
-
-        if not
-            return JSONResponse({"error": "No
+        question = data.get("text", "")
+        if not question:
+            return JSONResponse({"error": "No 'text' field in JSON"}, status_code=400)
 
-
-        return JSONResponse({"
+        answer = generate_answer(question)
+        return JSONResponse({"answer": answer})
 
     except Exception as e:
-        logger.error(f"[API
+        logger.error(f"[API AI POST] Error: {str(e)}")
         return JSONResponse({"error": str(e)}, status_code=500)
 
-@app.
-async def
-    """
+@app.get("/api/ai")
+async def api_ai_get(text: str = Query(..., description="Question text")):
+    """AI endpoint - GET with query param (Pluely fallback)"""
     try:
-
-        logger.info(f"[API AI] Received: {body}")
+        logger.info(f"[API AI GET] Question: {text}")
 
-
-
-            return JSONResponse({"error": "No text provided"}, status_code=400)
+        if not text:
+            return JSONResponse({"error": "No text parameter"}, status_code=400)
 
-        answer = generate_answer(
+        answer = generate_answer(text)
         return JSONResponse({"answer": answer})
 
     except Exception as e:
-        logger.error(f"[API AI] Error: {str(e)}")
+        logger.error(f"[API AI GET] Error: {str(e)}")
         return JSONResponse({"error": str(e)}, status_code=500)
 
 @app.get("/health")
 async def health():
-    """Health check"""
     return {"status": "ok", "model": "SmolLM2-360M"}
 
-# Gradio UI
-with gr.Blocks(title="Fast Q&A"
+# Gradio UI
+with gr.Blocks(title="Fast Q&A") as demo:
     gr.Markdown("""
-    # ⚡ Ultra-Fast Q&A
-    **SmolLM2-360M** + **Direct REST API** for Pluely
+    # ⚡ Ultra-Fast Q&A - SmolLM2-360M
 
-    ## Pluely Configuration
+    ## 🎯 Pluely Configuration
 
-    ###
+    ### Option 1: GET with Query Param (EASIEST - Windows Compatible)
     ```
-    curl
+    curl https://archcoder-basic-app.hf.space/api/ai?text={{TEXT}}
     ```
-    **Response Path:** `
+    **Response Path:** `answer`
 
-    ###
+    ### Option 2: POST with JSON (If Option 1 doesn't work)
     ```
-    curl -X POST https://archcoder-basic-app.hf.space/api/ai -H "Content-Type: application/json" -
+    curl -X POST https://archcoder-basic-app.hf.space/api/ai -H "Content-Type: application/json" --data-binary @- << EOF
+    {"text":"{{TEXT}}"}
+    EOF
     ```
     **Response Path:** `answer`
     """)
 
     with gr.Tab("Test"):
-
-
-        test_btn = gr.Button("🚀 Test")
+        test_input = gr.Textbox(label="Question")
+        test_btn = gr.Button("🚀 Test")
         test_output = gr.Textbox(label="Answer", lines=8)
-
         test_btn.click(fn=generate_answer, inputs=[test_input], outputs=[test_output])
 
-# Mount Gradio to FastAPI
 app = gr.mount_gradio_app(app, demo, path="/")
 
 if __name__ == "__main__":
````
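Both routes return the same `{"answer": ...}` shape, so either transport works from Pluely or any other HTTP client. A quick smoke test against the deployed Space (the question text is an arbitrary example):

```python
import requests

BASE = "https://archcoder-basic-app.hf.space"

# GET variant: the "Option 1" configuration above.
r = requests.get(f"{BASE}/api/ai", params={"text": "What is FastAPI?"})
print(r.json()["answer"])

# POST variant: the "Option 2" configuration above.
r = requests.post(f"{BASE}/api/ai", json={"text": "What is FastAPI?"})
print(r.json()["answer"])

# Health probe.
print(requests.get(f"{BASE}/health").json())  # {"status": "ok", "model": "SmolLM2-360M"}
```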
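The diff cuts off at the `__main__` guard, so the launch call itself is not shown. On a Hugging Face Space it would conventionally be the following; the host and port values are an assumption (7860 is the Spaces default), not taken from the file:

```python
if __name__ == "__main__":
    # Assumed launch block; the hunk ends before the real one.
    uvicorn.run(app, host="0.0.0.0", port=7860)
```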