Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,14 +7,15 @@ import sqlite3
|
|
| 7 |
import gc
|
| 8 |
import time
|
| 9 |
import re
|
| 10 |
-
import traceback # <--
|
|
|
|
| 11 |
|
| 12 |
# --- 외부 모듈 임포트 ---
|
| 13 |
import reg_embedding_system
|
| 14 |
import leximind_prompts
|
| 15 |
|
| 16 |
-
# --- Together AI SDK ---
|
| 17 |
-
from together import Together
|
| 18 |
|
| 19 |
# --- eventlet monkey patch (Gunicorn + SocketIO 필수!) ---
|
| 20 |
import eventlet
|
|
@@ -48,11 +49,11 @@ lexi_prompts = leximind_prompts.PromptLibrary()
|
|
| 48 |
# --- RAG 객체 ---
|
| 49 |
region_rag_objects = {}
|
| 50 |
|
| 51 |
-
# --- Together AI
|
| 52 |
TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
|
| 53 |
if not TOGETHER_API_KEY:
|
| 54 |
raise EnvironmentError("TOGETHER_API_KEY가 설정되지 않았습니다. Hugging Face Secrets에 추가하세요.")
|
| 55 |
-
client = Together(api_key=TOGETHER_API_KEY)
|
| 56 |
|
| 57 |
# --- RAG 로딩 ---
|
| 58 |
def load_rag_objects():
|
|
@@ -168,32 +169,60 @@ def cleanup_connections():
|
|
| 168 |
except:
|
| 169 |
pass
|
| 170 |
|
| 171 |
-
# --- Together AI 분석 ---
|
| 172 |
def Gemma3_AI_analysis(query_txt, content_txt):
|
| 173 |
content_txt = "\n".join(doc.page_content for doc in content_txt) if isinstance(content_txt, list) else str(content_txt)
|
| 174 |
query_txt = str(query_txt)
|
| 175 |
prompt = lexi_prompts.use_prompt(lexi_prompts.AI_system_prompt, query_txt=query_txt, content_txt=content_txt)
|
| 176 |
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
def Gemma3_AI_Translate(query_txt):
|
| 187 |
query_txt = str(query_txt)
|
| 188 |
prompt = lexi_prompts.use_prompt(lexi_prompts.query_translator, query_txt=query_txt)
|
| 189 |
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 197 |
|
| 198 |
# --- 검색 ---
|
| 199 |
def search_DB_from_multiple_regions(query, selected_regions, region_rag_objects):
|
|
|
|
| 7 |
import gc
|
| 8 |
import time
|
| 9 |
import re
|
| 10 |
+
import traceback # <-- 에러 상세 출력을 위해 임포트
|
| 11 |
+
import requests # <-- Pydantic v1 환경을 위해 Together SDK 대신 requests 사용
|
| 12 |
|
| 13 |
# --- 외부 모듈 임포트 ---
|
| 14 |
import reg_embedding_system
|
| 15 |
import leximind_prompts
|
| 16 |
|
| 17 |
+
# --- Together AI SDK (제거됨) ---
|
| 18 |
+
# from together import Together
|
| 19 |
|
| 20 |
# --- eventlet monkey patch (Gunicorn + SocketIO 필수!) ---
|
| 21 |
import eventlet
|
|
|
|
| 49 |
# --- RAG 객체 ---
|
| 50 |
region_rag_objects = {}
|
| 51 |
|
| 52 |
+
# --- Together AI 설정 (SDK 대신 API 호출에 사용) ---
|
| 53 |
TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
|
| 54 |
if not TOGETHER_API_KEY:
|
| 55 |
raise EnvironmentError("TOGETHER_API_KEY가 설정되지 않았습니다. Hugging Face Secrets에 추가하세요.")
|
| 56 |
+
# client = Together(api_key=TOGETHER_API_KEY) # <--- Together SDK 클라이언트 제거
|
| 57 |
|
| 58 |
# --- RAG 로딩 ---
|
| 59 |
def load_rag_objects():
|
|
|
|
| 169 |
except:
|
| 170 |
pass
|
| 171 |
|
| 172 |
+
# --- Together AI 분석 (SDK -> requests 직접 호출로 변경) ---
|
| 173 |
def Gemma3_AI_analysis(query_txt, content_txt):
    """Analyze *content_txt* against *query_txt* via the Together AI chat-completions REST API.

    Calls the HTTP endpoint directly with ``requests`` instead of the Together
    SDK (the SDK was removed for Pydantic v1 compatibility — see module imports).

    Args:
        query_txt: User query; coerced to ``str``.
        content_txt: Either a list of documents exposing ``page_content``
            (joined with newlines) or any value coercible to ``str``.

    Returns:
        str: The model's reply text, or a Korean error message string when the
        HTTP call fails or the response payload is malformed.
    """
    # Accept both RAG document lists and plain strings.
    if isinstance(content_txt, list):
        content_txt = "\n".join(doc.page_content for doc in content_txt)
    else:
        content_txt = str(content_txt)
    query_txt = str(query_txt)
    prompt = lexi_prompts.use_prompt(lexi_prompts.AI_system_prompt, query_txt=query_txt, content_txt=content_txt)

    headers = {
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 1024,
        "temperature": 0.7,
    }

    try:
        response = requests.post(
            "https://api.together.xyz/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=120,  # analysis prompts are long; allow a generous timeout
        )
        response.raise_for_status()  # raise on HTTP 4xx/5xx

        data = response.json()
        return data["choices"][0]["message"]["content"]
    # FIX: also catch malformed response bodies — a 200 reply that is not JSON
    # (ValueError from .json()) or lacks the expected "choices" structure
    # (KeyError/IndexError/TypeError) previously escaped uncaught and crashed
    # the caller instead of returning the graceful error string below.
    except (requests.exceptions.RequestException, ValueError, KeyError, IndexError, TypeError) as e:
        print(f"Together AI 분석 API 호출 실패: {e}")
        traceback.print_exc()
        return f"AI 분석 중 오류가 발생했습니다: {e}"
|
| 199 |
+
|
| 200 |
+
# --- Together AI 번역 (SDK -> requests 직접 호출로 변경) ---
|
| 201 |
def Gemma3_AI_Translate(query_txt):
    """Translate *query_txt* via the Together AI chat-completions REST API.

    Calls the HTTP endpoint directly with ``requests`` instead of the Together
    SDK (the SDK was removed for Pydantic v1 compatibility — see module imports).

    Args:
        query_txt: User query; coerced to ``str``.

    Returns:
        str: The model's translated text, or the original *query_txt* on any
        failure so the downstream search pipeline keeps working.
    """
    query_txt = str(query_txt)
    prompt = lexi_prompts.use_prompt(lexi_prompts.query_translator, query_txt=query_txt)

    headers = {
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        # Smaller/faster model than the analysis path — translation is simpler.
        "model": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 512,
        "temperature": 0.3,  # low temperature: translation should be deterministic-ish
    }

    try:
        response = requests.post(
            "https://api.together.xyz/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        response.raise_for_status()  # raise on HTTP 4xx/5xx

        data = response.json()
        return data["choices"][0]["message"]["content"]
    # FIX: also catch malformed response bodies — non-JSON 200 replies
    # (ValueError) or a payload missing the "choices" structure
    # (KeyError/IndexError/TypeError) previously escaped uncaught, defeating
    # the intended fall-back-to-original-query behavior below.
    except (requests.exceptions.RequestException, ValueError, KeyError, IndexError, TypeError) as e:
        print(f"Together AI 번역 API 호출 실패: {e}")
        traceback.print_exc()
        return query_txt  # on failure, use the untranslated query (minimal functionality preserved)
|
| 226 |
|
| 227 |
# --- 검색 ---
|
| 228 |
def search_DB_from_multiple_regions(query, selected_regions, region_rag_objects):
|