Update app.py
Browse files
app.py
CHANGED
|
@@ -23,6 +23,15 @@ from datetime import datetime
|
|
| 23 |
from typing import Generator, List, Dict, Optional
|
| 24 |
from xml.etree import ElementTree as ET
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
# ============== Comic Style CSS ==============
|
| 27 |
COMIC_CSS = """
|
| 28 |
@import url('https://fonts.googleapis.com/css2?family=Bangers&family=Comic+Neue:wght@400;700&display=swap');
|
|
@@ -872,45 +881,46 @@ def convert_hwp_to_markdown(input_path: str) -> tuple:
|
|
| 872 |
return text, None
|
| 873 |
return None, error
|
| 874 |
|
| 875 |
-
# ============== LLM API ==============
|
| 876 |
-
def call_groq_api_stream(messages: List[Dict]
|
| 877 |
-
|
| 878 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 879 |
return
|
|
|
|
| 880 |
try:
|
| 881 |
-
|
| 882 |
-
|
| 883 |
-
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
|
| 887 |
-
|
| 888 |
-
|
| 889 |
-
|
| 890 |
-
|
| 891 |
-
|
| 892 |
)
|
| 893 |
-
|
| 894 |
-
|
| 895 |
-
|
| 896 |
-
|
| 897 |
-
|
| 898 |
-
line = line.decode('utf-8')
|
| 899 |
-
if line.startswith('data: ') and line[6:] != '[DONE]':
|
| 900 |
-
try:
|
| 901 |
-
data = json.loads(line[6:])
|
| 902 |
-
content = data.get('choices', [{}])[0].get('delta', {}).get('content', '')
|
| 903 |
-
if content:
|
| 904 |
-
yield content
|
| 905 |
-
except:
|
| 906 |
-
continue
|
| 907 |
except Exception as e:
|
| 908 |
-
|
| 909 |
-
|
| 910 |
-
|
| 911 |
-
|
| 912 |
-
|
|
|
|
|
|
|
|
|
|
| 913 |
return
|
|
|
|
| 914 |
try:
|
| 915 |
formatted_messages = [{"role": m["role"], "content": m["content"]} for m in messages[:-1]]
|
| 916 |
formatted_messages.append({
|
|
@@ -920,21 +930,24 @@ def call_fireworks_api_stream(messages: List[Dict], image_base64: str, mime_type
|
|
| 920 |
{"type": "text", "text": messages[-1]["content"]}
|
| 921 |
]
|
| 922 |
})
|
|
|
|
| 923 |
response = requests.post(
|
| 924 |
"https://api.fireworks.ai/inference/v1/chat/completions",
|
| 925 |
-
headers={"Authorization": f"Bearer {
|
| 926 |
json={
|
| 927 |
"model": "accounts/fireworks/models/qwen3-vl-235b-a22b-thinking",
|
| 928 |
-
"max_tokens":
|
| 929 |
"temperature": 0.6,
|
| 930 |
"messages": formatted_messages,
|
| 931 |
"stream": True
|
| 932 |
},
|
| 933 |
stream=True
|
| 934 |
)
|
|
|
|
| 935 |
if response.status_code != 200:
|
| 936 |
yield f"โ Fireworks API ์ค๋ฅ: {response.status_code}"
|
| 937 |
return
|
|
|
|
| 938 |
for line in response.iter_lines():
|
| 939 |
if line:
|
| 940 |
line = line.decode('utf-8')
|
|
@@ -1093,6 +1106,7 @@ def chat_response(message: str, history: List[Dict], file: Optional[str],
|
|
| 1093 |
|
| 1094 |
# ๋๋ฒ๊ทธ ๋ก๊ทธ
|
| 1095 |
print(f"\n๐ค [API ์์ฒญ]")
|
|
|
|
| 1096 |
print(f" - ๋ฉ์์ง ์: {len(api_messages)}")
|
| 1097 |
print(f" - ํ์ผ ํ์
: {file_type}")
|
| 1098 |
print(f" - ๋ฌธ์ ๊ธธ์ด: {len(file_content) if file_content else 0} ๊ธ์")
|
|
@@ -1102,12 +1116,12 @@ def chat_response(message: str, history: List[Dict], file: Optional[str],
|
|
| 1102 |
# ์๋ต ์์ฑ
|
| 1103 |
full_response = ""
|
| 1104 |
if file_type == "image":
|
| 1105 |
-
for chunk in call_fireworks_api_stream(api_messages, file_content, file_mime
|
| 1106 |
full_response += chunk
|
| 1107 |
history[-1] = {"role": "assistant", "content": full_response}
|
| 1108 |
yield history, session_id
|
| 1109 |
else:
|
| 1110 |
-
for chunk in call_groq_api_stream(api_messages
|
| 1111 |
full_response += chunk
|
| 1112 |
history[-1] = {"role": "assistant", "content": full_response}
|
| 1113 |
yield history, session_id
|
|
|
|
| 23 |
from typing import Generator, List, Dict, Optional
|
| 24 |
from xml.etree import ElementTree as ET
|
| 25 |
|
| 26 |
# Import the Groq client library. The import is guarded so the app still
# starts when the package is missing; call_groq_api_stream checks
# GROQ_AVAILABLE and reports the missing dependency to the user at call time.
try:
    from groq import Groq
    GROQ_AVAILABLE = True  # consumed by call_groq_api_stream
    print("✅ Groq library loaded")
except ImportError:
    GROQ_AVAILABLE = False
    print("❌ Groq library not available - pip install groq")
|
| 34 |
+
|
| 35 |
# ============== Comic Style CSS ==============
|
| 36 |
COMIC_CSS = """
|
| 37 |
@import url('https://fonts.googleapis.com/css2?family=Bangers&family=Comic+Neue:wght@400;700&display=swap');
|
|
|
|
| 881 |
return text, None
|
| 882 |
return None, error
|
| 883 |
|
| 884 |
+
# ============== LLM API (Groq ๋ผ์ด๋ธ๋ฌ๋ฆฌ ์ฌ์ฉ) ==============
|
| 885 |
+
def call_groq_api_stream(messages: List[Dict]) -> Generator[str, None, None]:
|
| 886 |
+
"""Groq API ์คํธ๋ฆฌ๋ฐ ํธ์ถ - openai/gpt-oss-120b ๋ชจ๋ธ ์ฌ์ฉ"""
|
| 887 |
+
if not GROQ_AVAILABLE:
|
| 888 |
+
yield "โ Groq ๋ผ์ด๋ธ๋ฌ๋ฆฌ๊ฐ ์ค์น๋์ง ์์์ต๋๋ค. pip install groq"
|
| 889 |
+
return
|
| 890 |
+
|
| 891 |
+
if not GROQ_API_KEY:
|
| 892 |
+
yield "โ GROQ_API_KEY ํ๊ฒฝ๋ณ์๊ฐ ์ค์ ๋์ง ์์์ต๋๋ค."
|
| 893 |
return
|
| 894 |
+
|
| 895 |
try:
|
| 896 |
+
client = Groq(api_key=GROQ_API_KEY)
|
| 897 |
+
|
| 898 |
+
completion = client.chat.completions.create(
|
| 899 |
+
model="openai/gpt-oss-120b",
|
| 900 |
+
messages=messages,
|
| 901 |
+
temperature=1,
|
| 902 |
+
max_completion_tokens=8192,
|
| 903 |
+
top_p=1,
|
| 904 |
+
reasoning_effort="medium",
|
| 905 |
+
stream=True,
|
| 906 |
+
stop=None
|
| 907 |
)
|
| 908 |
+
|
| 909 |
+
for chunk in completion:
|
| 910 |
+
if chunk.choices[0].delta.content:
|
| 911 |
+
yield chunk.choices[0].delta.content
|
| 912 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 913 |
except Exception as e:
|
| 914 |
+
error_msg = str(e)
|
| 915 |
+
print(f"โ Groq API ์ค๋ฅ: {error_msg}")
|
| 916 |
+
yield f"โ API ์ค๋ฅ: {error_msg}"
|
| 917 |
+
|
| 918 |
+
def call_fireworks_api_stream(messages: List[Dict], image_base64: str, mime_type: str) -> Generator[str, None, None]:
|
| 919 |
+
"""Fireworks API ์คํธ๋ฆฌ๋ฐ ํธ์ถ (์ด๋ฏธ์ง ๋ถ์์ฉ)"""
|
| 920 |
+
if not FIREWORKS_API_KEY:
|
| 921 |
+
yield "โ FIREWORKS_API_KEY ํ๊ฒฝ๋ณ์๊ฐ ์ค์ ๋์ง ์์์ต๋๋ค."
|
| 922 |
return
|
| 923 |
+
|
| 924 |
try:
|
| 925 |
formatted_messages = [{"role": m["role"], "content": m["content"]} for m in messages[:-1]]
|
| 926 |
formatted_messages.append({
|
|
|
|
| 930 |
{"type": "text", "text": messages[-1]["content"]}
|
| 931 |
]
|
| 932 |
})
|
| 933 |
+
|
| 934 |
response = requests.post(
|
| 935 |
"https://api.fireworks.ai/inference/v1/chat/completions",
|
| 936 |
+
headers={"Authorization": f"Bearer {FIREWORKS_API_KEY}", "Content-Type": "application/json"},
|
| 937 |
json={
|
| 938 |
"model": "accounts/fireworks/models/qwen3-vl-235b-a22b-thinking",
|
| 939 |
+
"max_tokens": 4096,
|
| 940 |
"temperature": 0.6,
|
| 941 |
"messages": formatted_messages,
|
| 942 |
"stream": True
|
| 943 |
},
|
| 944 |
stream=True
|
| 945 |
)
|
| 946 |
+
|
| 947 |
if response.status_code != 200:
|
| 948 |
yield f"โ Fireworks API ์ค๋ฅ: {response.status_code}"
|
| 949 |
return
|
| 950 |
+
|
| 951 |
for line in response.iter_lines():
|
| 952 |
if line:
|
| 953 |
line = line.decode('utf-8')
|
|
|
|
| 1106 |
|
| 1107 |
# ๋๋ฒ๊ทธ ๋ก๊ทธ
|
| 1108 |
print(f"\n๐ค [API ์์ฒญ]")
|
| 1109 |
+
print(f" - ๋ชจ๋ธ: openai/gpt-oss-120b")
|
| 1110 |
print(f" - ๋ฉ์์ง ์: {len(api_messages)}")
|
| 1111 |
print(f" - ํ์ผ ํ์
: {file_type}")
|
| 1112 |
print(f" - ๋ฌธ์ ๊ธธ์ด: {len(file_content) if file_content else 0} ๊ธ์")
|
|
|
|
| 1116 |
# ์๋ต ์์ฑ
|
| 1117 |
full_response = ""
|
| 1118 |
if file_type == "image":
|
| 1119 |
+
for chunk in call_fireworks_api_stream(api_messages, file_content, file_mime):
|
| 1120 |
full_response += chunk
|
| 1121 |
history[-1] = {"role": "assistant", "content": full_response}
|
| 1122 |
yield history, session_id
|
| 1123 |
else:
|
| 1124 |
+
for chunk in call_groq_api_stream(api_messages):
|
| 1125 |
full_response += chunk
|
| 1126 |
history[-1] = {"role": "assistant", "content": full_response}
|
| 1127 |
yield history, session_id
|