Update app.py
Browse files
app.py
CHANGED
|
@@ -20,7 +20,7 @@ LLM_API = os.environ.get("LLM_API", "").strip()
|
|
| 20 |
LLM_URL = os.environ.get("LLM_URL")
|
| 21 |
USER_ID = "HuggingFace Space"
|
| 22 |
|
| 23 |
-
async def send_chat_message(LLM_URL, LLM_API, category,
|
| 24 |
payload = {
|
| 25 |
"inputs": {},
|
| 26 |
"query": category,
|
|
@@ -29,32 +29,62 @@ async def send_chat_message(LLM_URL, LLM_API, category, file_id):
|
|
| 29 |
"files": [
|
| 30 |
{
|
| 31 |
"type": "image",
|
| 32 |
-
"transfer_method": "
|
| 33 |
-
"
|
| 34 |
}
|
| 35 |
]
|
| 36 |
}
|
| 37 |
|
| 38 |
-
|
| 39 |
-
async with
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
json
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
async def upload_file(LLM_URL, LLM_API, file_path, user_id):
|
| 60 |
if not os.path.exists(file_path):
|
|
@@ -87,17 +117,25 @@ async def upload_file(LLM_URL, LLM_API, file_path, user_id):
|
|
| 87 |
return "Error: Invalid JSON response"
|
| 88 |
|
| 89 |
async def handle_input(file_path, category):
|
| 90 |
-
#
|
| 91 |
-
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
|
| 102 |
# UI 元件 & 資料
|
| 103 |
examples = [
|
|
|
|
| 20 |
LLM_URL = os.environ.get("LLM_URL")
|
| 21 |
USER_ID = "HuggingFace Space"
|
| 22 |
|
| 23 |
+
async def send_chat_message(LLM_URL, LLM_API, category, file_url):
|
| 24 |
payload = {
|
| 25 |
"inputs": {},
|
| 26 |
"query": category,
|
|
|
|
| 29 |
"files": [
|
| 30 |
{
|
| 31 |
"type": "image",
|
| 32 |
+
"transfer_method": "remote_url",
|
| 33 |
+
"url": file_url
|
| 34 |
}
|
| 35 |
]
|
| 36 |
}
|
| 37 |
|
| 38 |
+
async with aiohttp.ClientSession() as session:
|
| 39 |
+
async with session.post(
|
| 40 |
+
f"{LLM_URL}/chat-messages",
|
| 41 |
+
headers={
|
| 42 |
+
"Authorization": f"Bearer {LLM_API}",
|
| 43 |
+
"Content-Type": "application/json"
|
| 44 |
+
},
|
| 45 |
+
json=payload
|
| 46 |
+
) as response:
|
| 47 |
+
if response.status != 200:
|
| 48 |
+
error_text = await response.text()
|
| 49 |
+
return f"Error: Server returned status {response.status} - {error_text}"
|
| 50 |
+
data = await response.json()
|
| 51 |
+
return data.get("answer") or data.get("thought") or "No answer returned"
|
| 52 |
+
|
| 53 |
+
# async def send_chat_message(LLM_URL, LLM_API, category, file_id):
|
| 54 |
+
# payload = {
|
| 55 |
+
# "inputs": {},
|
| 56 |
+
# "query": category,
|
| 57 |
+
# "conversation_id": "",
|
| 58 |
+
# "user": USER_ID,
|
| 59 |
+
# "files": [
|
| 60 |
+
# {
|
| 61 |
+
# "type": "image",
|
| 62 |
+
# "transfer_method": "remote_url",
|
| 63 |
+
# "upload_file_id": file_id
|
| 64 |
+
# }
|
| 65 |
+
# ]
|
| 66 |
+
# }
|
| 67 |
+
|
| 68 |
+
# try:
|
| 69 |
+
# async with aiohttp.ClientSession() as session:
|
| 70 |
+
# async with session.post(
|
| 71 |
+
# f"{LLM_URL}/chat-messages",
|
| 72 |
+
# headers={"Authorization": f"Bearer {LLM_API}"},
|
| 73 |
+
# json=payload
|
| 74 |
+
# ) as response:
|
| 75 |
+
|
| 76 |
+
# if response.status != 200:
|
| 77 |
+
# error_text = await response.text()
|
| 78 |
+
# return f"Error: Server returned status {response.status} - {error_text}"
|
| 79 |
+
|
| 80 |
+
# try:
|
| 81 |
+
# data = await response.json()
|
| 82 |
+
# return data.get("thought", "Error: No thought in response")
|
| 83 |
+
# except Exception as e:
|
| 84 |
+
# return f"Error: Failed to parse JSON - {e}"
|
| 85 |
+
|
| 86 |
+
# except Exception as e:
|
| 87 |
+
# return f"Error: Unexpected exception - {e}"
|
| 88 |
|
| 89 |
async def upload_file(LLM_URL, LLM_API, file_path, user_id):
|
| 90 |
if not os.path.exists(file_path):
|
|
|
|
| 117 |
return "Error: Invalid JSON response"
|
| 118 |
|
| 119 |
async def handle_input(file_path, category):
    """Resolve a local file path to a public repo URL and query the LLM.

    Args:
        file_path: Local path of the uploaded/selected image; only its
            basename is used to build the public URL.
        category: The query/category string forwarded to the chat endpoint.

    Returns:
        The model's answer string from ``send_chat_message`` (or an
        "Error: ..." string on failure, as produced by that helper).
    """
    # Extract the file name from the local path.
    filename = os.path.basename(file_path)

    # Build a direct raw-content URL into the HuggingFace Space repo.
    # BUG FIX: the original left a literal placeholder here instead of
    # interpolating `filename`, so the remote_url always pointed at a
    # nonexistent path. Interpolate the computed basename.
    file_url = (
        "https://huggingface.co/spaces/DeepLearning101/Multimodal-Playground"
        f"/blob/main/DEMO/{filename}?raw=true"
    )

    return await send_chat_message(LLM_URL, LLM_API, category, file_url)
|
| 127 |
+
# async def handle_input(file_path, category):
|
| 128 |
+
# # 如果 tmp 路徑不存在,改成 repo 內的 DEMO 路徑
|
| 129 |
+
# if not os.path.exists(file_path):
|
| 130 |
+
# file_path = os.path.join("DEMO", os.path.basename(file_path))
|
| 131 |
|
| 132 |
+
# upload_response = await upload_file(LLM_URL, LLM_API, file_path, USER_ID)
|
| 133 |
+
# if isinstance(upload_response, str) and upload_response.startswith("Error"):
|
| 134 |
+
# return upload_response
|
| 135 |
+
# file_id = upload_response.get("id")
|
| 136 |
+
# if not file_id:
|
| 137 |
+
# return "Error: No file ID returned from upload"
|
| 138 |
+
# return await send_chat_message(LLM_URL, LLM_API, category, file_id)
|
| 139 |
|
| 140 |
# UI 元件 & 資料
|
| 141 |
examples = [
|