OppaAI committed on
Commit
dd985a0
·
verified ·
1 Parent(s): 0413f6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -4,8 +4,8 @@ import base64
4
  import requests
5
  import os
6
 
7
- HF_TOKEN = os.environ.get("HF_CV_ROBOT_TOKEN") # Space Secrets 裡設定
8
- MODEL = "Qwen/Qwen2.5-VL-7B-Instruct" # 你要用的 VLM model
9
 
10
  if not HF_TOKEN:
11
  print("ERROR: HF_CV_ROBOT_TOKEN is not set!")
@@ -17,7 +17,7 @@ def process(payload: dict):
17
 
18
  headers = {"Authorization": f"Bearer {HF_TOKEN}"}
19
 
20
- # 這裡用 type='file' + Base64,Router API 支援直接解析
21
  data = {
22
  "model": MODEL,
23
  "messages": [
@@ -25,12 +25,13 @@ def process(payload: dict):
25
  "role": "user",
26
  "content": [
27
  {"type": "text", "text": "Describe this image in detail."},
28
- {"type": "file", "file": {"b64": image_b64, "name": "image.jpg"}}
29
  ]
30
  }
31
  ]
32
  }
33
 
 
34
  resp = requests.post(
35
  "https://router.huggingface.co/v1/chat/completions",
36
  headers=headers,
@@ -41,7 +42,7 @@ def process(payload: dict):
41
  if resp.status_code != 200:
42
  return {"error": f"VLM API error: {resp.status_code}, {resp.text}"}
43
 
44
- # 解析回傳文字描述
45
  try:
46
  vlm_text = resp.json()["choices"][0]["message"]["content"][0]["text"]
47
  except (KeyError, IndexError, json.JSONDecodeError) as e:
 
4
  import requests
5
  import os
6
 
7
+ HF_TOKEN = os.environ.get("HF_CV_ROBOT_TOKEN") # HF Tokens
8
+ MODEL = "Qwen/Qwen2.5-VL-7B-Instruct" # VLM model
9
 
10
  if not HF_TOKEN:
11
  print("ERROR: HF_CV_ROBOT_TOKEN is not set!")
 
17
 
18
  headers = {"Authorization": f"Bearer {HF_TOKEN}"}
19
 
20
+ # data format
21
  data = {
22
  "model": MODEL,
23
  "messages": [
 
25
  "role": "user",
26
  "content": [
27
  {"type": "text", "text": "Describe this image in detail."},
28
+ {"type": "image_file", "image_file": {"b64": image_b64}}
29
  ]
30
  }
31
  ]
32
  }
33
 
34
+
35
  resp = requests.post(
36
  "https://router.huggingface.co/v1/chat/completions",
37
  headers=headers,
 
42
  if resp.status_code != 200:
43
  return {"error": f"VLM API error: {resp.status_code}, {resp.text}"}
44
 
45
+ # get image description as response
46
  try:
47
  vlm_text = resp.json()["choices"][0]["message"]["content"][0]["text"]
48
  except (KeyError, IndexError, json.JSONDecodeError) as e: