olive100 committed on
Commit
24e799c
·
1 Parent(s): ea63cad

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import json
3
+ import base64
4
+ import requests
5
+ import cv2
6
+
7
def hand_classification(img):
    """Classify a hand image via a Baidu EasyDL custom classification API.

    Parameters:
        img: the input image as a numpy array (as delivered by the gradio
             "image" input — assumed to be a format cv2.imencode accepts).

    Returns:
        dict mapping class name -> confidence score for the returned
        results, suitable for a gradio "label" output.

    Raises:
        ValueError: if the image cannot be JPEG-encoded.
        requests.HTTPError: if the auth or model endpoint returns an error status.
    """
    # Optional request parameters:
    # top_num: number of classes to return (the API defaults to 6).
    PARAMS = {"top_num": 2}

    # Endpoint URL taken from the model's "service details" page.
    MODEL_API_URL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/classification/handclass"

    # Calling the API requires an ACCESS_TOKEN. If you already have one,
    # fill it in below; otherwise leave it empty and supply the deployment's
    # API_KEY and SECRET_KEY — a fresh token is requested automatically.
    # SECURITY: credentials are hard-coded in source; move them to
    # environment variables or a secrets store before sharing this file.
    ACCESS_TOKEN = ""
    API_KEY = "TPhSFQU5i8tYivTgsLWBRLi9"
    SECRET_KEY = "eZbQHnOqTGYBVDXGTqzAy5kvU03t32Qz"

    print("1. 读取目标图片 ")
    # JPEG-encode first — the API expects compressed image bytes, not raw
    # pixels. Check the success flag instead of ignoring it (was a bug:
    # a failed encode would silently send garbage).
    success, encoded_image = cv2.imencode(".jpg", img)
    if not success:
        raise ValueError("cv2.imencode failed: input could not be JPEG-encoded")
    PARAMS["image"] = base64.b64encode(encoded_image.tobytes()).decode('UTF8')

    if not ACCESS_TOKEN:
        print("2. ACCESS_TOKEN 为空,调用鉴权接口获取TOKEN")
        auth_url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={}&client_secret={}".format(API_KEY, SECRET_KEY)
        auth_resp = requests.get(auth_url)
        # Fail loudly on auth errors instead of KeyError-ing below.
        auth_resp.raise_for_status()
        auth_resp_json = auth_resp.json()
        ACCESS_TOKEN = auth_resp_json["access_token"]
        print("新 ACCESS_TOKEN: {}".format(ACCESS_TOKEN))
    else:
        print("2. 使用已有 ACCESS_TOKEN")

    print("3. 向模型接口 'MODEL_API_URL' 发送请求")
    request_url = "{}?access_token={}".format(MODEL_API_URL, ACCESS_TOKEN)
    response = requests.post(url=request_url, json=PARAMS)
    # Surface HTTP failures explicitly before trying to parse the body.
    response.raise_for_status()
    response_json = response.json()
    response_str = json.dumps(response_json, indent=4, ensure_ascii=False)
    print("结果:\n{}".format(response_str))
    # Build {name: score} from however many results came back. The original
    # hard-coded results[0] and results[1], which raised IndexError when the
    # API returned fewer than two classes and dropped extras when top_num
    # was raised.
    return {item["name"]: item["score"] for item in response_json["results"]}
49
+
50
# Build the web UI: a single image input routed through the classifier to a
# gradio "label" output, which renders the returned {name: score} dict as a
# ranked confidence list.
demo = gr.Interface(fn=hand_classification, inputs="image", outputs="label")
gr.close_all()  # shut down any gradio apps already running in this process
demo.launch()   # start the local web server (blocks until stopped)