Update app.py
Browse files
app.py
CHANGED
|
@@ -1,89 +1,134 @@
|
|
| 1 |
import os
|
| 2 |
from flask import Flask, request, abort
|
| 3 |
import hashlib
|
|
|
|
| 4 |
import xmltodict
|
| 5 |
-
import
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
|
| 10 |
app = Flask(__name__)
|
| 11 |
|
| 12 |
-
#
|
| 13 |
-
# TOKEN = 'your_wechat_token'
|
| 14 |
-
# APPID = 'your_wechat_appid'
|
| 15 |
-
# APPSECRET = 'your_wechat_appsecret'
|
| 16 |
-
# OPENAI_API_KEY = 'your_openai_api_key'
|
| 17 |
TOKEN = os.getenv("TOKEN")
|
| 18 |
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 19 |
APPID = os.getenv("APPID")
|
| 20 |
APPSECRET = os.getenv("APPSECRET")
|
| 21 |
-
openai.api_key = OPENAI_API_KEY
|
| 22 |
|
| 23 |
-
|
|
|
|
|
|
|
| 24 |
user_models = {}
|
| 25 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
def split_message(message, max_length=500):
|
| 27 |
"""Split a message into chunks of max_length characters."""
|
| 28 |
return [message[i:i+max_length] for i in range(0, len(message), max_length)]
|
| 29 |
|
| 30 |
-
def
|
| 31 |
-
"""Get response from GPT model."""
|
| 32 |
try:
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
model=model,
|
| 35 |
-
messages=
|
|
|
|
| 36 |
)
|
| 37 |
-
return
|
| 38 |
except Exception as e:
|
| 39 |
return f"Error: {str(e)}"
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
@app.route('/', methods=['GET', 'POST'])
|
| 42 |
def wechat():
|
| 43 |
if request.method == 'GET':
|
| 44 |
-
token = TOKEN
|
| 45 |
-
signature = request.args.get('signature', '')
|
| 46 |
-
timestamp = request.args.get('timestamp', '')
|
| 47 |
-
nonce = request.args.get('nonce', '')
|
| 48 |
echostr = request.args.get('echostr', '')
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
|
|
|
|
|
|
| 52 |
abort(403)
|
| 53 |
-
|
| 54 |
-
else:
|
| 55 |
xml_data = request.data
|
| 56 |
-
msg =
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
if content.startswith('/model'):
|
| 62 |
-
# 切换模型
|
| 63 |
-
model = content.split(' ')[1]
|
| 64 |
-
user_models[user_id] = model
|
| 65 |
-
return f'Model switched to {model}'
|
| 66 |
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
<FromUserName><![CDATA[{msg.target}]]></FromUserName>
|
| 80 |
-
<CreateTime>{int(time.time())}</CreateTime>
|
| 81 |
-
<MsgType><![CDATA[text]]></MsgType>
|
| 82 |
-
<Content><![CDATA[{part}]]></Content>
|
| 83 |
-
</xml>
|
| 84 |
-
""")
|
| 85 |
-
|
| 86 |
-
return ''.join(reply)
|
| 87 |
|
| 88 |
if __name__ == '__main__':
|
| 89 |
app.run(host='0.0.0.0', port=7860)
|
|
|
|
| 1 |
import os
|
| 2 |
from flask import Flask, request, abort
|
| 3 |
import hashlib
|
| 4 |
+
import time
|
| 5 |
import xmltodict
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
import re
|
| 8 |
+
import base64
|
| 9 |
+
import requests
|
| 10 |
|
| 11 |
app = Flask(__name__)

# Environment-variable configuration
TOKEN = os.getenv("TOKEN")                    # WeChat server verification token
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # OpenAI API key
APPID = os.getenv("APPID")                    # WeChat official-account AppID
APPSECRET = os.getenv("APPSECRET")            # WeChat official-account AppSecret

client = OpenAI(api_key=OPENAI_API_KEY)

# Per-user model selection store: maps a sender id to the model name chosen
# via /setmodel. Plain in-memory dict, so choices are lost on restart.
user_models = {}
|
| 23 |
|
| 24 |
+
def check_signature():
    """Validate the WeChat server signature on the current request.

    WeChat signs each request with the SHA-1 digest of the lexicographically
    sorted concatenation of the shared token, the `timestamp` and the `nonce`
    query parameters; we recompute that digest and compare it against the
    `signature` query parameter.

    Returns:
        bool: True if the recomputed digest matches the supplied signature.
    """
    signature = request.args.get('signature', '')
    timestamp = request.args.get('timestamp', '')
    nonce = request.args.get('nonce', '')
    # Bug fix: if the TOKEN env var is unset, os.getenv returns None and
    # sorted([None, str, str]) raises TypeError in Python 3. Treat a missing
    # token as "cannot verify" rather than crashing the webhook.
    if TOKEN is None:
        return False
    tmp_str = ''.join(sorted([TOKEN, timestamp, nonce]))
    hash_obj = hashlib.sha1(tmp_str.encode('utf-8'))
    return hash_obj.hexdigest() == signature
|
| 33 |
+
|
| 34 |
def split_message(message, max_length=500):
    """Break *message* into consecutive chunks of at most *max_length* chars.

    An empty message yields an empty list; the final chunk may be shorter
    than *max_length*.
    """
    chunks = []
    start = 0
    while start < len(message):
        chunks.append(message[start:start + max_length])
        start += max_length
    return chunks
|
| 37 |
|
| 38 |
+
def get_openai_response(message, model="gpt-4o", image_url=None, max_tokens=300):
    """Ask the OpenAI chat API for a reply to *message*.

    Args:
        message: The user's text prompt.
        model: Chat model name to request.
        image_url: Optional image URL; when given, the user turn is sent as a
            multimodal (text + image_url) content list so vision models can
            see the image.
        max_tokens: Upper bound on the completion length. Generalized from a
            previously hard-coded constant; 300 is kept as the default so
            existing callers behave identically.

    Returns:
        The assistant's reply text, or an "Error: ..." string on any failure —
        callers treat errors as ordinary reply text to send back to the user.
    """
    try:
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": message}
        ]

        if image_url:
            # Vision input: replace the plain-text user content with the
            # structured text + image form the chat completions API expects.
            messages[1]["content"] = [
                {"type": "text", "text": message},
                {"type": "image_url", "image_url": {"url": image_url}}
            ]

        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Deliberate catch-all: any API failure is surfaced to the chat user
        # as readable text instead of a 500 from the webhook endpoint.
        return f"Error: {str(e)}"
|
| 59 |
|
| 60 |
+
def handle_text_message(from_user, to_user, content):
    """Handle an incoming WeChat text message and return the reply XML.

    Messages starting with '/' are treated as bot commands; anything else is
    forwarded to the OpenAI API using the sender's currently selected model.

    Args:
        from_user: OpenID of the message sender.
        to_user: Our account id (the original recipient of the message).
        content: The raw text the user sent.

    Returns:
        A WeChat passive-reply XML string produced by format_reply().
    """
    if content.startswith('/'):
        # Command handling
        parts = content.split()
        command = parts[0][1:]
        if command == 'help':
            reply_text = "Available commands: /help, /setmodel [model_name]"
        elif command == 'setmodel':
            if len(parts) > 1:
                model = parts[1]
                # NOTE(review): the model name is stored unvalidated; a bad
                # name only surfaces later as an API error string.
                user_models[from_user] = model
                reply_text = f"Model switched to {model}"
            else:
                reply_text = "Please specify a model name. Usage: /setmodel [model_name]"
        else:
            reply_text = "Unknown command. Type /help for available commands."
        # Bug fix: command responses were previously returned as bare text,
        # but WeChat expects every passive reply in the reply-XML envelope
        # (the conversation branch below already does this). Wrap them too.
        return format_reply(from_user, to_user, reply_text)

    # Normal conversation: call the OpenAI API with the user's chosen model.
    model = user_models.get(from_user, "gpt-4o")
    response = get_openai_response(content, model)
    return format_reply(from_user, to_user, response)
|
| 81 |
+
|
| 82 |
+
def handle_image_message(from_user, to_user, pic_url):
    """Describe an image the user sent by forwarding its URL to the model.

    Uses the sender's selected model (default "gpt-4o") and wraps the model's
    answer in the WeChat reply XML.
    """
    chosen_model = user_models.get(from_user, "gpt-4o")
    answer = get_openai_response("What's in this image?", chosen_model, pic_url)
    return format_reply(from_user, to_user, answer)
|
| 86 |
+
|
| 87 |
+
def format_reply(from_user, to_user, content):
    """Wrap *content* in WeChat passive-reply XML, one <xml> doc per chunk.

    The content is split into 500-char chunks (split_message default) and
    each chunk becomes its own <xml> reply document; the documents are then
    concatenated into a single string.

    NOTE(review): WeChat passive replies appear to allow only ONE reply
    document per request — concatenating several <xml> roots is likely not
    parsed as valid by the WeChat server when content exceeds one chunk.
    Confirm against the WeChat message API; a customer-service push API may
    be needed for multi-part answers.
    """
    response_parts = split_message(content)
    replies = []
    for part in response_parts:
        # ToUserName is the original sender, FromUserName our account id —
        # the reply envelope swaps the incoming direction.
        reply = f"""
        <xml>
        <ToUserName><![CDATA[{from_user}]]></ToUserName>
        <FromUserName><![CDATA[{to_user}]]></FromUserName>
        <CreateTime>{int(time.time())}</CreateTime>
        <MsgType><![CDATA[text]]></MsgType>
        <Content><![CDATA[{part}]]></Content>
        </xml>
        """
        replies.append(reply)
    return ''.join(replies)
|
| 102 |
+
|
| 103 |
@app.route('/', methods=['GET', 'POST'])
def wechat():
    """Single WeChat webhook endpoint.

    GET  — server-URL verification handshake: echo back `echostr` when the
           signature checks out, otherwise 403.
    POST — incoming message/event push: verify the signature, parse the XML
           body, and dispatch on MsgType (text / image / event).
    """
    if request.method == 'GET':
        echostr = request.args.get('echostr', '')
        if check_signature():
            return echostr
        abort(403)
    elif request.method == 'POST':
        # Message pushes carry the same signature query params as the
        # verification handshake; reject unsigned/forged requests.
        if not check_signature():
            abort(403)

        # NOTE(review): assumes the body is well-formed plaintext WeChat XML
        # with MsgType/FromUserName/ToUserName present; malformed or
        # encrypted-mode payloads would raise here — confirm account config.
        xml_data = request.data
        msg = xmltodict.parse(xml_data)['xml']
        msg_type = msg['MsgType']
        from_user = msg['FromUserName']
        to_user = msg['ToUserName']

        if msg_type == 'text':
            content = msg['Content']
            return handle_text_message(from_user, to_user, content)
        elif msg_type == 'image':
            pic_url = msg['PicUrl']
            return handle_image_message(from_user, to_user, pic_url)
        elif msg_type == 'event':
            event = msg['Event']
            if event == 'subscribe':
                return format_reply(from_user, to_user, "感谢关注!输入 /help 查看可用命令。")

        # Unhandled message/event types: acknowledge so WeChat stops retrying.
        return 'success'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
|
| 133 |
if __name__ == '__main__':
    # Development entry point: listen on all interfaces, port 7860.
    app.run(host='0.0.0.0', port=7860)
|