"""Minimal Flask chat endpoint backed by the OpenAI ChatCompletion API.

POST /chat with JSON {prompt, prompt_template, temperature, max_tokens,
user_token} and receive the paired chat history plus a token-usage message.
"""

from flask import Flask, request, jsonify
import openai
import os

openai.api_key = os.environ.get("OPENAI_API_KEY")

# Named system-prompt templates selectable by the client.
# NOTE(review): the original referenced `prompt_templates` without ever
# defining it (NameError on first use). Populate with real templates;
# the empty-string default preserves the "no template" path.
prompt_templates = {
    "default": "You are a helpful assistant.",
}

# Hard cap displayed to the user; usage is accumulated in `state`.
TOKEN_LIMIT = 3000


def _paired_history(history):
    """Collapse the flat role-alternating message list into
    (user_text, assistant_text) tuples for display."""
    return [
        (history[i]['content'], history[i + 1]['content'])
        for i in range(0, len(history) - 1, 2)
    ]


def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state):
    """Send `prompt` to the chat model and append the exchange to `state`.

    Args:
        user_token: opaque per-user token; when truthy the usage message
            is suppressed (presumably authenticated users are untracked —
            TODO confirm intended semantics with the caller).
        prompt: user message text; falsy prompt returns current history only.
        prompt_template: key into `prompt_templates` for an optional
            system prompt (unknown keys fall back to no template).
        temperature, max_tokens: forwarded to the OpenAI API.
        state: dict with mutable 'messages' list and 'total_tokens' counter.

    Returns:
        dict with 'chat_messages' (list of (user, assistant) pairs) and
        'total_tokens_used_msg' (str).
    """
    history = state['messages']

    # Empty prompt: no API call, just report the current conversation.
    if not prompt:
        return {
            'chat_messages': _paired_history(history),
            'total_tokens_used_msg': f"Total tokens used: {state['total_tokens']} / {TOKEN_LIMIT}",
        }

    # Unknown template names fall back to "" rather than raising KeyError.
    template_text = prompt_templates.get(prompt_template, "")
    system_prompt = [{"role": "system", "content": template_text}] if template_text else []

    prompt_msg = {"role": "user", "content": prompt}

    try:
        # BUG FIX: the chat API takes `messages=`, not `prompt=`.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=system_prompt + history + [prompt_msg],
            temperature=temperature,
            max_tokens=max_tokens,
        )
        history.append(prompt_msg)
        # BUG FIX: chat completions expose the reply at
        # choices[0].message.content (`.text` is the legacy API), and the
        # model's turn must be recorded as "assistant", not "system" —
        # otherwise the next request sends a corrupted conversation.
        history.append({
            "role": "assistant",
            "content": response.choices[0].message.content,
        })
        # BUG FIX: usage lives under response['usage']['total_tokens'];
        # 'total_characters' does not exist in the API response.
        state['total_tokens'] += response['usage']['total_tokens']
    except Exception as e:
        # Best-effort: surface the API error inline in the chat transcript.
        history.append(prompt_msg)
        history.append({"role": "assistant", "content": f"Error: {e}"})

    total_tokens_used_msg = (
        "" if user_token
        else f"Total tokens used: {state['total_tokens']} / {TOKEN_LIMIT}"
    )
    return {
        'chat_messages': _paired_history(history),
        'total_tokens_used_msg': total_tokens_used_msg,
    }


app = Flask(__name__)

# Module-level conversation state. NOTE(review): shared by all clients and
# not thread-safe; per-session storage would be needed for real use.
state = {
    'messages': [],
    'total_tokens': 0,
}


@app.route('/chat', methods=['POST'])
def chat():
    """Handle a chat turn: forward the JSON payload to submit_message.

    BUG FIXES vs. original: `user_token` was an undefined name, `state`
    was assigned locally (referenced-before-assignment), and the dict
    returned by submit_message was tuple-unpacked into four values —
    all three crashed on every request.
    """
    data = request.json or {}  # tolerate non-JSON bodies
    user_token = data.get('user_token', '')
    prompt = data.get('prompt')
    prompt_template = data.get('prompt_template')
    temperature = data.get('temperature', 0.5)
    max_tokens = data.get('max_tokens', 50)

    result = submit_message(
        user_token, prompt, prompt_template, temperature, max_tokens, state
    )
    return jsonify(result)


if __name__ == '__main__':
    app.run()