import os
import json

from flask import Flask, render_template, request, jsonify
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Set up API keys
HUGGINGFACE_API_KEY = os.getenv('HUGGINGFACE_API_KEY')

# Set up Flask app
app = Flask(__name__)

# Initialize the Hugging Face Inference client.
# Note: the first positional argument of InferenceClient is the model ID,
# so the API key must be passed as the `token` keyword argument.
client = InferenceClient(token=HUGGINGFACE_API_KEY)


def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]

    # History may arrive as a JSON string from the form; parse it into a
    # list of [user, assistant] pairs.
    history = json.loads(history) if isinstance(history, str) else history

    # Include prior turns (FAQ exchanges) in the conversation.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    # Get a response from the Hugging Face Inference API via chat_completion,
    # which accepts OpenAI-style message lists.
    response = ""
    try:
        result = client.chat_completion(
            messages=messages,
            model="HuggingFaceH4/zephyr-7b-beta",  # example chat model; adjust to one your token can access
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        response = result.choices[0].message.content
    except Exception as e:
        # Surface the error text so it shows up in the chat UI during development.
        response = str(e)

    return response


# Endpoint for chat responses
@app.route('/chat', methods=['POST'])
def chat():
    user_message = request.form['message']
    history = request.form['history']
    system_message = "You are a helpful FAQ chatbot for customer support."
    max_tokens = 512
    temperature = 0.7
    top_p = 0.9

    # Get the reply from the Hugging Face model
    bot_reply = respond(user_message, history, system_message, max_tokens, temperature, top_p)

    return jsonify({'response': bot_reply})


# Home page for the chatbot UI
@app.route('/')
def index():
    return render_template('index.html')


if __name__ == '__main__':
    app.run(debug=True)
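
# Quick manual test of the /chat endpoint (a sketch, assuming the app is running
# locally on Flask's default port 5000; the 'history' field is a JSON list of
# [user, assistant] pairs, matching what respond() expects):
#
#   curl -X POST http://127.0.0.1:5000/chat \
#        -d 'message=What are your opening hours?' \
#        -d 'history=[["Hi", "Hello! How can I help you today?"]]'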