import os

import requests
from flask import Flask, request, jsonify, render_template
from huggingface_hub import InferenceClient

app = Flask(__name__)

# 1. Set up clients
hf_client = InferenceClient(
    model="meta-llama/Llama-3.2-3B-Instruct",
    token=os.getenv("HF_TOKEN"),
)
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")


def search_tavily(query):
    """Someone else's free API doing the hard work for you."""
    url = "https://api.tavily.com/search"
    payload = {
        "api_key": TAVILY_API_KEY,
        "query": query,
        "search_depth": "basic",
        "max_results": 3,
    }
    # Time out rather than hang if Tavily is slow; fail loudly on HTTP errors
    response = requests.post(url, json=payload, timeout=10)
    response.raise_for_status()
    results = response.json().get("results", [])
    # Join snippets into one string the model can read as context
    return "\n".join(f"Source: {r['url']}\nContent: {r['content']}" for r in results)


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/ask", methods=["POST"])
def ask():
    user_query = (request.get_json() or {}).get("query")
    if not user_query:
        return jsonify({"error": "Missing 'query' in request body"}), 400

    # STEP 1: Get real-time data from the Tavily API
    web_data = search_tavily(user_query)

    # STEP 2: Use Llama 3.2 to synthesize the answer from the retrieved snippets
    system_prompt = f"""You are a 2026 AI Researcher. Use the following LIVE WEB DATA to answer.
If the data is about the PM of India or Super Bowl, use it!
DATA: {web_data}"""

    response = hf_client.chat_completion(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_query},
        ],
        max_tokens=500,
    )
    return jsonify({"answer": response.choices[0].message.content})


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
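# --- Usage sketch (not part of the app) ---
# A minimal example of exercising the /ask route from a separate Python
# process, assuming the server above is running locally on port 7860 and
# that HF_TOKEN and TAVILY_API_KEY are set in its environment. The query
# string is only an illustration.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/ask",
#       json={"query": "Who is the current PM of India?"},
#       timeout=60,
#   )
#   print(resp.json()["answer"])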