"""AI Feedback Engine.

Generates short HR-style feedback + a recommendation for a user message via
OpenAI, then forwards the question/answer pair to a Pulse survey API.
"""

from fastapi import FastAPI, Request
import os
import requests
from pydantic import BaseModel
from dotenv import load_dotenv
import openai

# Load environment variables (from a local .env file or Hugging Face secrets).
load_dotenv()

app = FastAPI(title="AI Feedback Engine")

# Secrets are read once at import time from the environment.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PULSE_API_URL = os.getenv("PULSE_API_URL")
PULSE_API_KEY = os.getenv("PULSE_API_KEY")

# Configure the (legacy, pre-1.0) OpenAI client.
openai.api_key = OPENAI_API_KEY


class Message(BaseModel):
    # Chatbot request payload: the raw user utterance.
    text: str


@app.get("/")
def home():
    """Health-check endpoint."""
    return {"message": "🚀 AI Feedback Engine is running!"}


@app.post("/auto_feedback")
def auto_feedback(msg: Message):
    """Generate AI feedback for ``msg.text`` and push it to the Pulse API.

    NOTE: declared as a plain ``def`` (not ``async def``) on purpose —
    ``openai.ChatCompletion.create`` and ``requests.post`` are synchronous,
    blocking calls. FastAPI runs plain-``def`` handlers in its threadpool,
    so these calls no longer stall the event loop for concurrent requests
    (they did when this handler was ``async def``).

    Returns a dict with keys ``status``, ``user_input``, ``ai_response``,
    and ``pulse_status`` on success, or ``status``/``message`` on failure.
    """
    try:
        user_input = msg.text

        # Step 1: ask the model for feedback + a practical recommendation.
        ai_prompt = f"""
        You are an HR feedback assistant.
        A user said: "{user_input}"
        Generate:
        1. A short, professional feedback (1–2 sentences)
        2. A practical recommendation for improvement.
        Return as JSON with keys: 'feedback' and 'recommendation'.
        """
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "system", "content": ai_prompt}],
        )
        ai_text = completion.choices[0].message["content"]

        # Step 2: forward the question/answer pair to the Pulse survey API.
        # timeout=10 keeps a dead Pulse endpoint from hanging the worker.
        pulse_response = requests.post(
            f"{PULSE_API_URL}/pulse-survey-answers/store",
            headers={"Authorization": f"Bearer {PULSE_API_KEY}"},
            json={"question": user_input, "answer": ai_text},
            timeout=10,
        )

        # Step 3: return a structured result to the chatbot.
        return {
            "status": "success",
            "user_input": user_input,
            "ai_response": ai_text,
            "pulse_status": pulse_response.status_code,
        }
    except Exception as e:
        # Top-level boundary: surface the failure as a structured payload
        # rather than an opaque 500 (matches the original behavior).
        return {"status": "error", "message": str(e)}


# Allows running locally (`python app.py`); in production a server such as
# uvicorn/gunicorn imports `app` directly.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)