# Paste artifact — original header read: "Spaces:" / "Runtime error" / "Runtime error".
# Preserved here as a comment so the file parses; likely residue from the tool this
# code was copied out of, not part of the program.
# Standard library
import json
import os
from collections import defaultdict
from typing import Optional

# Third-party
import faiss
import numpy as np
import openai
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer
# Initialize FastAPI instance
app = FastAPI()

# --- Load Environment Variables ---
load_dotenv()

api_key = os.getenv("PROXY_API_KEY")
api_base = os.getenv("PROXY_API_URL")

# Fail fast at import time: both proxy credentials are mandatory.
if not (api_key and api_base):
    raise RuntimeError("Missing API key or base URL in environment variables.")

openai.api_key = api_key
openai.api_base = api_base
# --- Load Discourse Data ---
try:
    with open("data/discourse_posts.json", "r", encoding="utf-8") as f:
        posts_data = json.load(f)
except FileNotFoundError as e:
    # Chain the original exception so the real failure stays in the traceback.
    raise RuntimeError(
        "Could not find 'data/discourse_posts.json'. Ensure the file is in the correct location."
    ) from e

# Group posts by topic id; each topic carries its title and its list of posts.
topics = defaultdict(lambda: {"topic_title": "", "posts": []})
for post in posts_data:
    tid = post["topic_id"]
    topics[tid]["posts"].append(post)
    # Any post may carry the title; the last one seen wins (they should agree).
    if "topic_title" in post:
        topics[tid]["topic_title"] = post["topic_title"]

# Sort posts within each topic by post_number so threads read in order.
for topic in topics.values():
    topic["posts"].sort(key=lambda x: x.get("post_number", 0))
# --- Embedding Setup ---
def normalize(vec):
    """Scale *vec* to unit length; a zero vector is returned unchanged."""
    magnitude = np.linalg.norm(vec)
    if magnitude == 0:
        return vec
    return vec / magnitude
embedder = SentenceTransformer("all-MiniLM-L6-v2")

embedding_data = []
embeddings = []


def _collect_thread(by_number, reply_map, root_number):
    """Collect a root post and all transitive replies, depth-first.

    The original inline DFS had no visited set, so a malformed reply graph
    (a cycle, or two posts replying to the same post reachable twice) could
    recurse forever or duplicate posts; ``seen`` guards against both.
    """
    collected = []
    seen = set()

    def dfs(num):
        if num in seen or num not in by_number:
            return
        seen.add(num)
        collected.append(by_number[num])
        for child in reply_map.get(num, []):
            dfs(child.get("post_number"))

    dfs(root_number)
    return collected


# Process topics for FAISS: one embedding per reply thread per topic.
for tid, data in topics.items():
    posts = data["posts"]
    title = data["topic_title"]

    # Index posts by number and bucket them under the post they reply to
    # (bucket key None holds the thread roots).
    reply_map = defaultdict(list)
    by_number = {}
    for p in posts:
        pn = p.get("post_number")
        if pn is not None:
            by_number[pn] = p
        reply_map[p.get("reply_to_post_number")].append(p)

    # A root is any post that does not reply to another post.
    roots = [p for p in posts if not p.get("reply_to_post_number")]
    for root in roots:
        root_num = root.get("post_number", 1)
        thread = _collect_thread(by_number, reply_map, root_num)
        text = f"Topic: {title}\n\n" + "\n\n---\n\n".join(
            p.get("content", "").strip() for p in thread if p.get("content")
        )
        # Unit-normalize so IndexFlatIP's inner product acts as cosine similarity.
        emb = normalize(embedder.encode(text, convert_to_numpy=True))
        embedding_data.append({
            "topic_id": tid,
            "topic_title": title,
            "root_post_number": root_num,
            "post_numbers": [p.get("post_number") for p in thread],
            "combined_text": text,
        })
        embeddings.append(emb)

# Create FAISS index. Guard the empty corpus, which previously surfaced as an
# opaque IndexError on embeddings[0].
if not embeddings:
    raise RuntimeError("No embeddings were generated from the discourse data.")
index = faiss.IndexFlatIP(len(embeddings[0]))
index.add(np.vstack(embeddings).astype("float32"))
# --- API Input Model ---
class QuestionInput(BaseModel):
    """Request payload for the question-answering endpoints."""

    # The user's question text (required).
    question: str
    # Optional image payload; accepted but unused by the handlers in this file.
    # NOTE: the original `image: str = None` is an invalid annotation —
    # pydantic v2 (and mypy) reject a None default on a plain `str` field.
    image: Optional[str] = None
# --- Ask Endpoint ---
@app.post("/ask")
async def ask(question: QuestionInput):
    """Placeholder endpoint that echoes the submitted question.

    NOTE(review): the original function had no route decorator, so despite the
    "Ask Endpoint" comment it was never registered with the app and was
    unreachable. "/ask" is an assumed path — confirm against the API spec.
    """
    return {"answer": f"Response for question: {question.question}"}
# --- API Endpoint for Answer Generation ---
@app.post("/answer")
async def answer_question(payload: QuestionInput):
    """Retrieve the top-3 matching forum threads and generate an LLM answer.

    NOTE(review): the original function had no route decorator, so it was never
    registered and unreachable. "/answer" is an assumed path — confirm against
    the API spec / frontend caller.
    """
    q = payload.question
    # Ensure query is valid — also reject whitespace-only questions, which the
    # original truthiness check let through.
    if not q or not q.strip():
        raise HTTPException(status_code=400, detail="Question field cannot be empty.")

    # Embed the query the same way the corpus was embedded (unit-normalized),
    # so inner-product search behaves as cosine similarity.
    q_emb = normalize(embedder.encode(q, convert_to_numpy=True)).astype("float32")
    scores, indices = index.search(np.array([q_emb]), 3)

    top_results = []
    for score, idx in zip(scores[0], indices[0]):
        # FAISS pads with -1 when the index holds fewer than k vectors; the
        # original code silently resolved that to embedding_data[-1].
        if idx < 0:
            continue
        data = embedding_data[idx]
        top_results.append({
            "score": float(score),
            "text": data["combined_text"],
            "topic_id": data["topic_id"],
            "url": f"https://discourse.onlinedegree.iitm.ac.in/t/{data['topic_id']}",
        })

    context = "\n\n".join(r["text"] for r in top_results)
    prompt = f"Answer the question based on the following forum discussion:\n\n{context}\n\nQuestion: {q}\nAnswer:"

    try:
        response = openai.ChatCompletion.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant that answers based on forum discussions."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.7,
        )
        answer = response.choices[0].message.content
    except Exception as e:
        # Surface upstream/proxy failures as a 500 instead of crashing the worker;
        # chain the cause so server logs keep the real traceback.
        raise HTTPException(status_code=500, detail=f"Error fetching response: {str(e)}") from e

    links = [{"url": r["url"], "text": r["text"][:120]} for r in top_results]
    return {"answer": answer, "links": links}
# --- Run the Server ---
if __name__ == "__main__":
    # NOTE(review): "api:app" assumes this module is saved as api.py — confirm;
    # with reload=True uvicorn re-imports the app by this string, so a filename
    # mismatch fails at startup.
    uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True)