# ReqSolver-API / app.py
# (Hugging Face Space file-page header residue: "YchKhan's picture / Update app.py /
#  8b502d9 verified / raw / history blame / 6.06 kB" — web UI chrome, not code.)
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from typing import List, Optional
import os
import json
# ---- Requirements Models ----
class RequirementInfo(BaseModel):
    """A single extracted requirement plus the context it came from."""

    context: str = Field(..., description="Context for the requirement.")
    requirement: str = Field(..., description="The requirement itself.")
    document: Optional[str] = Field(default='', description="The document the requirement is extracted from.")
class ReqGroupingCategory(BaseModel):
    """A titled group of related requirements, identified by an integer id."""

    id: int = Field(..., description="ID of the grouping category")
    title: str = Field(..., description="Title given to the grouping category")
    requirements: List[RequirementInfo] = Field(..., description="List of grouped requirements")
class ReqGroupingResponse(BaseModel):
    """Request body for /find_solutions: the categorized requirements to solve."""
    categories: List[ReqGroupingCategory]
# ---- Solution Models ----
class SolutionModel(BaseModel):
    """One proposed solution for a requirement category.

    The LLM's JSON uses spaced keys ("Problem Description", "Solution
    Description"), mapped here via field aliases; the Config flag also allows
    populating those fields by their Python names (as the error path in
    find_solutions does).
    """
    Context: str = Field(..., description="Full context provided for this category.")
    Requirements: List[str] = Field(..., description="List of each requirement as string.")
    Problem_Description: str = Field(..., alias="Problem Description",
        description="Description of the problem being solved.")
    Solution_Description: str = Field(..., alias="Solution Description",
        description="Detailed description of the solution.")
    References: Optional[str] = Field('', description="References to documents used for the solution.")
    class Config:
        # NOTE(review): this is the Pydantic v1 config key; v2 renames it to
        # populate_by_name — confirm which Pydantic major version is installed.
        allow_population_by_field_name = True # Enables alias handling on input/output
class SolutionsResponse(BaseModel):
    """Response body for /find_solutions: one solution per input category."""
    solutions: List[SolutionModel]
# ---- FastAPI app ----
app = FastAPI()  # ASGI application instance exposing the endpoints below
# ---- LLM Integration ----
def ask_llm(user_message, model='compound-beta', system_prompt="You are a helpful assistant"):
    """Send a single-turn chat request to the Groq API and return the reply text.

    The GROQ_API_KEY environment variable supplies the credential.
    """
    from groq import Groq  # Import here so the app starts without the module if needed

    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]
    completion = client.chat.completions.create(
        model=model,
        messages=conversation,
        stream=False,
    )
    return completion.choices[0].message.content
# Prompt template prepended to every category's contexts and requirements.
# Fix: removed a stray "text" line after the JSON example — leftover residue
# from a Markdown ```text fence that would have leaked into the LLM prompt.
solution_prompt = """You are an expert system designer. Your task is to find a solution that addresses as many of the provided requirements as possible, while carefully considering the given context. Browse internet for reliable sources.
Respond strictly in the following JSON format:
{
"Context": "<Insert the full context provided for this category>",
"Requirements": [
"<List each requirement clearly as a string item>"
],
"Problem Description": "<Describe the problem the solution is solving without introducing it>",
"Solution Description": "<Explain the proposed solution, detailing how it meets each of the specified requirements and aligns with the given context. Prioritize completeness and practicality.>",
"References": "<The references to the documents used to write the solution>"
}
⚠️ Rules:
Do not omit any part of the JSON structure.
Replace newline characters with \"\\n\" (double backslash-n for JSON)
Ensure all fields are present, even if empty.
The solution must aim to maximize requirement satisfaction while respecting the context.
Provide a clear and well-reasoned description of how your solution addresses each requirement.
"""
# ---- Main Endpoint ----
@app.post("/find_solutions", response_model=SolutionsResponse)
async def find_solutions(requirements: ReqGroupingResponse):
    """Generate one LLM-designed solution per requirement category.

    For each category, builds a prompt from the shared template plus the
    category's contexts and requirements, queries the LLM, strips any Markdown
    code fences from the reply, and parses the JSON into a SolutionModel.
    Parse failures are returned in-band as placeholder solutions carrying the
    error message and the raw LLM output, to aid debugging.

    Fix: the original had an unterminated string literal in the closing-fence
    check (`if cleaned.endswith('``` ...`) — a syntax error; restored to
    `if cleaned.endswith('```'):`.
    """
    solutions = []
    for category in requirements.categories:
        category_title = category.title
        # Compose the LLM prompt: template + title + this category's data.
        context_lines = [f"- Context: {req.context}" for req in category.requirements]
        requirement_lines = [f"- Requirement: {req.requirement}" for req in category.requirements]
        problem_description = (
            solution_prompt
            + f"Category title: {category_title}\n"
            + "Contexts:\n" + "\n".join(context_lines) + "\n\n"
            + "Requirements:\n" + "\n".join(requirement_lines)
        )
        llm_response = ask_llm(problem_description)
        print(f"Solution for '{category_title}' category:")
        print(llm_response)
        # Clean and parse the LLM response
        try:
            cleaned = llm_response.strip()
            # Strip Markdown code fences the model may wrap the JSON in.
            if cleaned.startswith('```json'):
                cleaned = cleaned[7:]
            if cleaned.startswith('```'):
                cleaned = cleaned[3:]
            if cleaned.endswith('```'):
                cleaned = cleaned[:-3]
            cleaned = cleaned.strip()
            # The prompt asks for double-escaped newlines; collapse them so
            # json.loads sees valid single-escaped \n sequences.
            cleaned = cleaned.replace('\\\\n', '\\n')
            parsed = json.loads(cleaned)
            # parse_obj honors the "Problem Description"/"Solution Description" aliases.
            solutions.append(SolutionModel.parse_obj(parsed))
        except Exception as e:
            # Surface the failure as a placeholder solution (helps debug).
            solutions.append(SolutionModel(
                Context="",
                Requirements=[],
                Problem_Description=f"Failed to parse LLM response: {str(e)}",
                Solution_Description=f"Original LLM output: {llm_response}",
                References=""
            ))
    return SolutionsResponse(solutions=solutions)
@app.get("/")
def greet_json():
    """Liveness probe: report that the service is up."""
    payload = {"Status": "OK!"}
    return payload