|
|
from langgraph.graph import END
|
|
|
from fastapi import FastAPI, HTTPException
|
|
|
from fastapi.responses import JSONResponse
|
|
|
from fastapi.middleware.cors import CORSMiddleware
|
|
|
from pydantic import BaseModel
|
|
|
from typing import Dict, List, Any
|
|
|
from langgraph.graph import StateGraph
|
|
|
from service import research_task, seo_optimization_task, content_writing_task, refine_content, evaluate_content_quality, feedback_improvement, meeting_insights, upload_file
|
|
|
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
|
from service import system_prompt
|
|
|
app = FastAPI()

# Allow cross-origin requests from any frontend.
# NOTE(review): wildcard allow_origins together with allow_credentials=True is
# rejected by browsers under the CORS spec for credentialed requests — confirm
# whether credentials are actually required, or pin the allowed origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Shared Gemini chat model; used by the /update-page/ endpoint below.
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
|
|
|
class ContentState(Dict):
    """Mutable state shared between the LangGraph workflow nodes.

    Subclassing ``Dict`` means instances behave as plain dicts; the class-level
    annotations below serve as the schema that ``StateGraph(ContentState)``
    inspects. Keys are populated incrementally as the graph runs.
    """

    # -- caller-supplied inputs --
    idea: str
    company_name: str
    services: Dict[str, List[str]]
    service_area: Dict[str, Dict[str, str]]

    # -- intermediate artifacts produced by workflow nodes --
    research_data: str
    seo_optimization: str

    # -- generated page outputs --
    home_page: str
    about_us_page: str
    service_page: str
    individual_service_page: Dict[str, str]
    service_area_page: Dict[str, Dict[str, str]]

    # -- evaluation / feedback loop --
    quality_score: int   # compared against 7 in the conditional edge below
    feedback: str

    # -- misc fields used by individual nodes (semantics defined in service.py) --
    content: str
    data: str
    text:str
    meeting_point:str
    file_path:str        # path of an uploaded meeting file, consumed by upload_file
|
|
|
# ---------------------------------------------------------------------------
# LangGraph workflow wiring
# ---------------------------------------------------------------------------
workflow = StateGraph(ContentState)

# Processing nodes; implementations are imported from service.py.
workflow.add_node("research_step", research_task)
workflow.add_node("seo_step", seo_optimization_task)
workflow.add_node("writing_step", content_writing_task)
workflow.add_node("refine_content", refine_content)
workflow.add_node("evaluate_content_quality", evaluate_content_quality)
workflow.add_node("feedback_improvement", feedback_improvement)
# Pass-through node: a placeholder hook for human review before END.
workflow.add_node("human_review", lambda state: state)
workflow.add_node("meeting_insights",meeting_insights)
workflow.add_node("upload_file",upload_file)

# NOTE(review): set_entry_point is called twice, apparently to start two
# parallel branches (research and file upload). Depending on the installed
# langgraph version the second call may simply override the first, dropping
# the research branch — confirm that multiple entry points are supported.
workflow.set_entry_point("research_step")
workflow.set_entry_point("upload_file")

# Branch 1: uploaded file -> meeting insights -> writing.
workflow.add_edge("upload_file", "meeting_insights")
# Branch 2: research -> SEO -> writing.
workflow.add_edge("research_step", "seo_step")
workflow.add_edge("seo_step", "writing_step")
workflow.add_edge("meeting_insights", "writing_step")
workflow.add_edge("writing_step", "refine_content")
workflow.add_edge("refine_content", "evaluate_content_quality")


# Quality gate: scores of 7 or below loop back through feedback improvement;
# higher scores proceed to human review.
workflow.add_conditional_edges(
    "evaluate_content_quality",
    lambda state: "feedback_improvement" if state["quality_score"] <= 7 else "human_review",
    {
        "feedback_improvement": "feedback_improvement",
        "human_review": "human_review"
    }
)


# Feedback loop: improved content is re-evaluated until it clears the gate.
workflow.add_edge("feedback_improvement", "evaluate_content_quality")


workflow.add_edge("human_review", END)


content_graph = workflow.compile()
|
|
|
class RequestModel(BaseModel):
    """Request body for POST /generate-content/.

    NOTE(review): ``ContentState`` declares ``service_area`` as a nested dict
    while this model accepts a flat list — confirm which shape the workflow
    nodes actually expect.
    """

    idea: str
    company_name: str
    services: Dict[str, List[str]]
    service_area: List[str]
|
|
|
class UpdateRequest(BaseModel):
    """Apparent request schema for PUT /update-page/.

    NOTE(review): this model is not referenced by any endpoint visible in this
    file — ``update_page`` takes ``state: dict`` and ``user_query: str``
    directly. Confirm whether it should be wired into that endpoint or removed.
    """

    page_key: List[str]
    user_query: str
|
|
|
|
|
|
def generate_content(data):
    """Run the content-generation workflow and collect the page outputs.

    Args:
        data: Mapping with the keys ``idea``, ``company_name``, ``services``
            and ``service_area`` (as posted via ``RequestModel``), plus an
            optional ``file_path`` pointing at an uploaded meeting file.

    Returns:
        dict: Generated content keyed by page name. Pages the workflow did
        not produce default to empty strings / empty dicts.
    """
    state = content_graph.invoke({
        "idea": data["idea"],
        "company_name": data["company_name"],
        "services": data["services"],
        "service_area": data["service_area"],
        # Start the quality-gate loop from zero so the first evaluation runs.
        "quality_score": 0,
        # BUG FIX: ``RequestModel`` has no ``file_path`` field, so the payload
        # built by ``request.dict()`` in the endpoint never contains the key
        # and ``data["file_path"]`` raised KeyError on every request. Fall
        # back to an empty string when no file was uploaded.
        "file_path": data.get("file_path", ""),
    })

    return {
        "home_page": state.get("home_page", ""),
        "about_us_page": state.get("about_us_page", ""),
        "service_page": state.get("service_page", ""),
        "individual_service_page": state.get("individual_service_page", {}),
        "service_area_page": state.get("service_area_page", {}),
    }
|
|
|
@app.post("/generate-content/")
def generate_content_endpoint(request: RequestModel):
    """
    API endpoint to generate website content based on user input.
    """
    # Convert the validated pydantic model to a plain dict for the workflow,
    # run the content pipeline, and hand the result straight back as JSON.
    payload = request.dict()
    generated_pages = generate_content(payload)
    return JSONResponse(content=generated_pages)
|
|
|
|
|
|
|
|
|
@app.put("/update-page/")
def update_page(state: dict, user_query: str):
    """
    Updates the selected page content based on user feedback.

    Args:
        state: Request body; only the ``page_content`` key is read (the
            current full text of the page being edited).
        user_query: The edit instruction to apply verbatim.

    Returns:
        dict: ``{"page_content": <full updated page text>}``.

    NOTE(review): the ``UpdateRequest`` model defined above is not used here —
    presumably it was intended as this endpoint's request schema; confirm.
    """
    current_content = state.get("page_content", "")


    # Editing prompt: instructs the model to apply the user's change exactly
    # and return the complete document (not just the changed section).
    prompt = f"""
You are a professional content editor. Modify the content strictly according to the user request below.

### **Rules for Modification**
- Apply the requested changes **EXACTLY as specified** in the user request.
- **Return the entire content** with only the requested modifications applied.
- **DO NOT return only the changed section**—always return the full content with modifications integrated.
- **DO NOT rephrase or modify anything that is not explicitly requested to change.**
- Ensure the modified content reads naturally and maintains professional quality.
- **DO NOT include explanations, formatting hints, or extra commentary—only return the final updated content.**

### **User Request:**
{user_query}

### **Original Content:**
{current_content}

### **Updated Content (Return Full Updated Version Below):**
"""


    # Chat-style invocation of the shared Gemini model; system_prompt comes
    # from service.py. ``.content`` extracts the model's text reply.
    updated_content = llm.invoke([
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ]).content.strip()


    return {"page_content": updated_content}
|
|
|
|
|
|
|
|
|
|