File size: 6,109 Bytes
d80649d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
from langgraph.graph import END
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Dict, List, Any
from langgraph.graph import StateGraph
from service import research_task, seo_optimization_task, content_writing_task, refine_content, evaluate_content_quality, feedback_improvement, meeting_insights, upload_file, humanize
from langchain_google_genai import ChatGoogleGenerativeAI
from service import system_prompt
# FastAPI application instance; all routes below are registered on it.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # NOTE(review): wildcard origins combined with allow_credentials=True is rejected by browsers for credentialed requests — confirm the intended origin list
    allow_credentials=True,
    allow_methods=["*"],  # Allow all methods (GET, POST, PUT, DELETE, etc.)
    allow_headers=["*"],  # Allow all headers
)

# Shared Gemini chat model; used by the /update-page/ endpoint below.
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
class ContentState(Dict):
    """Mutable state dictionary passed between the LangGraph workflow nodes.

    This is a plain ``Dict`` subclass: the annotations below document the
    expected keys but are NOT validated or enforced at runtime.
    """
    idea: str  # the user's content idea / brief
    company_name: str
    services: Dict[str, List[str]]  # Main services with their sub-services
    service_area: Dict[str, Dict[str, str]]  # Each area has multiple sub-service pages
    research_data: str  # presumably written by research_task — verify in service.py
    seo_optimization: str  # presumably written by seo_optimization_task — verify in service.py
    home_page: str
    about_us_page: str
    service_page: str
    individual_service_page: Dict[str, str]  # Single service pages
    service_area_page: Dict[str, Dict[str, str]]  # Each area with its sub-services
    quality_score: int  # scores <= 7 route the graph back through feedback_improvement
    feedback: str
    content: str
    data: str
    text:str
    meeting_point:str  # presumably insights extracted by meeting_insights — verify in service.py
    file_path:str  # path to an uploaded meeting file, consumed by upload_file
workflow = StateGraph(ContentState)

# Register workflow nodes: each maps a node name to a callable from service.py.
workflow.add_node("research_step", research_task)
workflow.add_node("seo_step", seo_optimization_task)
workflow.add_node("writing_step", content_writing_task)
workflow.add_node("refine_content", refine_content)
workflow.add_node("evaluate_content_quality", evaluate_content_quality)
workflow.add_node("feedback_improvement", feedback_improvement)  # Node for quality rework
workflow.add_node("human_review", lambda state: state)  # Human-in-the-loop review (pass-through node)
workflow.add_node("meeting_insights",meeting_insights)
workflow.add_node("upload_file",upload_file)
workflow.add_node("humanized", humanize)

# Transitions.
# NOTE(review): set_entry_point is called twice — depending on the langgraph
# version this either starts two parallel branches (research + upload) or the
# second call replaces the first; confirm the intended start behavior.
workflow.set_entry_point("research_step")
workflow.set_entry_point("upload_file")
workflow.add_edge("upload_file", "meeting_insights")
workflow.add_edge("research_step", "seo_step")
workflow.add_edge("seo_step", "writing_step")
workflow.add_edge("meeting_insights", "writing_step")  # both branches converge on writing_step
workflow.add_edge("writing_step", "refine_content")
workflow.add_edge("refine_content", "evaluate_content_quality")

# Quality gate: a score of 7 or below routes to rework, otherwise to humanizing.
workflow.add_conditional_edges(
    "evaluate_content_quality",
    lambda state: "feedback_improvement" if state["quality_score"] <= 7 else "humanized",
    {
        "feedback_improvement": "feedback_improvement",
        "humanized": "humanized"
    }
)

# Loop back from feedback_improvement to evaluate_content_quality for re-scoring.
# NOTE(review): there is no iteration cap visible here — if the score never
# exceeds 7 this loop relies on langgraph's recursion limit to stop; confirm.
workflow.add_edge("feedback_improvement", "evaluate_content_quality")

# Human-in-the-loop approval before finalization.
workflow.add_edge("humanized", "human_review")
workflow.add_edge("human_review", END)

# Compile the graph into the runnable invoked by generate_content().
content_graph = workflow.compile()
class RequestModel(BaseModel):
    """Request body for the /generate-content/ endpoint.

    NOTE(review): there is no ``file_path`` field even though the workflow
    state expects one, and ``service_area`` is a ``List[str]`` here while
    ContentState declares a nested dict — confirm which shape is intended.
    """
    idea: str
    company_name: str
    services: Dict[str, List[str]]
    service_area: List[str]
class UpdateRequest(BaseModel):
    """Intended request body for page updates.

    NOTE(review): not referenced by any endpoint in this file — the
    /update-page/ route takes raw ``state``/``user_query`` parameters
    instead; confirm whether it should use this model.
    """
    page_key: List[str]
    user_query: str  

def generate_content(data):  # Plain function (no @app.post) so it is importable/reusable
    """Run the content-generation graph and collect the generated pages.

    Args:
        data: Mapping with required keys "idea", "company_name", "services"
            and "service_area", plus an optional "file_path" pointing at an
            uploaded meeting file.

    Returns:
        dict mapping page names (home/about/service/...) to generated content.
    """
    # Bug fix: use .get for "file_path" — RequestModel (the /generate-content/
    # payload) has no file_path field, so request.dict() lacks the key and a
    # plain data["file_path"] raised KeyError for every API call.
    state = content_graph.invoke({
        "idea": data["idea"],
        "company_name": data["company_name"],
        "services": data["services"],
        "service_area": data["service_area"],
        "quality_score": 0,  # start below the 7 threshold so evaluation runs
        "file_path": data.get("file_path", "")
    })

    # Return a plain dict (not JSONResponse) so callers can post-process it.
    response = {
        "home_page": state.get("home_page", ""),
        "about_us_page": state.get("about_us_page", ""),
        "service_page": state.get("service_page", ""),
        "individual_service_page": state.get("individual_service_page", {}),
        "service_area_page": state.get("service_area_page", {})
    }

    return response
@app.post("/generate-content/")
def generate_content_endpoint(request: RequestModel):
    """API endpoint: generate website content from the submitted company details."""
    payload = request.dict()
    generated = generate_content(payload)
    return JSONResponse(content=generated)


@app.put("/update-page/")
def update_page(state: dict, user_query: str):
    """Update a page's content according to a free-form user edit request.

    Args:
        state: Request body; only the "page_content" key is read
            (defaults to "" when absent).
        user_query: The edit instruction to apply to the content.

    Returns:
        dict with the single key "page_content" holding the updated text.

    NOTE(review): FastAPI will read ``state`` from the JSON body and
    ``user_query`` as a query parameter, while the UpdateRequest model
    exists but is unused — confirm the intended request shape.
    """
    current_content = state.get("page_content", "")

    # Prompt instructing the model to return the FULL document with only the
    # requested edits applied (no partial output, no extra commentary).
    prompt = f"""

    You are a professional content editor. Modify the content strictly according to the user request below.

    

    ### **Rules for Modification**  

    - Apply the requested changes **EXACTLY as specified** in the user request.  

    - **Return the entire content** with only the requested modifications applied.  

    - **DO NOT return only the changed section**—always return the full content with modifications integrated.  

    - **DO NOT rephrase or modify anything that is not explicitly requested to change.**  

    - Ensure the modified content reads naturally and maintains professional quality.  

    - **DO NOT include explanations, formatting hints, or extra commentary—only return the final updated content.**  



    ### **User Request:**  

    {user_query}



    ### **Original Content:**  

    {current_content}



    ### **Updated Content (Return Full Updated Version Below):**

    """

    # Send system prompt + edit instructions to Gemini; strip surrounding whitespace.
    updated_content = llm.invoke([
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ]).content.strip()

    # Return the updated content under the same key the caller supplied it in.
    return {"page_content": updated_content}