# qcm_nsi / main.py
# (Hugging Face upload metadata: dav74, "Upload 5 files", commit 8de36f7, verified)
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from typing_extensions import TypedDict
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.graph import END, StateGraph, START
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.graph import END, StateGraph, START
from langchain_google_genai import ChatGoogleGenerativeAI
from prompt import prompt_math, bilan_prompt, mapping
from utils import exo_choice, add_enonce_rep
# Placeholder for an explicit Gemini API key. The active llm below does NOT use
# it, so authentication presumably comes from the GOOGLE_API_KEY environment
# variable — NOTE(review): never commit a real key here; keep the env-var path.
GOOGLE_API_KEY = ''
#llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-lite", temperature=0.7, api_key = GOOGLE_API_KEY)
# Single shared LLM instance used by both graphs; temperature 0.7 allows some
# variety in the generated feedback.
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-lite", temperature=0.7)
app = FastAPI()
# Wide-open CORS so a separately hosted front-end can call this API from any
# origin. allow_credentials must stay False when allow_origins is ["*"].
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=False,
    allow_methods=["*"],
    allow_headers=["*"],
)
class AgentStateA(TypedDict):
    """State carried through graph_a (per-question assistant)."""
    enonce : str                      # exercise statement
    choix : str                       # answer choices, joined with ";" by the endpoint
    correct : str                     # the correct answer
    rep_eleve : str                   # the student's answer
    # assistant() stores a list of AIMessage here and the endpoint reads [-1].content,
    # so list[AIMessage] reflects the actual runtime value (was annotated str).
    rep_assistant : list[AIMessage]
class RequestA(BaseModel):
    """POST /assistant payload: one MCQ question plus the student's answer."""
    enonce : str        # exercise statement
    choix : list[str]   # the answer choices
    correct : str       # the correct answer
    rep_eleve : str     # the student's answer
class AgentStateB(TypedDict):
    """State carried through graph_b (session review / bilan)."""
    exo : str                     # concatenated questions + answers (built by add_enonce_rep)
    prog : str                    # program/curriculum text resolved via mapping[req.prog]
    # bilan() stores a list of AIMessage here and the endpoint reads [-1].content,
    # so list[AIMessage] reflects the actual runtime value (was annotated str).
    rep_bilan : list[AIMessage]
class RequestB(BaseModel):
    """POST /bilan payload: the full question/answer history of a session."""
    tab_q : list[str]   # questions asked, in order
    tab_r : list[str]   # corresponding student answers
    prog : str          # key into `mapping` selecting the curriculum text
# Prompt templates built once from the raw template strings in prompt.py.
prompt_assistant = PromptTemplate.from_template(prompt_math)
prompt_bilan = PromptTemplate.from_template(bilan_prompt)
def assistant(state : AgentStateA):
    """Graph node: generate LLM feedback for a single answered MCQ.

    Reads enonce/choix/correct/rep_eleve from the state and returns the
    reply wrapped as an AIMessage under 'rep_assistant'.
    """
    chain = prompt_assistant | llm | StrOutputParser()
    payload = {key: state[key] for key in ('enonce', 'choix', 'correct', 'rep_eleve')}
    answer = chain.invoke(payload)
    return {'rep_assistant' : [AIMessage(content=answer)]}
def bilan(state : AgentStateB):
    """Graph node: generate an LLM review of the whole exercise session.

    Reads exo/prog from the state and returns the reply wrapped as an
    AIMessage under 'rep_bilan'.
    """
    chain = prompt_bilan | llm | StrOutputParser()
    payload = {key: state[key] for key in ('exo', 'prog')}
    answer = chain.invoke(payload)
    return {'rep_bilan' : [AIMessage(content=answer)]}
# Graph A: trivial one-node pipeline START -> assistant -> END,
# compiled once at import time and reused by every /assistant request.
workflow_a = StateGraph(AgentStateA)
workflow_a.add_node("assistant", assistant)
workflow_a.add_edge( START,"assistant")
workflow_a.add_edge( "assistant",END)
graph_a = workflow_a.compile()
# Graph B: trivial one-node pipeline START -> bilan -> END,
# compiled once at import time and reused by every /bilan request.
workflow_b = StateGraph(AgentStateB)
workflow_b.add_node("bilan", bilan)
workflow_b.add_edge( START,"bilan")
workflow_b.add_edge( "bilan",END)
graph_b = workflow_b.compile()
@app.get('/exo')
def get_exo(file_name : str):
    """Return an exercise picked from the given data file (delegates to exo_choice)."""
    exercise = exo_choice(file_name)
    return exercise
@app.post('/assistant')
def get_assistant(req : RequestA):
    """Run graph_a on one answered question and return the LLM feedback text."""
    # Choices are flattened to a single ";"-separated string for the prompt.
    initial_state = {
        "enonce" : req.enonce,
        "choix" : ";".join(req.choix),
        "correct" : req.correct,
        "rep_eleve" : req.rep_eleve,
    }
    result = graph_a.invoke(initial_state, stream_mode="values")
    return {"reponse" : result['rep_assistant'][-1].content}
@app.post('/bilan')
def get_bilan(req : RequestB):
    """Run graph_b over the whole session and return the LLM review text."""
    # Merge questions and answers into one exercise transcript for the prompt.
    exercise_text = add_enonce_rep(req.tab_q, req.tab_r)
    initial_state = {"exo" : exercise_text, "prog" : mapping[req.prog]}
    result = graph_b.invoke(initial_state, stream_mode="values")
    return {"reponse" : result['rep_bilan'][-1].content}