# NOTE: the lines "Spaces: Sleeping Sleeping" here were residue from the
# Hugging Face Spaces page this file was extracted from, not part of the app.
# Load the OpenAI API key into the environment BEFORE star-importing the rest
# of utils (which presumably builds models/tools that need the key at import
# time -- confirm against utils).
from utils import _set_env

_set_env("OPENAI_API_KEY")

from utils import *  # noqa: F403 -- brings in graph node fns, retriever_tool, etc.
def create_graph():
    """Build and compile the agentic-RAG LangGraph workflow.

    The graph cycles between:
      * ``generate_query_or_respond`` -- the LLM either calls the retriever
        tool or answers the user directly,
      * ``retrieve`` -- a ``ToolNode`` wrapping ``retriever_tool``,
      * ``rewrite_question`` / ``generate_answer`` -- selected by
        ``grade_documents`` after retrieval.

    The node callables (``generate_query_or_respond``, ``grade_documents``,
    ``rewrite_question``, ``generate_answer``) and ``retriever_tool`` are
    expected to come from the module-level ``from utils import *``.

    Returns:
        The compiled graph, ready for ``.invoke({"messages": [...]})``.
    """
    # MessagesState added to the import: the original relied on it arriving
    # via the utils star-import.
    from langgraph.graph import StateGraph, MessagesState, START, END
    from langgraph.prebuilt import ToolNode, tools_condition

    workflow = StateGraph(MessagesState)

    # Nodes we will cycle between; unnamed nodes take their function's name.
    workflow.add_node(generate_query_or_respond)
    workflow.add_node("retrieve", ToolNode([retriever_tool]))
    workflow.add_node(rewrite_question)
    workflow.add_node(generate_answer)

    workflow.add_edge(START, "generate_query_or_respond")

    # Decide whether to retrieve: tools_condition inspects the last AI
    # message for tool calls and emits "tools" or END.
    workflow.add_conditional_edges(
        "generate_query_or_respond",
        tools_condition,
        {
            # Translate the condition outputs to nodes in our graph
            "tools": "retrieve",
            END: END,
        },
    )

    # After retrieval, grade_documents routes by returning the name of the
    # next node ("generate_answer" or "rewrite_question").
    workflow.add_conditional_edges(
        "retrieve",
        grade_documents,
    )

    workflow.add_edge("generate_answer", END)
    workflow.add_edge("rewrite_question", "generate_query_or_respond")

    # Removed two dead locals (response_model / grader_model): they were
    # assigned here but never referenced, and duplicates exist at module level.
    return workflow.compile()
# --- Gradio front-end wiring -------------------------------------------------
from langchain.schema import AIMessage, HumanMessage
import gradio as gr
from langchain.chat_models import init_chat_model

## ADD TRACKING
# NOTE(review): these module-level models duplicate the ones built inside
# create_graph() and are not referenced anywhere below -- confirm whether the
# planned "tracking" actually needs them before removing.
response_model = init_chat_model("gpt-4o", temperature=0)
grader_model = init_chat_model("gpt-4o", temperature=0)

# Compile the workflow once at import time; predict() reuses it per request.
graph = create_graph()
def predict(message, history):
    """Gradio ChatInterface callback: answer ``message`` given ``history``.

    Args:
        message: The new user message (str).
        history: Chat history in Gradio "messages" format -- a list of dicts
            with ``role`` ("user"/"assistant") and ``content`` keys.

    Returns:
        The assistant's reply text.
    """
    # Convert Gradio's dict-based history into LangChain message objects.
    history_langchain_format = []
    for msg in history:
        if msg['role'] == "user":
            history_langchain_format.append(HumanMessage(content=msg['content']))
        elif msg['role'] == "assistant":
            history_langchain_format.append(AIMessage(content=msg['content']))
    history_langchain_format.append(HumanMessage(content=message))

    # BUG FIX: a MessagesState graph is invoked with {"messages": [...]} and
    # returns the final state dict, not a message object. The original passed
    # a bare list and read ``.content`` off the result, which fails at runtime.
    result = graph.invoke({"messages": history_langchain_format})
    return result["messages"][-1].content
# Expose predict() as a chat UI; api_name="chat" publishes the endpoint as
# /chat for programmatic (gradio_client / REST) access.
# NOTE(review): predict() indexes history entries as {"role", "content"}
# dicts, which matches ChatInterface's "messages" history format -- confirm
# this Gradio version defaults to it (else pass type="messages").
iface = gr.ChatInterface(
    predict,
    api_name="chat",
)
iface.launch()