asa
Browse files
app.py
CHANGED
|
@@ -48,28 +48,33 @@ llm = ChatOpenAI(
|
|
| 48 |
api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
| 49 |
)
|
| 50 |
developer_structure_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
|
| 51 |
-
reviewer_structure_llm = llm.with_structured_output(RecommendationQualityScore, method="json_mode")
|
| 52 |
|
| 53 |
def travel_recommender(state):
|
| 54 |
-
# Se asume que el 煤ltimo mensaje contiene las preferencias del usuario
|
| 55 |
user_requirements = state["messages"][-1].content
|
| 56 |
system_prompt = f"""
|
| 57 |
Eres un experto en recomendaciones de viajes.
|
| 58 |
Con base en las siguientes preferencias del usuario: {user_requirements},
|
| 59 |
-
selecciona el mejor destino de la siguiente base de datos
|
| 60 |
-
Responde en JSON con la clave `destination`
|
| 61 |
"""
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
|
|
|
| 65 |
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
return state
|
| 74 |
|
| 75 |
def recommendation_review(state):
|
|
@@ -132,15 +137,16 @@ graph = builder.compile()
|
|
| 132 |
def run_graph(user_input: str) -> str:
|
| 133 |
initial_state = {"messages": [HumanMessage(content=user_input)], "quality": 0, "iterations": 0}
|
| 134 |
final_state = graph.invoke(initial_state)
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
#
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
|
|
|
| 144 |
|
| 145 |
if __name__ == "__main__":
|
| 146 |
iface.launch()
|
|
|
|
| 48 |
api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
| 49 |
)
|
| 50 |
developer_structure_llm = llm.with_structured_output(GenerateRecommendation, method="json_mode")
|
|
|
|
| 51 |
|
| 52 |
def travel_recommender(state):
    """Generate a travel recommendation from the user's latest message.

    Reads the user's preferences from the last entry of ``state["messages"]``,
    asks the structured-output LLM for a destination, appends the formatted
    recommendation to the state as an ``AIMessage``, and returns the state.

    Parameters
    ----------
    state : dict
        Graph state with at least a ``"messages"`` list; the last message is
        assumed to contain the user's travel preferences.

    Returns
    -------
    dict
        The same state object with the recommendation appended.
    """
    # The last message is assumed to hold the user's travel preferences.
    user_requirements = state["messages"][-1].content
    system_prompt = f"""
    Eres un experto en recomendaciones de viajes.
    Con base en las siguientes preferencias del usuario: {user_requirements},
    selecciona el mejor destino de la siguiente base de datos.
    Responde en JSON con la clave `destination` y `explanation`.
    """
    messages = [
        SystemMessage(content=system_prompt),
        # ... more messages (human/AI) can be added here if required ...
    ]

    # 1. Call the LLM configured for structured (JSON) output.
    recommendation_obj = developer_structure_llm.invoke(messages)

    # 2. Render the structured object as plain text.
    #    Encoding fix: "Raz贸n" was UTF-8 text mis-decoded as GBK; the
    #    intended user-facing label is "Razón".
    text_output = (
        f"Destino recomendado: {recommendation_obj.destination}\n"
        f"Razón: {recommendation_obj.explanation}"
    )

    # 3. Store the text in the state so downstream nodes can review it.
    state["messages"].append(AIMessage(content=text_output))

    # 4. Return the modified state.
    return state
|
| 79 |
|
| 80 |
def recommendation_review(state):
|
|
|
|
| 137 |
def run_graph(user_input: str) -> str:
    """Run the recommendation graph on *user_input* and return the final text.

    Builds the initial graph state from the user's message, invokes the
    compiled graph, and returns the content of the last ``AIMessage`` it
    produced. If the graph produced no AI messages, returns a (Spanish)
    fallback notice instead.
    """
    initial_state = {"messages": [HumanMessage(content=user_input)], "quality": 0, "iterations": 0}
    final_state = graph.invoke(initial_state)

    # The final recommendation is carried in the last AIMessage of the flow
    # (travel_recommender appends it to state["messages"]).
    final_messages = [msg for msg in final_state["messages"] if isinstance(msg, AIMessage)]
    if not final_messages:
        # Encoding fix: "gener贸"/"recomendaci贸n" were UTF-8 mis-decoded as
        # GBK; the intended user-facing text is proper Spanish.
        return "No se generó una recomendación final."

    # Extract the last message of the flow.
    return final_messages[-1].content
|
| 149 |
+
|
| 150 |
|
| 151 |
# Launch the Gradio interface only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    iface.launch()
|