import os
from typing import TypedDict, List, Dict, Any, Optional, get_type_hints
from langgraph.graph import StateGraph, START, END
from agent.agent_graph.StateTasks import *
from agent.llm.prompts import *
import json
from langchain_core.messages import HumanMessage, SystemMessage
from agent.agent_graph.Graph_Utils import get_egp_to_usd
from agent.rag.rag import *

def answer_question(state: ProblemState):
    """Pick the guiding prompt for the question type and ask the LLM for the final answer."""
    question = state["question"]
    if state["question_type"] == Available_Tasks.LAPTOP_CHOOSE.value:
        guide_prompt = Tasks_prompts.LAPTOP_THINK.value
    elif state["question_type"] == Available_Tasks.ROADMAP.value:
        guide_prompt = Tasks_prompts.ROADMAP.value
    elif "node_output_article" in state.keys():
        guide_prompt = Tasks_prompts.ROADMAP.value + "<source>" + state["node_output_article"] + "</source>"
    else:
        guide_prompt = Tasks_prompts.RAG.value
    # The Arabic below translates to: "User request:" / "The user's most important info, to be
    # used for guidance (the price is the same price but in dollars, so always focus on the price in dollars)".
    state["answer"] = get_llm_answer(
        model_llm=state["llm"],
        messages=state["memory"] + [HumanMessage(content=(
            guide_prompt + "طلب المستخدم:\n" + question
            + "\nاهم معلومات المستخدم لاستخدامها ف الدلالة (بالنسبة للسعر هو نفس السعر لكن بالدولار فدائما ركز على السعر بالدولار\n)"
            + str(state) + Route_prompts.FINALIZER_PROMPT.value
        ))],
    )
    return state

def update_context(state: ProblemState):
    """Let the LLM extract new state values from the user's message, restricted to a whitelist of keys."""
    # Control which keys may be modified, to prevent hallucination.
    keys = get_type_hints(ProblemState).keys()
    keys_modifiable = []  # kept local so appends from one call never leak into another
    _is_rag = False
    _rag_text_if_exist = ""
    for i in list(keys):
        # 1️⃣ Keys managed by the graph itself are never writable by the LLM.
        if i in ["question", "answer", "node_output_article", "memory"]:
            continue
        # 2️⃣ Prevent modifying question_type after the chat starts:
        #    - question_type may only be set at the very start of the chat.
        #    - If it already exists in state → the chat has started → modification is not allowed.
        #    - If the question matches the RAG data, skip it as well.
        if i == "question_type":
            if "question_type" in state.keys():
                continue
            rag_text = state["rag_model"].get_relevant_question(state["question"])
            if bool(rag_text):
                _is_rag = True
                _rag_text_if_exist = rag_text  # store the text to avoid a second retrieval call
                continue
        # 3️⃣ Any key that reaches here may be returned by the LLM.
        keys_modifiable.append(i)
    # Build the prompt.
    prompt_llm_new_info = (
        Route_prompts.Context_UPDATOR.value
        + "\n <KEYS> \n" + str(keys_modifiable) + "\n </KEYS> <Text>"
        + state["question"] + "</Text>"
    )
    llm_new_info = get_llm_answer(model_llm=state["llm"], messages=[HumanMessage(prompt_llm_new_info)])
    # Parse the returned JSON defensively so hallucinated keys cannot leak into the state.
    try:
        llm_new_info = json.loads(llm_new_info)
        for key in llm_new_info.keys():
            if key in keys_modifiable:
                state[key] = llm_new_info[key]
        # If the question matched the RAG data, route it as a plain question with the article attached.
        if _is_rag:
            state["question_type"] = Available_Tasks.QUESTION.value
            state["node_output_article"] = _rag_text_if_exist
        # Check whether all_ok can be confirmed now. The Arabic prompt translates to:
        # "Here you have the last question and answer: <answer> <question>
        #  Does that mean the user confirmed the correct understanding?
        #  Return only BOOL (True/False)."
        last_answer = state.get("answer", "")
        check_finalized_prompt = (
            "عندك ده اخر سوال واجابة"
            + last_answer
            + state["question"]
            + "هل معنى ذلك ان المستخدم اكد على الفهم الصحيح؟"
            + "رجع فقط BOOL (True/False)"
        )
        check_finalized = get_llm_answer(model_llm=state["llm"], messages=[HumanMessage(check_finalized_prompt)])
        state["all_ok"] = check_finalized.strip().lower() == "true"  # anything else parses as False
    except Exception as e:
        print("Context was not updated due to error:", e)
    return state
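
# Illustrative example (not from the original code): update_context expects the LLM to
# reply with a flat JSON object whose keys are a subset of keys_modifiable; the key
# names below are hypothetical:
#   {"question_type": "LAPTOP_CHOOSE", "price": 35000}
# Keys outside the whitelist are silently dropped by the check inside the try block.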

def convertPriceToDollar(state: ProblemState):
    """Convert the user's budget from EGP to USD, since the prompts reason about price in dollars."""
    if "price" in state.keys():
        state["price"] = get_egp_to_usd(state["price"])
    return state

def step(state: ProblemState):
    """Pick the next missing piece of information for the current task and ask the user for it."""
    next_topic = None
    if "question_type" not in state.keys():
        next_topic = "question_type"
    else:
        for i in task_steps[state.get("question_type")]:
            if i not in state.keys():
                next_topic = i
                break
    # Only after the to-do list of the question type is finished can we ask for all_ok to confirm.
    if (not next_topic) and ("all_ok" not in state.keys() or not state["all_ok"]) and "question_type" in state.keys():
        next_topic = "all_ok"
    step_prompt = (
        System_prompts.STATE_DESCRIBE.value
        + f"<order>{next_topic} </order> <state>{state}</state>"
        + Route_prompts.FINALIZER_PROMPT_STEP.value
    )
    state["answer"] = get_llm_answer(model_llm=state["llm"], messages=[HumanMessage(step_prompt)])
    return state
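
# Illustrative assumption (task_steps is defined in agent.agent_graph.StateTasks and not
# shown here): step() treats it as a mapping from question type to the ordered state keys
# that must be collected before confirmation, e.g. something like:
#   task_steps = {Available_Tasks.LAPTOP_CHOOSE.value: ["price", "usage"]}
# The exact per-task keys are hypothetical.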

def search_knowledgebase(state: ProblemState):
    """
    Search the vector database for relevant contexts.
    """
    # fetch top 3 relevant docs
    state["node_output_article"] = state["rag_model"].get_relevant_question(state["question"])
    return state

def get_llm_answer(model_llm=None, messages=None):
    # Avoid a mutable default argument; fall back to a trivial greeting when no messages are given.
    messages = messages or [HumanMessage(content="hi")]
    return model_llm.invoke(messages).content
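
# Minimal sketch (not part of the original module) of one plausible way these node
# functions could be wired into the imported StateGraph. The node names and edge order
# are assumptions; the project's actual graph is built elsewhere in agent.agent_graph.
def _example_build_graph():
    graph = StateGraph(ProblemState)
    graph.add_node("update_context", update_context)
    graph.add_node("convert_price", convertPriceToDollar)
    graph.add_node("answer_question", answer_question)
    graph.add_edge(START, "update_context")
    graph.add_edge("update_context", "convert_price")
    graph.add_edge("convert_price", "answer_question")
    graph.add_edge("answer_question", END)
    return graph.compile()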