Spaces:
Running
Running
| import os | |
| from typing import TypedDict, List, Dict, Any, Optional | |
| from langgraph.graph import StateGraph, START, END | |
| from agent.agent_graph.StateTasks import * | |
| from agent.llm.prompts import * | |
| from typing import get_type_hints | |
| import json | |
| from langchain_core.messages import HumanMessage,SystemMessage | |
| from agent.agent_graph.Graph_Utils import get_egp_to_usd | |
| from agent.rag.rag import * | |
def answer_question(state: ProblemState):
    """Generate the final user-facing answer with the LLM.

    A guide prompt is chosen from the task type (laptop choice, roadmap,
    cached RAG article, or plain RAG fallback), then combined with the
    conversation memory, the raw user request, and the serialized state
    before invoking the model. The reply is stored in ``state["answer"]``.
    """
    question = state["question"]
    qtype = state["question_type"]

    if qtype == Available_Tasks.LAPTOP_CHOOSE.value:
        guide_prompt = Tasks_prompts.LAPTOP_THINK.value
    elif qtype == Available_Tasks.ROADMAP.value:
        guide_prompt = Tasks_prompts.ROADMAP.value
    elif "node_output_article" in state:
        # NOTE(review): this article-grounded branch reuses the ROADMAP
        # prompt — confirm that Tasks_prompts.RAG.value wasn't intended here.
        guide_prompt = (
            Tasks_prompts.ROADMAP.value
            + "<source>"
            + state["node_output_article"]
            + "</source>"
        )
    else:
        guide_prompt = Tasks_prompts.RAG.value

    # Assemble the full instruction: guide prompt, user request, a reminder
    # to quote prices in USD, the whole state, and the finalizer prompt.
    content = (
        guide_prompt
        + "طلب المستخدم:\n"
        + question
        + "\nاهم معلومات المستخدم لاستخدامها ف الدلالة (بالنسبة للسعر هو نفس السعر لكن بالدولار فدائما ركز على السعر بالدولار\n)"
        + str(state)
        + Route_prompts.FINALIZER_PROMPT.value
    )
    state["answer"] = get_llm_answer(
        model_llm=state["llm"],
        messages=state["memory"] + [HumanMessage(content=content)],
    )
    return state
def update_context(state: ProblemState):
    """Extract new facts from the user's message and merge them into state.

    Only a whitelisted subset of ``ProblemState`` keys may be written, to
    prevent the LLM from hallucinating arbitrary state. Also detects whether
    the question is answerable from the RAG store, and asks the LLM whether
    the user has confirmed understanding (``all_ok``).
    """
    # Enumerate all declared state keys; only some may be modified below.
    keys = get_type_hints(ProblemState).keys()
    # Rebuilt on every call so appends from one call never leak into another.
    keys_modifiable = []
    _is_rag = False
    _rag_text_if_exist = ""
    for i in list(keys):
        # Core conversation fields are never writable via the extractor LLM.
        if i in ["question", "answer", "node_output_article", "memory"]:
            continue
        # question_type is only settable at the very start of the chat:
        # once present in state, the conversation has begun and it is frozen.
        if i == "question_type":
            if "question_type" in state.keys():
                continue
            # NOTE(review): reconstructed nesting — the RAG lookup appears to
            # run only while classifying question_type; confirm against the
            # original file's indentation.
            rag_text = state["rag_model"].get_relevant_question(state["question"])
            if bool(rag_text):
                _is_rag = True
                # Cache the hit so we don't query the RAG store twice.
                _rag_text_if_exist = rag_text
                continue
        # Any key reaching this point may be returned by the LLM.
        keys_modifiable.append(i)
    # Ask the LLM to extract values for the allowed keys from the user text.
    prompt_llm_new_info = Route_prompts.Context_UPDATOR.value + "\n <KEYS> \n" +str(keys_modifiable) +"\n </KEYS> <Text>"+state["question"]+"</Text>"
    llm_new_info = get_llm_answer(model_llm=state["llm"],messages = [HumanMessage(prompt_llm_new_info)])
    # Parse the returned JSON defensively; discard keys outside the whitelist.
    try:
        llm_new_info = json.loads(llm_new_info)
        for key in llm_new_info.keys():
            if key in keys_modifiable:
                state[key] = llm_new_info[key]
        # A RAG hit forces the generic QUESTION flow with the article attached.
        if _is_rag:
            state["question_type"] = Available_Tasks.QUESTION.value
            state["node_output_article"] = _rag_text_if_exist
        # Ask the LLM whether the last exchange means the user confirmed
        # correct understanding; result drives the all_ok flag.
        last_question = state.get("answer", "")
        check_finalized_prompt = (
            "عندك ده اخر سوال واجابة"+
            last_question +
            state["question"] +
            "هل معنى ذلك ان المستخدم اكد على الفهم الصحيح؟" +
            "رجع فقط BOOL (True/False)"
        )
        check_finalized = get_llm_answer(model_llm=state["llm"], messages = [HumanMessage(
            check_finalized_prompt
        )])
        # Anything other than a literal "true" (badly parsed output included)
        # counts as not confirmed.
        state['all_ok'] = check_finalized.strip().lower() == "true"
    except Exception as e:
        print("Context was not updated due to error : ",e)
    return state
def convertPriceToDollar(state: ProblemState):
    """Convert the stored price from EGP to USD in place.

    No-op when the user has not provided a price yet.
    """
    if "price" not in state.keys():
        return state
    state["price"] = get_egp_to_usd(state["price"])
    return state
def step(state: ProblemState):
    """Ask the user for the next missing piece of information.

    Determines the first unanswered topic for the current question type
    (question_type itself comes first; all_ok confirmation comes last, once
    every other topic is filled) and has the LLM phrase the follow-up
    question into ``state["answer"]``.
    """
    if "question_type" not in state.keys():
        next_topic = "question_type"
    else:
        # First topic of this task's checklist that is still absent, if any.
        next_topic = next(
            (t for t in task_steps[state.get("question_type")] if t not in state.keys()),
            None,
        )
    # With the checklist complete, fall back to the final confirmation step
    # unless the user already confirmed (all_ok truthy-and-not-False).
    confirmed = "all_ok" in state.keys() and state["all_ok"] != False
    if (not next_topic) and (not confirmed) and "question_type" in state.keys():
        next_topic = "all_ok"
    step_prompt = (
        System_prompts.STATE_DESCRIBE.value
        + f"<order>{next_topic} </order> <state>{state}</state>"
        + Route_prompts.FINALIZER_PROMPT_STEP.value
    )
    state["answer"] = get_llm_answer(model_llm=state["llm"], messages=[HumanMessage(step_prompt)])
    return state
def search_knowledgebase(state: ProblemState):
    """Fetch supporting context for the current question from the RAG store.

    Stores whatever the retriever returns in ``state["node_output_article"]``
    (original comment said the top 3 relevant docs — depends on the
    retriever's configuration, not visible here).
    """
    question = state["question"]
    retriever = state["rag_model"]
    state["node_output_article"] = retriever.get_relevant_question(question)
    return state
def get_llm_answer(model_llm=None, messages=None):
    """Invoke the chat model and return the text content of its reply.

    Args:
        model_llm: A LangChain-style chat model exposing ``invoke``. Required
            in practice — passing nothing raises ``AttributeError``.
        messages: The message list to send; defaults to a single "hi"
            HumanMessage when omitted, matching the previous behavior.

    Returns:
        str: The ``content`` of the model's response message.
    """
    # Fix: the previous mutable default ([HumanMessage(content="hi")]) was
    # evaluated once at import time and shared across every call; build the
    # default lazily instead.
    if messages is None:
        messages = [HumanMessage(content="hi")]
    return model_llm.invoke(messages).content