Spaces:
Running
Running
Anirudh Esthuri
committed on
Commit
·
e4574f6
1
Parent(s):
e06b639
Revert gateway client endpoints
Browse files — gateway_client.py (+23, −41)
gateway_client.py
CHANGED
|
@@ -12,64 +12,46 @@ EXAMPLE_SERVER_PORT = os.getenv("MEMORY_SERVER_URL")
|
|
| 12 |
|
| 13 |
def ingest_and_rewrite(user_id: str, query: str, model_type: str = "openai") -> str:
|
| 14 |
"""Pass a raw user message through the memory server and get context-aware response."""
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
"query": query,
|
| 18 |
-
"model_type": model_type,
|
| 19 |
-
"skip_rewrite": False,
|
| 20 |
-
}
|
| 21 |
resp = requests.post(
|
| 22 |
f"{EXAMPLE_SERVER_PORT}/memory/store-and-search",
|
| 23 |
-
params=
|
| 24 |
-
timeout=
|
| 25 |
)
|
| 26 |
resp.raise_for_status()
|
| 27 |
-
|
| 28 |
-
return
|
| 29 |
|
| 30 |
|
| 31 |
def add_session_message(user_id: str, msg: str) -> None:
|
| 32 |
"""Add a raw message into memory via memory server."""
|
| 33 |
-
params = {
|
| 34 |
-
"persona_name": user_id,
|
| 35 |
-
"message": msg,
|
| 36 |
-
"source": "frontend",
|
| 37 |
-
}
|
| 38 |
requests.post(
|
| 39 |
f"{EXAMPLE_SERVER_PORT}/memory",
|
| 40 |
-
params=
|
| 41 |
-
timeout=
|
| 42 |
)
|
| 43 |
|
| 44 |
|
| 45 |
def create_persona_query(user_id: str, query: str) -> str:
|
| 46 |
"""Create a persona-aware query by searching memory context via memory server."""
|
| 47 |
-
|
| 48 |
-
"
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
timeout=30,
|
| 56 |
)
|
| 57 |
resp.raise_for_status()
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
if memories:
|
| 66 |
-
summary += "Memories:\n"
|
| 67 |
-
for memory in memories:
|
| 68 |
-
summary += f"- {memory.get('content', '')}\n"
|
| 69 |
-
|
| 70 |
-
if summary:
|
| 71 |
-
return f"{summary}\nQuery: {query}"
|
| 72 |
-
return f"Query: {query}"
|
| 73 |
|
| 74 |
|
| 75 |
def add_new_session_message(user_id: str, msg: str) -> None:
|
|
|
|
| 12 |
|
| 13 |
def ingest_and_rewrite(user_id: str, query: str, model_type: str = "openai") -> str:
    """Pass a raw user message through the memory server and get a context-aware response.

    Args:
        user_id: Identifier of the user whose memory context is used.
        query: Raw user message to store and search against.
        model_type: Kept for interface compatibility; this endpoint revision
            does not forward it to the server.

    Returns:
        The raw response body (``resp.text``) from the memory server.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
    """
    # Removed leftover debug print("entered ingest_and_rewrite") from the block.
    resp = requests.post(
        f"{EXAMPLE_SERVER_PORT}/memory/store-and-search",
        # NOTE(review): model_type is intentionally not sent here — confirm the
        # reverted endpoint no longer accepts it.
        params={"user_id": user_id, "query": query},
        # NOTE(review): 1000 s is effectively unbounded — confirm intended.
        timeout=1000,
    )
    resp.raise_for_status()

    return resp.text
|
| 27 |
def add_session_message(user_id: str, msg: str) -> None:
    """Store a raw session message on the memory server (fire-and-forget).

    The HTTP response is deliberately ignored: the caller gets no signal on
    failure and no exception is raised for error statuses.
    """
    # NOTE(review): the message travels under the "query" key — verify this
    # matches the server's expected parameter name.
    payload = {"user_id": user_id, "query": msg}
    requests.post(f"{EXAMPLE_SERVER_PORT}/memory", params=payload, timeout=5)
| 36 |
def create_persona_query(user_id: str, query: str) -> str:
    """Build a persona-aware prompt by fetching memory context from the memory server.

    Queries the server's /memory endpoint and, when the JSON response carries a
    truthy "profile" entry, prefixes the query with that profile summary.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
    """
    search_params = {
        "query": query,
        "user_id": user_id,
        # NOTE(review): naive local time — confirm the server does not expect UTC.
        "timestamp": datetime.now().isoformat(),
    }
    resp = requests.get(
        f"{EXAMPLE_SERVER_PORT}/memory",
        params=search_params,
        timeout=1000,
    )
    resp.raise_for_status()

    search_results = resp.json()

    # Guard-style: fall through to the bare query when no profile is present.
    if not search_results.get("profile"):
        return f"Query: {query}"
    return f"Based on your profile: {search_results['profile']}\n\nQuery: {query}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
|
| 56 |
|
| 57 |
def add_new_session_message(user_id: str, msg: str) -> None:
|