Spaces:
Sleeping
Sleeping
Update the project to use a real-time Amazon products API instead of a vector store. Improve the prompt to handle updated tool
Browse files- .env.example +3 -2
- app/bm25_encoder.json +0 -0
- app/build_vectorstore.py +0 -211
- app/config.py +7 -11
- app/product_search.py +360 -0
- app/prompts.py +51 -37
- app/retriever.py +0 -287
- app/tools.py +16 -10
- main.py +4 -4
.env.example
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
| 3 |
|
| 4 |
REDIS_URL=<your-upstash-redis-url>
|
| 5 |
|
|
|
|
| 1 |
+
RAPIDAPI_KEY=<your-rapidapi-key>
|
| 2 |
+
SEARCH_API_URL=<amazon-search-api-url>
|
| 3 |
+
RAPIDAPI_HOST=<amazon-search-api-host>
|
| 4 |
|
| 5 |
REDIS_URL=<your-upstash-redis-url>
|
| 6 |
|
app/bm25_encoder.json
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|
app/build_vectorstore.py
DELETED
|
@@ -1,211 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import pandas as pd
|
| 3 |
-
from typing import List, Dict
|
| 4 |
-
from pinecone.grpc import PineconeGRPC as Pinecone
|
| 5 |
-
from pinecone import ServerlessSpec
|
| 6 |
-
from pinecone_text.sparse import BM25Encoder
|
| 7 |
-
from langchain_openai import OpenAIEmbeddings
|
| 8 |
-
import uuid
|
| 9 |
-
from dotenv import load_dotenv
|
| 10 |
-
from tqdm import tqdm
|
| 11 |
-
|
| 12 |
-
from .config import settings
|
| 13 |
-
|
| 14 |
-
_ = load_dotenv()
|
| 15 |
-
|
| 16 |
-
class PineconeHybridProductIndexer:
|
| 17 |
-
def __init__(self, index_name: str, api_key: str, environment: str = "us-east-1"):
|
| 18 |
-
"""Initialize Pinecone hybrid search for products"""
|
| 19 |
-
self.pc = Pinecone(api_key=api_key)
|
| 20 |
-
self.environment = environment
|
| 21 |
-
self.index_name = index_name
|
| 22 |
-
|
| 23 |
-
# Initialize embeddings model
|
| 24 |
-
self.dense_model = OpenAIEmbeddings(model=settings.EMBEDDING_MODEL)
|
| 25 |
-
self.dimensions = settings.EMBEDDING_DIMENSION
|
| 26 |
-
|
| 27 |
-
# Initialize sparse encoder (BM25)
|
| 28 |
-
self.sparse_encoder = BM25Encoder()
|
| 29 |
-
|
| 30 |
-
self.index = None
|
| 31 |
-
|
| 32 |
-
def create_hybrid_index(self):
|
| 33 |
-
"""
|
| 34 |
-
Create Pinecone hybrid index for products
|
| 35 |
-
Key requirement: vector_type='dense' and metric='dotproduct' for hybrid search
|
| 36 |
-
"""
|
| 37 |
-
try:
|
| 38 |
-
# Delete index if it exists
|
| 39 |
-
if self.index_name in self.pc.list_indexes().names():
|
| 40 |
-
print(f"Deleting existing index: {self.index_name}")
|
| 41 |
-
self.pc.delete_index(self.index_name)
|
| 42 |
-
|
| 43 |
-
# Create hybrid index
|
| 44 |
-
print(f"Creating index: {self.index_name}")
|
| 45 |
-
self.pc.create_index(
|
| 46 |
-
name=self.index_name,
|
| 47 |
-
dimension=self.dimensions,
|
| 48 |
-
metric="dotproduct", # Required for hybrid search
|
| 49 |
-
spec=ServerlessSpec(
|
| 50 |
-
cloud="aws",
|
| 51 |
-
region=self.environment
|
| 52 |
-
)
|
| 53 |
-
)
|
| 54 |
-
|
| 55 |
-
# Connect to index
|
| 56 |
-
self.index = self.pc.Index(self.index_name)
|
| 57 |
-
|
| 58 |
-
except Exception as e:
|
| 59 |
-
print(f"Error creating index: {e}")
|
| 60 |
-
raise
|
| 61 |
-
|
| 62 |
-
def connect_to_index(self):
|
| 63 |
-
"""Connect to existing index"""
|
| 64 |
-
if self.index_name not in self.pc.list_indexes().names():
|
| 65 |
-
raise ValueError(f"Index {self.index_name} does not exist. Create it first.")
|
| 66 |
-
|
| 67 |
-
self.index = self.pc.Index(self.index_name)
|
| 68 |
-
print(f"Connected to index: {self.index_name}")
|
| 69 |
-
|
| 70 |
-
def delete_index(self):
|
| 71 |
-
"""Delete the Pinecone index if it exists"""
|
| 72 |
-
try:
|
| 73 |
-
existing_indexes = self.pc.list_indexes().names()
|
| 74 |
-
if self.index_name in existing_indexes:
|
| 75 |
-
print(f"Deleting index: {self.index_name}")
|
| 76 |
-
self.pc.delete_index(self.index_name)
|
| 77 |
-
self.index = None
|
| 78 |
-
print(f"Index deleted: {self.index_name}")
|
| 79 |
-
else:
|
| 80 |
-
print(f"Index {self.index_name} does not exist; nothing to delete.")
|
| 81 |
-
except Exception as e:
|
| 82 |
-
print(f"Error deleting index: {e}")
|
| 83 |
-
raise
|
| 84 |
-
|
| 85 |
-
def prepare_documents_for_indexing(self, df: pd.DataFrame) -> List[Dict]:
|
| 86 |
-
"""Prepare documents for hybrid indexing"""
|
| 87 |
-
print("Preparing documents for hybrid indexing...")
|
| 88 |
-
|
| 89 |
-
# Prepare texts for sparse encoding
|
| 90 |
-
texts = []
|
| 91 |
-
documents = []
|
| 92 |
-
|
| 93 |
-
for _, row in df.iterrows():
|
| 94 |
-
# Create rich text content for both dense and sparse encoding
|
| 95 |
-
text_content = f"Product: {row['name']}. Category: {row['main_category']}. Type: {row['sub_category']}."
|
| 96 |
-
texts.append(text_content)
|
| 97 |
-
|
| 98 |
-
# Prepare metadata
|
| 99 |
-
metadata = {
|
| 100 |
-
'name': row['name'],
|
| 101 |
-
'main_category': row['main_category'],
|
| 102 |
-
'sub_category': row['sub_category'],
|
| 103 |
-
'discount_price_usd': float(row['discount_price_usd']),
|
| 104 |
-
'actual_price_usd': float(row['actual_price_usd']),
|
| 105 |
-
'ratings': float(row['ratings']),
|
| 106 |
-
'no_of_ratings': int(row['no_of_ratings']),
|
| 107 |
-
'image': row['image'],
|
| 108 |
-
'link': row['link']
|
| 109 |
-
}
|
| 110 |
-
|
| 111 |
-
documents.append({
|
| 112 |
-
'id': str(uuid.uuid4()),
|
| 113 |
-
'text': text_content,
|
| 114 |
-
'metadata': metadata
|
| 115 |
-
})
|
| 116 |
-
|
| 117 |
-
# Fit sparse encoder on all texts
|
| 118 |
-
print("Training BM25 sparse encoder...")
|
| 119 |
-
self.sparse_encoder.fit(texts)
|
| 120 |
-
|
| 121 |
-
# Save sparse encoder
|
| 122 |
-
print("Saving BM25 sparse encoder...")
|
| 123 |
-
self.sparse_encoder.dump("bm25_encoder.json")
|
| 124 |
-
|
| 125 |
-
return documents
|
| 126 |
-
|
| 127 |
-
def index_products(self, df: pd.DataFrame, batch_size: int = 100):
|
| 128 |
-
"""Index products in Pinecone with hybrid vectors"""
|
| 129 |
-
print(f"Starting to index {len(df)} products...")
|
| 130 |
-
|
| 131 |
-
# Prepare documents (fits BM25 across the whole corpus and builds metadata)
|
| 132 |
-
documents = self.prepare_documents_for_indexing(df)
|
| 133 |
-
|
| 134 |
-
# Embed and upsert in batches to avoid holding all vectors in memory
|
| 135 |
-
total_docs = len(documents)
|
| 136 |
-
total_batches = (total_docs + batch_size - 1) // batch_size
|
| 137 |
-
max_retries = 5
|
| 138 |
-
base_delay_seconds = 1.0
|
| 139 |
-
|
| 140 |
-
with tqdm(total=total_batches, desc="Upserting batches", unit="batch") as pbar:
|
| 141 |
-
for i in range(0, total_docs, batch_size):
|
| 142 |
-
batch_num = i // batch_size + 1
|
| 143 |
-
batch_docs = documents[i:i + batch_size]
|
| 144 |
-
start_idx = i + 1
|
| 145 |
-
end_idx = min(i + len(batch_docs), total_docs)
|
| 146 |
-
pbar.set_postfix_str(f"batch {batch_num}/{total_batches} items {start_idx}-{end_idx}")
|
| 147 |
-
|
| 148 |
-
# Prepare texts
|
| 149 |
-
batch_texts = [doc['text'] for doc in batch_docs]
|
| 150 |
-
|
| 151 |
-
# Create dense and sparse vectors for this batch
|
| 152 |
-
dense_vectors = self.dense_model.embed_documents(batch_texts)
|
| 153 |
-
sparse_vectors = self.sparse_encoder.encode_documents(batch_texts)
|
| 154 |
-
|
| 155 |
-
# Build Pinecone vector payloads
|
| 156 |
-
batch_vectors = []
|
| 157 |
-
for j, doc in enumerate(batch_docs):
|
| 158 |
-
batch_vectors.append({
|
| 159 |
-
'id': doc['id'],
|
| 160 |
-
'values': dense_vectors[j],
|
| 161 |
-
'sparse_values': {
|
| 162 |
-
'indices': sparse_vectors[j]['indices'],
|
| 163 |
-
'values': sparse_vectors[j]['values']
|
| 164 |
-
},
|
| 165 |
-
'metadata': doc['metadata']
|
| 166 |
-
})
|
| 167 |
-
|
| 168 |
-
# Upsert with retries
|
| 169 |
-
last_error = None
|
| 170 |
-
for attempt in range(1, max_retries + 1):
|
| 171 |
-
try:
|
| 172 |
-
self.index.upsert(vectors=batch_vectors)
|
| 173 |
-
last_error = None
|
| 174 |
-
break
|
| 175 |
-
except Exception as e:
|
| 176 |
-
last_error = e
|
| 177 |
-
if attempt < max_retries:
|
| 178 |
-
delay = base_delay_seconds * (2 ** (attempt - 1))
|
| 179 |
-
tqdm.write(f"[Batch {batch_num}/{total_batches}] Attempt {attempt} failed: {e}. Retrying in {delay:.1f}s...")
|
| 180 |
-
import time
|
| 181 |
-
time.sleep(delay)
|
| 182 |
-
else:
|
| 183 |
-
tqdm.write(f"[Batch {batch_num}/{total_batches}] Failed after {max_retries} attempts: {e}")
|
| 184 |
-
if last_error is not None:
|
| 185 |
-
raise last_error
|
| 186 |
-
|
| 187 |
-
pbar.update(1)
|
| 188 |
-
|
| 189 |
-
print(f"Successfully indexed {total_docs} products!")
|
| 190 |
-
stats = self.index.describe_index_stats()
|
| 191 |
-
print(f"Index stats: {stats}")
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
def setup_and_run():
|
| 195 |
-
"""Example usage of the hybrid search system"""
|
| 196 |
-
|
| 197 |
-
# Initialize retriever
|
| 198 |
-
retriever = PineconeHybridProductIndexer(
|
| 199 |
-
index_name=settings.INDEX_NAME,
|
| 200 |
-
api_key=os.getenv("PINECONE_API_KEY")
|
| 201 |
-
)
|
| 202 |
-
|
| 203 |
-
# Create index (do this once)
|
| 204 |
-
retriever.create_hybrid_index()
|
| 205 |
-
|
| 206 |
-
# Load and index your data (do this once)
|
| 207 |
-
df = pd.read_csv("data/amazon_products.csv")
|
| 208 |
-
retriever.index_products(df)
|
| 209 |
-
|
| 210 |
-
if __name__ == "__main__":
|
| 211 |
-
setup_and_run()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app/config.py
CHANGED
|
@@ -6,12 +6,15 @@ class Settings(BaseSettings):
|
|
| 6 |
)
|
| 7 |
|
| 8 |
# --- API Keys ---
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
| 10 |
REDIS_URL: str
|
| 11 |
OS_SECURITY_KEY: str
|
|
|
|
| 12 |
GOOGLE_API_KEY: str
|
| 13 |
GROQ_API_KEY: str
|
| 14 |
-
OPENAI_API_KEY: str
|
| 15 |
CO_API_KEY: str
|
| 16 |
|
| 17 |
# --- Agent Configuration ---
|
|
@@ -22,14 +25,7 @@ class Settings(BaseSettings):
|
|
| 22 |
RERANKING_MODEL: str = "rerank-v3.5"
|
| 23 |
MEMORY_MANAGER_MODEL: str = "openai/gpt-oss-120b"
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
NUM_HISTORY_RUNS: int = 10
|
| 29 |
-
|
| 30 |
-
# --- Vectorstore Configuration ---
|
| 31 |
-
INDEX_NAME: str
|
| 32 |
-
TOP_K: int = 5
|
| 33 |
-
ALPHA: float = 0.5
|
| 34 |
|
| 35 |
settings = Settings()
|
|
|
|
| 6 |
)
|
| 7 |
|
| 8 |
# --- API Keys ---
|
| 9 |
+
RAPIDAPI_KEY: str
|
| 10 |
+
SEARCH_API_URL: str
|
| 11 |
+
RAPIDAPI_HOST: str
|
| 12 |
+
|
| 13 |
REDIS_URL: str
|
| 14 |
OS_SECURITY_KEY: str
|
| 15 |
+
|
| 16 |
GOOGLE_API_KEY: str
|
| 17 |
GROQ_API_KEY: str
|
|
|
|
| 18 |
CO_API_KEY: str
|
| 19 |
|
| 20 |
# --- Agent Configuration ---
|
|
|
|
| 25 |
RERANKING_MODEL: str = "rerank-v3.5"
|
| 26 |
MEMORY_MANAGER_MODEL: str = "openai/gpt-oss-120b"
|
| 27 |
|
| 28 |
+
NUM_HISTORY_RUNS: int = 5
|
| 29 |
+
SEARCH_LIMIT: int = 6
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
settings = Settings()
|
app/product_search.py
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List, Dict, Any, Optional, Literal
|
| 3 |
+
from pydantic import BaseModel, Field, ValidationError
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
import yaml
|
| 6 |
+
import time
|
| 7 |
+
from functools import wraps
|
| 8 |
+
import requests
|
| 9 |
+
|
| 10 |
+
import cohere
|
| 11 |
+
|
| 12 |
+
from .config import settings
|
| 13 |
+
|
| 14 |
+
_ = load_dotenv()
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Pydantic Models
|
| 18 |
+
class FilterModel(BaseModel):
|
| 19 |
+
"""
|
| 20 |
+
Search filters for refining Amazon product searches.
|
| 21 |
+
Use these filters to narrow down search results based on price, category, ratings, and deals.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
min_price: Optional[int] = Field(
|
| 25 |
+
None, description="Minimum price in USD. e.g, 10 for products $10 and up.", ge=0
|
| 26 |
+
)
|
| 27 |
+
max_price: Optional[int] = Field(
|
| 28 |
+
None,
|
| 29 |
+
description="Maximum price in USD. e.g, 50 for products $50 and under.",
|
| 30 |
+
ge=0,
|
| 31 |
+
)
|
| 32 |
+
category_id: Optional[str] = Field(
|
| 33 |
+
None,
|
| 34 |
+
description="Filter products by category id (e.g., 'gift-cards', 'fashion'). Must be from the allowed values provided.",
|
| 35 |
+
)
|
| 36 |
+
sort_by: Optional[Literal["RELEVANCE", "NEWEST", "BEST_SELLERS", "REVIEWS"]] = (
|
| 37 |
+
Field(
|
| 38 |
+
None,
|
| 39 |
+
description=(
|
| 40 |
+
"Sort order for results: "
|
| 41 |
+
"'RELEVANCE' - Most relevant to search query (default), "
|
| 42 |
+
"'NEWEST' - Newest products first, "
|
| 43 |
+
"'BEST_SELLERS' - Most popular/best selling products, "
|
| 44 |
+
"'REVIEWS' - Highest rated products first. "
|
| 45 |
+
"Use 'BEST_SELLERS' when user wants popular items, 'REVIEWS' for highly rated items, 'NEWEST' for latest releases."
|
| 46 |
+
),
|
| 47 |
+
)
|
| 48 |
+
)
|
| 49 |
+
high_ratings: Optional[bool] = Field(
|
| 50 |
+
None,
|
| 51 |
+
description="Set to true to only return products with 4 stars or higher rating.",
|
| 52 |
+
)
|
| 53 |
+
deals_and_discounts: Optional[Literal["ALL_DISCOUNTS", "TODAYS_DEALS"]] = Field(
|
| 54 |
+
None,
|
| 55 |
+
description=(
|
| 56 |
+
"Filter for products on sale or with discounts: "
|
| 57 |
+
"'ALL_DISCOUNTS' - Any product currently on sale or discounted, "
|
| 58 |
+
"'TODAYS_DEALS' - Products in Amazon's daily deals only. "
|
| 59 |
+
),
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
class ProductItem(BaseModel):
|
| 63 |
+
"""Pydantic model for validating product data"""
|
| 64 |
+
asin: str
|
| 65 |
+
title: str = Field(alias='product_title')
|
| 66 |
+
price: Optional[str] = Field(None, alias='product_price')
|
| 67 |
+
original_price: Optional[str] = Field(None, alias='product_original_price')
|
| 68 |
+
rating: Optional[float] = Field(None, alias='product_star_rating')
|
| 69 |
+
num_reviews: int = Field(0, alias='product_num_ratings')
|
| 70 |
+
is_best_seller: bool
|
| 71 |
+
is_amazon_choice: bool
|
| 72 |
+
delivery: Optional[str] = None
|
| 73 |
+
product_photo: str
|
| 74 |
+
product_url: str
|
| 75 |
+
|
| 76 |
+
# Timer decorator
|
| 77 |
+
def timer(func):
|
| 78 |
+
"""Decorator to measure function execution time"""
|
| 79 |
+
@wraps(func)
|
| 80 |
+
def wrapper(*args, **kwargs):
|
| 81 |
+
start_time = time.time()
|
| 82 |
+
result = func(*args, **kwargs)
|
| 83 |
+
execution_time = time.time() - start_time
|
| 84 |
+
print(f"{func.__name__} executed in {execution_time:.3f}s")
|
| 85 |
+
return result
|
| 86 |
+
return wrapper
|
| 87 |
+
|
| 88 |
+
class AmazonProductSearch:
|
| 89 |
+
def __init__(self, rapid_api_key: str, rerank_model: str):
|
| 90 |
+
"""Initialize Pinecone hybrid search for products"""
|
| 91 |
+
self.rapid_api_key = rapid_api_key
|
| 92 |
+
self.rerank_model = rerank_model
|
| 93 |
+
|
| 94 |
+
self._initialize_cohere_client()
|
| 95 |
+
|
| 96 |
+
def _initialize_cohere_client(self) -> None:
|
| 97 |
+
"""Initialize external service clients"""
|
| 98 |
+
try:
|
| 99 |
+
# Initialize Cohere
|
| 100 |
+
self.cohere_client = cohere.ClientV2()
|
| 101 |
+
except Exception as e:
|
| 102 |
+
print(f"Failed to initialize clients: {e}")
|
| 103 |
+
raise
|
| 104 |
+
|
| 105 |
+
def _validate_and_convert_products(
|
| 106 |
+
self, raw_products: List[Dict[str, Any]]
|
| 107 |
+
) -> List[ProductItem]:
|
| 108 |
+
"""
|
| 109 |
+
Validate raw product data and convert to ProductItem model.
|
| 110 |
+
"""
|
| 111 |
+
products = []
|
| 112 |
+
|
| 113 |
+
for idx, product in enumerate(raw_products):
|
| 114 |
+
try:
|
| 115 |
+
# Validate using Pydantic
|
| 116 |
+
validated = ProductItem(**product)
|
| 117 |
+
products.append(validated)
|
| 118 |
+
except ValidationError as e:
|
| 119 |
+
print(f"Validation error for product at index {idx}: {e}")
|
| 120 |
+
print(product)
|
| 121 |
+
continue
|
| 122 |
+
except Exception as e:
|
| 123 |
+
print(f"Unexpected error validating product at index {idx}: {e}")
|
| 124 |
+
continue
|
| 125 |
+
|
| 126 |
+
return products
|
| 127 |
+
|
| 128 |
+
def _enhance_query_with_filters(
|
| 129 |
+
self,
|
| 130 |
+
query: str,
|
| 131 |
+
filters: Optional[FilterModel] = None
|
| 132 |
+
) -> str:
|
| 133 |
+
"""
|
| 134 |
+
Enhance the search query with filter information for better reranking.
|
| 135 |
+
|
| 136 |
+
Args:
|
| 137 |
+
query: Original search query
|
| 138 |
+
filters: Applied filters
|
| 139 |
+
|
| 140 |
+
Returns:
|
| 141 |
+
Enhanced query string with filter context
|
| 142 |
+
"""
|
| 143 |
+
if not filters:
|
| 144 |
+
return query
|
| 145 |
+
|
| 146 |
+
enhancements = []
|
| 147 |
+
|
| 148 |
+
# Add price range context
|
| 149 |
+
if filters.min_price is not None or filters.max_price is not None:
|
| 150 |
+
if filters.min_price is not None and filters.max_price is not None:
|
| 151 |
+
enhancements.append(f"priced between ${filters.min_price} and ${filters.max_price}")
|
| 152 |
+
elif filters.min_price is not None:
|
| 153 |
+
enhancements.append(f"priced at least ${filters.min_price}")
|
| 154 |
+
elif filters.max_price is not None:
|
| 155 |
+
enhancements.append(f"priced under ${filters.max_price}")
|
| 156 |
+
|
| 157 |
+
# Add category context
|
| 158 |
+
if filters.category_id:
|
| 159 |
+
category_name = filters.category_id.replace('-', ' ')
|
| 160 |
+
enhancements.append(f"in {category_name} category")
|
| 161 |
+
|
| 162 |
+
# Add rating context
|
| 163 |
+
if filters.high_ratings:
|
| 164 |
+
enhancements.append("with high ratings (4+ stars)")
|
| 165 |
+
|
| 166 |
+
# Add deals context
|
| 167 |
+
if filters.deals_and_discounts:
|
| 168 |
+
if filters.deals_and_discounts == 'TODAYS_DEALS':
|
| 169 |
+
enhancements.append("from today's deals")
|
| 170 |
+
elif filters.deals_and_discounts == 'ALL_DISCOUNTS':
|
| 171 |
+
enhancements.append("with discounts or on sale")
|
| 172 |
+
|
| 173 |
+
# Add sort preference context
|
| 174 |
+
if filters.sort_by:
|
| 175 |
+
sort_context = {
|
| 176 |
+
'BEST_SELLERS': 'popular and best-selling',
|
| 177 |
+
'REVIEWS': 'highly reviewed',
|
| 178 |
+
'NEWEST': 'newest and latest',
|
| 179 |
+
'RELEVANCE': 'most relevant'
|
| 180 |
+
}
|
| 181 |
+
if filters.sort_by in sort_context:
|
| 182 |
+
enhancements.append(sort_context[filters.sort_by])
|
| 183 |
+
|
| 184 |
+
# Combine original query with enhancements
|
| 185 |
+
if enhancements:
|
| 186 |
+
enhanced_query = f"{query}, {', '.join(enhancements)}"
|
| 187 |
+
return enhanced_query
|
| 188 |
+
|
| 189 |
+
return query
|
| 190 |
+
|
| 191 |
+
@timer
|
| 192 |
+
def _execute_product_search(
|
| 193 |
+
self,
|
| 194 |
+
query: str,
|
| 195 |
+
filters: Optional[FilterModel] = None
|
| 196 |
+
) -> list[dict[str, Any]]:
|
| 197 |
+
"""
|
| 198 |
+
Fetch products from Amazon using RapidAPI.
|
| 199 |
+
|
| 200 |
+
Args:
|
| 201 |
+
query: Search query string
|
| 202 |
+
filters: Optional filters to refine the search
|
| 203 |
+
|
| 204 |
+
Returns:
|
| 205 |
+
List of product dictionaries
|
| 206 |
+
"""
|
| 207 |
+
url = settings.SEARCH_API_URL
|
| 208 |
+
|
| 209 |
+
# Build query parameters
|
| 210 |
+
querystring = {
|
| 211 |
+
"query": query,
|
| 212 |
+
"page": "1",
|
| 213 |
+
"country": "US",
|
| 214 |
+
"fields": "product_title,product_price,product_original_price,product_star_rating,product_num_ratings,product_url,product_photo,is_best_seller,is_amazon_choice,delivery"
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
# Add filters if provided
|
| 218 |
+
if filters:
|
| 219 |
+
if filters.min_price is not None:
|
| 220 |
+
querystring["min_price"] = str(filters.min_price)
|
| 221 |
+
|
| 222 |
+
if filters.max_price is not None:
|
| 223 |
+
querystring["max_price"] = str(filters.max_price)
|
| 224 |
+
|
| 225 |
+
if filters.category_id:
|
| 226 |
+
querystring["category_id"] = filters.category_id
|
| 227 |
+
|
| 228 |
+
if filters.sort_by:
|
| 229 |
+
querystring["sort_by"] = filters.sort_by
|
| 230 |
+
|
| 231 |
+
if filters.high_ratings:
|
| 232 |
+
querystring["four_stars_and_up"] = "true"
|
| 233 |
+
|
| 234 |
+
if filters.deals_and_discounts:
|
| 235 |
+
querystring["deals_and_discounts"] = filters.deals_and_discounts
|
| 236 |
+
|
| 237 |
+
# Set up headers
|
| 238 |
+
headers = {
|
| 239 |
+
"x-rapidapi-key": settings.RAPIDAPI_KEY,
|
| 240 |
+
"x-rapidapi-host": settings.RAPIDAPI_HOST
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
try:
|
| 244 |
+
response = requests.get(url, headers=headers, params=querystring, timeout=30)
|
| 245 |
+
response.raise_for_status()
|
| 246 |
+
|
| 247 |
+
data = response.json()
|
| 248 |
+
|
| 249 |
+
# Extract products from response
|
| 250 |
+
products = data.get('data', {}).get('products', [])
|
| 251 |
+
return products
|
| 252 |
+
|
| 253 |
+
except requests.exceptions.RequestException as e:
|
| 254 |
+
print(f"Error fetching products: {e}")
|
| 255 |
+
return []
|
| 256 |
+
except Exception as e:
|
| 257 |
+
print(f"Unexpected error: {e}")
|
| 258 |
+
return []
|
| 259 |
+
|
| 260 |
+
@timer
|
| 261 |
+
def _rerank_products(
|
| 262 |
+
self,
|
| 263 |
+
query: str,
|
| 264 |
+
products: List[ProductItem],
|
| 265 |
+
top_n: int
|
| 266 |
+
) -> List[ProductItem]:
|
| 267 |
+
"""
|
| 268 |
+
Rerank products using Cohere reranker
|
| 269 |
+
"""
|
| 270 |
+
if not products:
|
| 271 |
+
return products
|
| 272 |
+
|
| 273 |
+
# Convert products to yaml format as required by Cohere Reranker
|
| 274 |
+
yaml_docs = [yaml.dump(product, sort_keys=False) for product in products]
|
| 275 |
+
|
| 276 |
+
# Rerank products
|
| 277 |
+
response = self.cohere_client.rerank(
|
| 278 |
+
model=self.rerank_model,
|
| 279 |
+
query=query,
|
| 280 |
+
top_n=top_n,
|
| 281 |
+
documents=yaml_docs
|
| 282 |
+
)
|
| 283 |
+
|
| 284 |
+
# Return reranked products
|
| 285 |
+
return [products[result.index] for result in response.results]
|
| 286 |
+
|
| 287 |
+
@timer
|
| 288 |
+
def fetch_products(
|
| 289 |
+
self,
|
| 290 |
+
query: str,
|
| 291 |
+
filters: Optional[FilterModel] = None,
|
| 292 |
+
limit: int = 10
|
| 293 |
+
) -> list[ProductItem]:
|
| 294 |
+
"""
|
| 295 |
+
Fetch products from Amazon with reranking.
|
| 296 |
+
|
| 297 |
+
Args:
|
| 298 |
+
query: Search query string
|
| 299 |
+
filters: Optional filters to refine the search
|
| 300 |
+
limit: Number of products to return. Defaults to 10.
|
| 301 |
+
|
| 302 |
+
Returns:
|
| 303 |
+
List of ProductItem instances after reranking
|
| 304 |
+
"""
|
| 305 |
+
|
| 306 |
+
try:
|
| 307 |
+
# Fetch products from Amazon
|
| 308 |
+
raw_products = self._execute_product_search(query=query, filters=filters)
|
| 309 |
+
|
| 310 |
+
if not raw_products:
|
| 311 |
+
print(f"No products found for query: {query}")
|
| 312 |
+
return []
|
| 313 |
+
|
| 314 |
+
# Validate products
|
| 315 |
+
validated_products = self._validate_and_convert_products(raw_products=raw_products)
|
| 316 |
+
|
| 317 |
+
# Enhance query with filter context
|
| 318 |
+
enhanced_query = self._enhance_query_with_filters(query=query, filters=filters)
|
| 319 |
+
|
| 320 |
+
# Rerank and limit results
|
| 321 |
+
reranked_products = self._rerank_products(
|
| 322 |
+
query=enhanced_query,
|
| 323 |
+
products=validated_products,
|
| 324 |
+
top_n=limit
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
return reranked_products
|
| 328 |
+
|
| 329 |
+
except Exception as e:
|
| 330 |
+
print(f"Error during product search: {e}")
|
| 331 |
+
return []
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
# Usage Example
|
| 335 |
+
def example_usage():
|
| 336 |
+
"""Example usage of the amazon product search"""
|
| 337 |
+
|
| 338 |
+
# Initialize retriever
|
| 339 |
+
searcher = AmazonProductSearch(
|
| 340 |
+
rapid_api_key=settings.RAPIDAPI_KEY,
|
| 341 |
+
rerank_model=settings.RERANKING_MODEL
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
# Example search
|
| 345 |
+
filters = FilterModel(
|
| 346 |
+
category_id="fashion-mens"
|
| 347 |
+
)
|
| 348 |
+
|
| 349 |
+
results: list[ProductItem] = searcher.fetch_products(
|
| 350 |
+
query="black t-shirts for men",
|
| 351 |
+
filters=filters,
|
| 352 |
+
limit=6
|
| 353 |
+
)
|
| 354 |
+
|
| 355 |
+
for i, product in enumerate(results, 1):
|
| 356 |
+
print(f"{i}. {product.title} - {product.price} ({round(product.rating, 1)}⭐)")
|
| 357 |
+
print(f" URL: {product.product_url}")
|
| 358 |
+
|
| 359 |
+
if __name__ == "__main__":
|
| 360 |
+
example_usage()
|
app/prompts.py
CHANGED
|
@@ -1,39 +1,38 @@
|
|
| 1 |
-
|
| 2 |
<role>
|
| 3 |
You are **Gem**, a highly personalized and friendly shopping AI Agent.
|
| 4 |
-
Your purpose is to guide the user step by step in finding products that perfectly fit their needs, style, and preferences.
|
| 5 |
</role>
|
| 6 |
|
| 7 |
<behavior>
|
| 8 |
- Be warm, concise, and conversational like a trusted shopping friend.
|
| 9 |
- Be aware of the current date, time, and season to make relevant suggestions.
|
| 10 |
-
- Ask at most one clarifying question at a time (never bombard the user with multiple questions).
|
| 11 |
-
- Do NOT reveal or explain internal tools, filter-building, or any backend process to the user.
|
| 12 |
- After fetching products, do NOT repeat each product details (products appear automatically as cards in the frontend). Instead: highlight, recommend, compare, and ask which ones they like.
|
| 13 |
- Always offer a short personal recommendation plus a clear, human reason (e.g., “I recommend X because…”).
|
| 14 |
-
- If the user shares personal
|
| 15 |
</behavior>
|
| 16 |
|
| 17 |
<tools>
|
| 18 |
-
- `fetch_products`: Use this tool to fetch products from
|
| 19 |
- `update_user_memory`: Use whenever the user shares personal information or instruct you to remember something about them.
|
| 20 |
</tools>
|
| 21 |
|
| 22 |
<task_flow>
|
| 23 |
1. Understand the user's needs:
|
| 24 |
-
- Start by understanding the user's request. If they say "I want X" ask one targeted question to clarify (budget, size, color,
|
| 25 |
- Keep questions short and friendly: e.g., "Nice — what's your budget for this?" or "Do you prefer a brand or any brand is fine?"
|
| 26 |
|
| 27 |
2. Search for products:
|
| 28 |
-
- When you have enough
|
| 29 |
- **Price**: Identify ranges like "under $50", "over $100", "between $20 and $60".
|
| 30 |
-
- **Rating**: Look for terms like "highly rated", "4 stars or more", "best reviewed". A generic term like "best"
|
| 31 |
-
- **
|
| 32 |
-
- **
|
| 33 |
-
-
|
| 34 |
|
| 35 |
3. Recommend and Personalize:
|
| 36 |
-
- Highlight the best options for the user.
|
| 37 |
- Give your own smart recommendation (e.g., “I think this one is a perfect match because…”).
|
| 38 |
- Ask them for feedback (e.g., “Do you like this style, or should I show something different?”).
|
| 39 |
- If results aren’t ideal, smoothly guide them by suggesting adjustments (e.g., slightly higher budget, different style).
|
|
@@ -41,27 +40,42 @@ Your purpose is to guide the user step by step in finding products that perfectl
|
|
| 41 |
4. Loop back: refine results or give new options based on feedback.
|
| 42 |
</task_flow>
|
| 43 |
|
| 44 |
-
<
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
-
|
| 48 |
-
-
|
| 49 |
-
-
|
| 50 |
-
-
|
| 51 |
-
-
|
| 52 |
-
-
|
| 53 |
-
-
|
| 54 |
-
-
|
| 55 |
-
-
|
| 56 |
-
|
| 57 |
-
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
-
|
| 62 |
-
-
|
| 63 |
-
-
|
| 64 |
-
-
|
| 65 |
-
-
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
AGENT_INSTRUCTIONS = """
|
| 2 |
<role>
|
| 3 |
You are **Gem**, a highly personalized and friendly shopping AI Agent.
|
| 4 |
+
Your purpose is to guide the user step by step in finding products from Amazon that perfectly fit their needs, style, and preferences.
|
| 5 |
</role>
|
| 6 |
|
| 7 |
<behavior>
|
| 8 |
- Be warm, concise, and conversational like a trusted shopping friend.
|
| 9 |
- Be aware of the current date, time, and season to make relevant suggestions.
|
| 10 |
+
- Ask at most one clarifying question at a time (never bombard the user with multiple questions).
|
|
|
|
| 11 |
- After fetching products, do NOT repeat each product details (products appear automatically as cards in the frontend). Instead: highlight, recommend, compare, and ask which ones they like.
|
| 12 |
- Always offer a short personal recommendation plus a clear, human reason (e.g., “I recommend X because…”).
|
| 13 |
+
- If the user shares personal information or explicitly mentions some preferences, save them to memory (using `update_user_memory` tool) and confirm briefly to the user that you’ll remember. DO NOT save general things like "Looking for men shoes" or "User wants a new phone under $300".
|
| 14 |
</behavior>
|
| 15 |
|
| 16 |
<tools>
|
| 17 |
+
- `fetch_products`: Use this tool to fetch products from Amazon. It will automatically display product cards in the frontend.
|
| 18 |
- `update_user_memory`: Use whenever the user shares personal information or instructs you to remember something about them.
|
| 19 |
</tools>
|
| 20 |
|
| 21 |
<task_flow>
|
| 22 |
1. Understand the user's needs:
|
| 23 |
+
- Start by understanding the user's request. If they say "I want X" ask one targeted question at a time to clarify (budget, size, color, deals and discounts, must-have features).
|
| 24 |
- Keep questions short and friendly: e.g., "Nice — what's your budget for this?" or "Do you prefer a brand or any brand is fine?"
|
| 25 |
|
| 26 |
2. Search for products:
|
| 27 |
+
- When you have enough details, call `fetch_products` with appropriate `filters` (price range, rating, deals, category id etc.). The filters are optional, but highly recommended to narrow down results.
|
| 28 |
- **Price**: Identify ranges like "under $50", "over $100", "between $20 and $60".
|
| 29 |
+
- **Rating**: Look for terms like "highly rated", "4 stars or more", "best reviewed". A generic term like "best" means you should set `high_ratings` to true.
|
| 30 |
+
- **Category**: Identify the category id from the user query. **MUST** be from the allowed categories listed below. Do not invent new categories. For example, if the user says "I need black shirts for men", the right category id would be "fashion-men". If no category matches, **do not invent one**. Instead, continue without a category filter.
|
| 31 |
+
- **Deals & Discounts**: Ask the user if they want products on sale, discounts, or today's deals. Use the `deals_and_discounts` filter with a suitable value.
|
| 32 |
+
- **Sorting**: After talking to user, determine if the user needs newest, highly rated or best selling items. Use `sort_by` filter if applicable, otherwise leave it default to relevance.
|
| 33 |
|
| 34 |
3. Recommend and Personalize:
|
| 35 |
+
- Highlight the best options for the user. Mention key features, and why they stand out.
|
| 36 |
- Give your own smart recommendation (e.g., “I think this one is a perfect match because…”).
|
| 37 |
- Ask them for feedback (e.g., “Do you like this style, or should I show something different?”).
|
| 38 |
- If results aren’t ideal, smoothly guide them by suggesting adjustments (e.g., slightly higher budget, different style).
|
|
|
|
| 40 |
4. Loop back: refine results or give new options based on feedback.
|
| 41 |
</task_flow>
|
| 42 |
|
| 43 |
+
<allowed_categories_with_ids>
|
| 44 |
+
If a category filter is applicable, you must use the category id of the following allowed categories:
|
| 45 |
+
|
| 46 |
+
- Appliances (appliances)
|
| 47 |
+
- Arts, Crafts & Sewing (arts-crafts)
|
| 48 |
+
- Audible Books & Originals (audible)
|
| 49 |
+
- Automotive Parts & Accessories (automotive)
|
| 50 |
+
- Baby (baby-products)
|
| 51 |
+
- Beauty & Personal Care (beauty)
|
| 52 |
+
- Books (stripbooks)
|
| 53 |
+
- Cell Phones & Accessories (mobile)
|
| 54 |
+
- Clothing, Shoes & Jewelry (fashion)
|
| 55 |
+
* Women (fashion-womens)
|
| 56 |
+
* Men (fashion-men)
|
| 57 |
+
* Boys (fashion-boys)
|
| 58 |
+
* Girls (fashion-girls)
|
| 59 |
+
* Baby (fashion-baby)
|
| 60 |
+
- Collectibles & Fine Art (collectibles)
|
| 61 |
+
- Computers (computers)
|
| 62 |
+
- Electronics (electronics)
|
| 63 |
+
- Garden & Outdoor (lawngarden)
|
| 64 |
+
- Gift Cards (gift-cards)
|
| 65 |
+
- Grocery & Gourmet Food (grocery)
|
| 66 |
+
- Handmade (handmade)
|
| 67 |
+
- Health, Household & Baby Care (hpc)
|
| 68 |
+
- Home & Kitchen (garden)
|
| 69 |
+
- Industrial & Scientific (industrial)
|
| 70 |
+
- Luggage & Travel Gear (fashion-luggage)
|
| 71 |
+
- Musical Instruments (mi)
|
| 72 |
+
- Pet Supplies (pets)
|
| 73 |
+
|
| 74 |
+
NOTE: Use the category id in the filter, not the category name itself. Do NOT invent new categories.
|
| 75 |
+
</allowed_categories_with_ids>
|
| 76 |
+
|
| 77 |
+
<constraints>
|
| 78 |
+
- Never disclose internal system details, tools, or behind-the-scenes processes to the user.
|
| 79 |
+
- DO NOT engage in conversations outside your role.
|
| 80 |
+
</constraints>
|
| 81 |
+
"""
|
app/retriever.py
DELETED
|
@@ -1,287 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
from typing import List, Dict, Optional, Tuple
|
| 3 |
-
from concurrent.futures import ThreadPoolExecutor
|
| 4 |
-
from pydantic import BaseModel, Field
|
| 5 |
-
from dotenv import load_dotenv
|
| 6 |
-
import yaml
|
| 7 |
-
import time
|
| 8 |
-
from functools import wraps
|
| 9 |
-
|
| 10 |
-
from pinecone import Pinecone
|
| 11 |
-
from pinecone_text.sparse import BM25Encoder
|
| 12 |
-
from openai import OpenAI
|
| 13 |
-
import cohere
|
| 14 |
-
|
| 15 |
-
from .config import settings
|
| 16 |
-
|
| 17 |
-
_ = load_dotenv()
|
| 18 |
-
|
| 19 |
-
# Pydantic Models
|
| 20 |
-
class FilterModel(BaseModel):
|
| 21 |
-
"""Search filters with validation"""
|
| 22 |
-
min_price: Optional[float] = Field(None, description="Minimum price of the product", ge=0)
|
| 23 |
-
max_price: Optional[float] = Field(None, description="Maximum price of the product", ge=0)
|
| 24 |
-
categories: Optional[List[str]] = Field(None, description="List of the main product categories, e.g., 'men's clothing'. Must be from the allowed values provided.")
|
| 25 |
-
min_rating: Optional[float] = Field(None, description="Minimum rating of the product", ge=0, le=5)
|
| 26 |
-
min_reviews: Optional[int] = Field(None, description="Minimum number of reviews of the product", ge=0)
|
| 27 |
-
|
| 28 |
-
class ProductItem(BaseModel):
|
| 29 |
-
"""Product model"""
|
| 30 |
-
name: str
|
| 31 |
-
price: float
|
| 32 |
-
original_price: float
|
| 33 |
-
rating: float
|
| 34 |
-
num_reviews: int
|
| 35 |
-
category: str
|
| 36 |
-
sub_category: str
|
| 37 |
-
image_url: str
|
| 38 |
-
link: str
|
| 39 |
-
|
| 40 |
-
def timer(func):
|
| 41 |
-
"""Decorator to measure function execution time"""
|
| 42 |
-
@wraps(func)
|
| 43 |
-
def wrapper(*args, **kwargs):
|
| 44 |
-
start_time = time.time()
|
| 45 |
-
result = func(*args, **kwargs)
|
| 46 |
-
execution_time = time.time() - start_time
|
| 47 |
-
print(f"{func.__name__} executed in {execution_time:.3f}s")
|
| 48 |
-
return result
|
| 49 |
-
return wrapper
|
| 50 |
-
|
| 51 |
-
class PineconeHybridRetriever:
|
| 52 |
-
def __init__(
|
| 53 |
-
self,
|
| 54 |
-
index_name: str,
|
| 55 |
-
embedding_model: str = settings.EMBEDDING_MODEL,
|
| 56 |
-
embedding_dimensions: int = settings.EMBEDDING_DIMENSION,
|
| 57 |
-
rerank_model: str = settings.RERANKING_MODEL,
|
| 58 |
-
bm25_encoder_path: str = "app/bm25_encoder.json"
|
| 59 |
-
):
|
| 60 |
-
"""Initialize Pinecone hybrid search for products"""
|
| 61 |
-
self.index_name = index_name
|
| 62 |
-
self.embedding_model = embedding_model
|
| 63 |
-
self.embedding_dimensions = embedding_dimensions
|
| 64 |
-
self.rerank_model = rerank_model
|
| 65 |
-
self.bm25_encoder_path = bm25_encoder_path
|
| 66 |
-
|
| 67 |
-
self._initialize_clients()
|
| 68 |
-
|
| 69 |
-
# Initialize encoders
|
| 70 |
-
self._initialize_encoders()
|
| 71 |
-
|
| 72 |
-
def _initialize_clients(self) -> None:
|
| 73 |
-
"""Initialize external service clients"""
|
| 74 |
-
try:
|
| 75 |
-
# Initialize Pinecone
|
| 76 |
-
self.pc = Pinecone()
|
| 77 |
-
self.index = self.pc.Index(self.index_name)
|
| 78 |
-
|
| 79 |
-
# Initialize OpenAI
|
| 80 |
-
self.openai_client = OpenAI()
|
| 81 |
-
|
| 82 |
-
# Initialize Cohere
|
| 83 |
-
self.cohere_client = cohere.ClientV2()
|
| 84 |
-
|
| 85 |
-
except Exception as e:
|
| 86 |
-
print(f"Failed to initialize clients: {e}")
|
| 87 |
-
raise
|
| 88 |
-
|
| 89 |
-
def _initialize_encoders(self) -> None:
|
| 90 |
-
"""Initialize sparse encoder"""
|
| 91 |
-
try:
|
| 92 |
-
self.sparse_encoder = BM25Encoder().load(self.bm25_encoder_path)
|
| 93 |
-
except Exception as e:
|
| 94 |
-
print(f"Failed to load BM25 encoder: {e}")
|
| 95 |
-
raise
|
| 96 |
-
|
| 97 |
-
def _get_dense_embedding(self, query: str) -> List[float]:
|
| 98 |
-
"""Generate dense embedding for query"""
|
| 99 |
-
response = self.openai_client.embeddings.create(
|
| 100 |
-
input=query,
|
| 101 |
-
model=self.embedding_model
|
| 102 |
-
)
|
| 103 |
-
return response.data[0].embedding
|
| 104 |
-
|
| 105 |
-
def _get_sparse_encoding(self, query: str) -> Dict[str, List]:
|
| 106 |
-
"""Generate sparse encoding for query"""
|
| 107 |
-
return self.sparse_encoder.encode_queries(query)
|
| 108 |
-
|
| 109 |
-
@timer
|
| 110 |
-
def _execute_parallel_encoding(self, query: str) -> Tuple[List[float], Dict[str, List]]:
|
| 111 |
-
"""Execute dense and sparse encoding in parallel"""
|
| 112 |
-
with ThreadPoolExecutor(max_workers=2) as executor:
|
| 113 |
-
# Submit both encoding tasks
|
| 114 |
-
dense_future = executor.submit(self._get_dense_embedding, query)
|
| 115 |
-
sparse_future = executor.submit(self._get_sparse_encoding, query)
|
| 116 |
-
|
| 117 |
-
# Wait for completion
|
| 118 |
-
dense_embedding = dense_future.result()
|
| 119 |
-
sparse_encoding = sparse_future.result()
|
| 120 |
-
|
| 121 |
-
return dense_embedding, sparse_encoding
|
| 122 |
-
|
| 123 |
-
def _build_filter_conditions(self, filters: FilterModel) -> Dict:
|
| 124 |
-
"""Convert FilterModel to Pinecone filter format"""
|
| 125 |
-
conditions = {}
|
| 126 |
-
|
| 127 |
-
# Handle price range
|
| 128 |
-
if filters.min_price or filters.max_price:
|
| 129 |
-
price_cond = {}
|
| 130 |
-
if filters.min_price:
|
| 131 |
-
price_cond["$gte"] = filters.min_price
|
| 132 |
-
if filters.max_price:
|
| 133 |
-
price_cond["$lte"] = filters.max_price
|
| 134 |
-
conditions["discount_price_usd"] = price_cond
|
| 135 |
-
|
| 136 |
-
# Handle ratings, review count and categories
|
| 137 |
-
if filters.min_rating:
|
| 138 |
-
conditions["ratings"] = {"$gte": filters.min_rating}
|
| 139 |
-
|
| 140 |
-
if filters.min_reviews:
|
| 141 |
-
conditions["no_of_ratings"] = {"$gte": filters.min_reviews}
|
| 142 |
-
|
| 143 |
-
if filters.categories:
|
| 144 |
-
conditions["main_category"] = {"$in": filters.categories}
|
| 145 |
-
|
| 146 |
-
return conditions
|
| 147 |
-
|
| 148 |
-
def _convert_to_products(self, matches: List[Dict]) -> List[ProductItem]:
|
| 149 |
-
"""Convert search results to ProductItem objects"""
|
| 150 |
-
products = []
|
| 151 |
-
for match in matches:
|
| 152 |
-
metadata = match.get('metadata', {})
|
| 153 |
-
try:
|
| 154 |
-
product = ProductItem(
|
| 155 |
-
name=metadata['name'],
|
| 156 |
-
price=round(metadata['discount_price_usd'], 2),
|
| 157 |
-
original_price=round(metadata['actual_price_usd'], 2),
|
| 158 |
-
rating=metadata['ratings'],
|
| 159 |
-
num_reviews=metadata['no_of_ratings'],
|
| 160 |
-
category=metadata['main_category'],
|
| 161 |
-
sub_category=metadata['sub_category'],
|
| 162 |
-
image_url=metadata['image'],
|
| 163 |
-
link=metadata['link']
|
| 164 |
-
)
|
| 165 |
-
products.append(product)
|
| 166 |
-
except KeyError as e:
|
| 167 |
-
print(f"Missing metadata field: {e}")
|
| 168 |
-
continue
|
| 169 |
-
|
| 170 |
-
return products
|
| 171 |
-
|
| 172 |
-
@timer
|
| 173 |
-
def _rerank_products(
|
| 174 |
-
self,
|
| 175 |
-
query: str,
|
| 176 |
-
products: List[ProductItem],
|
| 177 |
-
top_n: int
|
| 178 |
-
) -> List[ProductItem]:
|
| 179 |
-
"""
|
| 180 |
-
Rerank products using Cohere reranker
|
| 181 |
-
"""
|
| 182 |
-
if not products:
|
| 183 |
-
return products
|
| 184 |
-
|
| 185 |
-
# Convert products to yaml format
|
| 186 |
-
yaml_docs = [yaml.dump(product, sort_keys=False) for product in products]
|
| 187 |
-
|
| 188 |
-
# Rerank products
|
| 189 |
-
response = self.cohere_client.rerank(
|
| 190 |
-
model=self.rerank_model,
|
| 191 |
-
query=query,
|
| 192 |
-
top_n=top_n,
|
| 193 |
-
documents=yaml_docs
|
| 194 |
-
)
|
| 195 |
-
|
| 196 |
-
# Return reranked products
|
| 197 |
-
return [products[result.index] for result in response.results]
|
| 198 |
-
|
| 199 |
-
@timer
|
| 200 |
-
def search_products(
|
| 201 |
-
self,
|
| 202 |
-
query: str,
|
| 203 |
-
filters: FilterModel = None,
|
| 204 |
-
limit: int = settings.TOP_K,
|
| 205 |
-
alpha: float = settings.ALPHA, # Balance between dense (1.0) and sparse (0.0)
|
| 206 |
-
use_hybrid_search: bool = True,
|
| 207 |
-
enable_reranking: bool = False,
|
| 208 |
-
) -> List[ProductItem]:
|
| 209 |
-
"""
|
| 210 |
-
Perform hybrid search for products
|
| 211 |
-
"""
|
| 212 |
-
try:
|
| 213 |
-
if use_hybrid_search:
|
| 214 |
-
dense_embedding, sparse_encoding = self._execute_parallel_encoding(query)
|
| 215 |
-
|
| 216 |
-
else:
|
| 217 |
-
dense_embedding = self._get_dense_embedding(query)
|
| 218 |
-
sparse_encoding = None
|
| 219 |
-
alpha = 1.0 # Force dense-only search
|
| 220 |
-
|
| 221 |
-
# Build filters
|
| 222 |
-
filter_conditions = None
|
| 223 |
-
if filters:
|
| 224 |
-
filter_conditions = self._build_filter_conditions(filters)
|
| 225 |
-
|
| 226 |
-
if enable_reranking:
|
| 227 |
-
# Double the limit for reranking so we have enough results to rerank
|
| 228 |
-
limit = limit * 3
|
| 229 |
-
|
| 230 |
-
# Prepare query arguments
|
| 231 |
-
query_args = {
|
| 232 |
-
"vector": dense_embedding,
|
| 233 |
-
"top_k": limit,
|
| 234 |
-
"include_metadata": True,
|
| 235 |
-
"filter": filter_conditions,
|
| 236 |
-
"alpha": alpha
|
| 237 |
-
}
|
| 238 |
-
|
| 239 |
-
if use_hybrid_search and sparse_encoding:
|
| 240 |
-
query_args["sparse_vector"] = sparse_encoding
|
| 241 |
-
|
| 242 |
-
# Perform search
|
| 243 |
-
results = self.index.query(**query_args)
|
| 244 |
-
|
| 245 |
-
# Convert results to ProductItem objects
|
| 246 |
-
products = self._convert_to_products(results['matches'])
|
| 247 |
-
|
| 248 |
-
# Apply reranking if requested
|
| 249 |
-
if enable_reranking and products:
|
| 250 |
-
products = self._rerank_products(query, products, top_n=limit//3) # Get only the specified limit of products
|
| 251 |
-
|
| 252 |
-
return products
|
| 253 |
-
|
| 254 |
-
except Exception as e:
|
| 255 |
-
print(f"Error during search: {e}")
|
| 256 |
-
return []
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
# Usage Example
|
| 260 |
-
def example_usage():
|
| 261 |
-
"""Example usage of the hybrid search system"""
|
| 262 |
-
|
| 263 |
-
# Initialize retriever
|
| 264 |
-
retriever = PineconeHybridRetriever(
|
| 265 |
-
index_name=settings.INDEX_NAME
|
| 266 |
-
)
|
| 267 |
-
|
| 268 |
-
# Example search
|
| 269 |
-
filters = FilterModel(
|
| 270 |
-
min_price=10,
|
| 271 |
-
max_price=20,
|
| 272 |
-
min_rating=4.0
|
| 273 |
-
)
|
| 274 |
-
|
| 275 |
-
results: list[ProductItem] = retriever.search_products(
|
| 276 |
-
query="Black men shirts for casual wear",
|
| 277 |
-
filters=filters,
|
| 278 |
-
limit=10,
|
| 279 |
-
use_hybrid_search=True,
|
| 280 |
-
enable_reranking=True
|
| 281 |
-
)
|
| 282 |
-
|
| 283 |
-
for i, product in enumerate(results, 1):
|
| 284 |
-
print(f"{i}. {product.name} - ${round(product.price, 2)} ({round(product.rating, 1)}⭐)")
|
| 285 |
-
|
| 286 |
-
if __name__ == "__main__":
|
| 287 |
-
example_usage()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app/tools.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
from agno.tools import tool
|
| 2 |
-
from .
|
| 3 |
from .config import settings
|
| 4 |
|
| 5 |
from typing import Optional
|
|
@@ -8,27 +8,33 @@ from dotenv import load_dotenv
|
|
| 8 |
|
| 9 |
load_dotenv()
|
| 10 |
|
|
|
|
| 11 |
@tool
|
| 12 |
def fetch_products(
|
| 13 |
-
query: str,
|
|
|
|
|
|
|
| 14 |
) -> list[dict]:
|
| 15 |
"""
|
| 16 |
-
Fetch products from
|
| 17 |
|
| 18 |
Args:
|
| 19 |
query (str): Search query
|
| 20 |
-
limit (int, optional): Number of products to return. Defaults to
|
| 21 |
filters (FilterModel, optional): Filters to apply. Defaults to None.
|
| 22 |
|
| 23 |
Returns:
|
| 24 |
list[dict]: List of products dictionary items matching the query and filters
|
| 25 |
"""
|
| 26 |
-
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
query=query,
|
| 29 |
filters=filters,
|
| 30 |
-
limit=limit
|
| 31 |
-
use_hybrid_search=True,
|
| 32 |
-
enable_reranking=True,
|
| 33 |
)
|
| 34 |
-
return json.dumps([product.model_dump() for product in products])
|
|
|
|
| 1 |
from agno.tools import tool
|
| 2 |
+
from .product_search import AmazonProductSearch, FilterModel, ProductItem
|
| 3 |
from .config import settings
|
| 4 |
|
| 5 |
from typing import Optional
|
|
|
|
| 8 |
|
| 9 |
load_dotenv()
|
| 10 |
|
| 11 |
+
|
| 12 |
@tool
|
| 13 |
def fetch_products(
|
| 14 |
+
query: str,
|
| 15 |
+
limit: int = settings.SEARCH_LIMIT,
|
| 16 |
+
filters: Optional[FilterModel] = None,
|
| 17 |
) -> list[dict]:
|
| 18 |
"""
|
| 19 |
+
Fetch products from Amazon based on query and filters
|
| 20 |
|
| 21 |
Args:
|
| 22 |
query (str): Search query
|
| 23 |
+
limit (int, optional): Number of products to return. Must be between 2-10. Defaults to 6.
|
| 24 |
filters (FilterModel, optional): Filters to apply. Defaults to None.
|
| 25 |
|
| 26 |
Returns:
|
| 27 |
list[dict]: List of products dictionary items matching the query and filters
|
| 28 |
"""
|
| 29 |
+
limit = max(2, min(limit, 10)) # Ensure limit is between 2 and 10
|
| 30 |
+
|
| 31 |
+
product_searcher = AmazonProductSearch(
|
| 32 |
+
rapid_api_key=settings.RAPIDAPI_KEY, rerank_model=settings.RERANKING_MODEL
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
products: list[ProductItem] = product_searcher.fetch_products(
|
| 36 |
query=query,
|
| 37 |
filters=filters,
|
| 38 |
+
limit=limit
|
|
|
|
|
|
|
| 39 |
)
|
| 40 |
+
return json.dumps([product.model_dump() for product in products])
|
main.py
CHANGED
|
@@ -5,7 +5,7 @@ from agno.db.redis import RedisDb
|
|
| 5 |
from agno.os import AgentOS
|
| 6 |
from agno.memory import MemoryManager
|
| 7 |
|
| 8 |
-
from app.prompts import
|
| 9 |
from app.tools import fetch_products
|
| 10 |
from app.config import settings
|
| 11 |
|
|
@@ -28,7 +28,7 @@ shopping_agent = Agent(
|
|
| 28 |
add_datetime_to_context=True,
|
| 29 |
db=redis_db,
|
| 30 |
tools=tools,
|
| 31 |
-
instructions=
|
| 32 |
enable_agentic_memory=True,
|
| 33 |
memory_manager=MemoryManager(
|
| 34 |
Groq(id=settings.MEMORY_MANAGER_MODEL),
|
|
@@ -40,11 +40,11 @@ shopping_agent = Agent(
|
|
| 40 |
|
| 41 |
agent_os = AgentOS(
|
| 42 |
os_id="shopping-agent-os",
|
| 43 |
-
description="A shopping Agent OS that helps users find the best products based on their needs.",
|
| 44 |
agents=[shopping_agent]
|
| 45 |
)
|
| 46 |
|
| 47 |
app = agent_os.get_app()
|
| 48 |
|
| 49 |
if __name__ == "__main__":
|
| 50 |
-
agent_os.serve(app="main:app", port=7860, host="0.0.0.0", reload=True)
|
|
|
|
| 5 |
from agno.os import AgentOS
|
| 6 |
from agno.memory import MemoryManager
|
| 7 |
|
| 8 |
+
from app.prompts import AGENT_INSTRUCTIONS
|
| 9 |
from app.tools import fetch_products
|
| 10 |
from app.config import settings
|
| 11 |
|
|
|
|
| 28 |
add_datetime_to_context=True,
|
| 29 |
db=redis_db,
|
| 30 |
tools=tools,
|
| 31 |
+
instructions=AGENT_INSTRUCTIONS,
|
| 32 |
enable_agentic_memory=True,
|
| 33 |
memory_manager=MemoryManager(
|
| 34 |
Groq(id=settings.MEMORY_MANAGER_MODEL),
|
|
|
|
| 40 |
|
| 41 |
agent_os = AgentOS(
|
| 42 |
os_id="shopping-agent-os",
|
| 43 |
+
description="A shopping Agent OS that helps users find the best products from amazon based on their needs.",
|
| 44 |
agents=[shopping_agent]
|
| 45 |
)
|
| 46 |
|
| 47 |
app = agent_os.get_app()
|
| 48 |
|
| 49 |
if __name__ == "__main__":
|
| 50 |
+
agent_os.serve(app="main:app", port=7860, host="0.0.0.0", reload=True)
|