Commit 7231663
jxp committed
Parent(s): a86e3b2

Langgraph and llamaindex examples
Files changed:
- langgraph/lg-test.py +258 -0
- llamaindex-examples/llamaindex-agent.py +34 -0
- llamaindex-examples/llamaindex-hf-inference.py +15 -0
- llamaindex-examples/llamaindex-query.py +48 -0
- pyproject.toml +6 -0
- uv.lock +0 -0
langgraph/lg-test.py
ADDED
@@ -0,0 +1,258 @@
from typing import TypedDict, List, Dict, Any, Optional
from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage


class EmailState(TypedDict):
    # The email being processed
    email: Dict[str, Any]  # Contains subject, sender, body, etc.

    # Category of the email (inquiry, complaint, etc.)
    email_category: Optional[str]

    # Reason why the email was marked as spam
    spam_reason: Optional[str]

    # Analysis and decisions
    is_spam: Optional[bool]

    # Response generation
    email_draft: Optional[str]

    # Processing metadata
    messages: List[Dict[str, Any]]


# Initialize our LLM
model = ChatOpenAI(temperature=0)


def read_email(state: EmailState):
    """Alfred reads and logs the incoming email"""
    email = state["email"]

    # Here we might do some initial preprocessing
    print(f"Alfred is processing an email from {email['sender']} with subject: {email['subject']}")

    # No state changes needed here
    return {}


def classify_email(state: EmailState):
    """Alfred uses an LLM to determine if the email is spam or legitimate"""
    email = state["email"]

    # Prepare our prompt for the LLM
    prompt = f"""
    As Alfred the butler, analyze this email and determine if it is spam or legitimate.

    Email:
    From: {email['sender']}
    Subject: {email['subject']}
    Body: {email['body']}

    First, determine if this email is spam or not.
    If it is spam, explain why.
    If it is legitimate, categorize it ('inquiry', 'complaint', 'thank you', 'request', 'information').

    Respond in the following format:
    ```
    <classification>spam/not spam</classification>
    <reason>[if spam]</reason>
    <category>[if not spam]</category>
    ```

    ### Example 1
    <classification>spam</classification>
    <reason>Unsolicited promotional content</reason>
    <category></category>

    ### Example 2
    <classification>not spam</classification>
    <reason></reason>
    <category>inquiry</category>
    """

    # Call the LLM
    messages = [HumanMessage(content=prompt)]
    response = model.invoke(messages)

    # Simple logic to parse the response (in a real app, you'd want more robust parsing)
    response_text = response.content.lower()

    # Parse classification
    classification = None
    if "<classification>" in response_text:
        classification = (
            response_text.split("<classification>")[1].split("</classification>")[0].strip()
        )

    is_spam = classification == "spam"

    # Extract a reason if it's spam
    spam_reason = None
    if is_spam and "<reason>" in response_text:
        spam_reason = response_text.split("<reason>")[1].split("</reason>")[0].strip()

    # Determine category if legitimate
    email_category = None
    if not is_spam:
        categories = ["inquiry", "complaint", "thank you", "request", "information"]
        for category in categories:
            if f"<category>{category}</category>" in response_text:
                email_category = category
                break

    # Update messages for tracking
    new_messages = state.get("messages", []) + [
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": response.content},
    ]

    # Return state updates
    return {
        "is_spam": is_spam,
        "spam_reason": spam_reason,
        "email_category": email_category,
        "messages": new_messages,
    }


def handle_spam(state: EmailState):
    """Alfred discards spam email with a note"""
    print(f"Alfred has marked the email as spam. Reason: {state['spam_reason']}")
    print("The email has been moved to the spam folder.")

    # We're done processing this email
    return {}


def draft_response(state: EmailState):
    """Alfred drafts a preliminary response for legitimate emails"""
    email = state["email"]
    category = state["email_category"] or "general"

    # Prepare our prompt for the LLM
    prompt = f"""
    As Alfred the butler, draft a polite preliminary response to this email.

    Email:
    From: {email['sender']}
    Subject: {email['subject']}
    Body: {email['body']}

    This email has been categorized as: {category}

    Draft a brief, professional response that Mr. Hugg can review and personalize before sending.
    """

    # Call the LLM
    messages = [HumanMessage(content=prompt)]
    response = model.invoke(messages)

    # Update messages for tracking
    new_messages = state.get("messages", []) + [
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": response.content},
    ]

    # Return state updates
    return {"email_draft": response.content, "messages": new_messages}


def notify_mr_hugg(state: EmailState):
    """Alfred notifies Mr. Hugg about the email and presents the draft response"""
    email = state["email"]

    print("\n" + "=" * 50)
    print(f"Sir, you've received an email from {email['sender']}.")
    print(f"Subject: {email['subject']}")
    print(f"Category: {state['email_category']}")
    print("\nI've prepared a draft response for your review:")
    print("-" * 50)
    print(state["email_draft"])
    print("=" * 50 + "\n")

    # We're done processing this email
    return {}


def route_email(state: EmailState) -> str:
    """Determine the next step based on spam classification"""
    if state["is_spam"]:
        return "spam"
    else:
        return "legitimate"


# Graph
email_graph = StateGraph(EmailState)


# Add nodes
email_graph.add_node("read_email", read_email)
email_graph.add_node("classify_email", classify_email)
email_graph.add_node("handle_spam", handle_spam)
email_graph.add_node("draft_response", draft_response)
email_graph.add_node("notify_mr_hugg", notify_mr_hugg)

# Start the edges
email_graph.add_edge(START, "read_email")
# Add edges - defining the flow
email_graph.add_edge("read_email", "classify_email")

# Add conditional branching from classify_email
email_graph.add_conditional_edges(
    "classify_email", route_email, {"spam": "handle_spam", "legitimate": "draft_response"}
)

# Add the final edges
email_graph.add_edge("handle_spam", END)
email_graph.add_edge("draft_response", "notify_mr_hugg")
email_graph.add_edge("notify_mr_hugg", END)

# Compile the graph
compiled_graph = email_graph.compile()

# Example legitimate email
legitimate_email = {
    "sender": "john.smith@example.com",
    "subject": "Question about your services",
    "body": "Dear Mr. Hugg, I was referred to you by a colleague and I'm interested in learning more about your consulting services. Could we schedule a call next week? Best regards, John Smith",
}


# Process the legitimate email
print("\nProcessing legitimate email...")
initial_state = {
    "email": legitimate_email,
    "is_spam": None,
    "spam_reason": None,
    "email_category": None,
    "email_draft": None,
    "messages": [],
}
# legitimate_result = compiled_graph.invoke(initial_state)
# print(legitimate_result)


# Example spam email
spam_email = {
    "sender": "winner@lottery-intl.com",
    "subject": "YOU HAVE WON $5,000,000!!!",
    "body": "CONGRATULATIONS! You have been selected as the winner of our international lottery! To claim your $5,000,000 prize, please send us your bank details and a processing fee of $100.",
}


# Process the spam email
print("\nProcessing spam email...")
spam_result = compiled_graph.invoke({
    "email": spam_email,
    "is_spam": None,
    "spam_reason": None,
    "email_category": None,
    "email_draft": None,
    "messages": [],
})
print(spam_result)
llamaindex-examples/llamaindex-agent.py
ADDED
@@ -0,0 +1,34 @@
import asyncio
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Context


def multiply_int(a: int, b: int) -> int:
    """Multiplies two integers and returns the resulting integer"""
    return a * b


with open("hf_token.txt") as f:
    hf_token = f.read().strip()

llm = HuggingFaceInferenceAPI(
    model_name="Qwen/Qwen2.5-Coder-32B-Instruct", token=hf_token, provider="auto"
)

agent = AgentWorkflow.from_tools_or_functions([FunctionTool.from_defaults(multiply_int)], llm=llm)
ctx = Context(agent)


async def run_agent(input_text: str) -> str:
    # agent.run() returns a workflow handler that must be awaited to get the result
    response = await agent.run(input_text)
    # Passing the shared Context carries conversation state across runs
    response = await agent.run("My name is Bob.", ctx=ctx)
    response = await agent.run("What was my name again?", ctx=ctx)
    return str(response)


response = asyncio.run(run_agent("What is 2 times 2?"))
print(response)
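
Because agent.run() hands back a workflow handler before resolving, the same agent can also stream intermediate output. A sketch, assuming the AgentStream event type exported by llama_index.core.agent.workflow:

from llama_index.core.agent.workflow import AgentStream

async def stream_agent(input_text: str) -> str:
    handler = agent.run(input_text, ctx=ctx)
    # Print token deltas as they arrive, then await the final result
    async for event in handler.stream_events():
        if isinstance(event, AgentStream):
            print(event.delta, end="", flush=True)
    return str(await handler)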
llamaindex-examples/llamaindex-hf-inference.py
ADDED
@@ -0,0 +1,15 @@
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

with open("hf_token.txt") as f:
    hf_token = f.read().strip()

llm = HuggingFaceInferenceAPI(
    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
    temperature=0.7,
    max_tokens=100,
    token=hf_token,
    provider="auto",
)

response = llm.complete("Write a Python function to add two numbers.")
print(response)
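
For longer generations, the same LLM object should also support token streaming through the standard llama_index completion interface (assuming the selected provider supports it); a minimal sketch:

# Streamed variant of the completion above; stream_complete yields chunks with a .delta field
for chunk in llm.stream_complete("Write a Python function to add two numbers."):
    print(chunk.delta, end="", flush=True)
print()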
llamaindex-examples/llamaindex-query.py
ADDED
@@ -0,0 +1,48 @@
import chromadb
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.evaluation import FaithfulnessEvaluator
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.vector_stores.chroma import ChromaVectorStore

with open("hf_token.txt") as f:
    hf_token = f.read().strip()

inference_model_name = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
db = chromadb.PersistentClient(path="./alfred_chroma_db")
chroma_collection = db.get_or_create_collection(name="alfred_collection")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
llm = HuggingFaceInferenceAPI(model_name=inference_model_name, token=hf_token, provider="auto")
query_engine = index.as_query_engine(llm=llm, response_mode="tree_summarize")

pipeline = IngestionPipeline(
    transformations=[
        SentenceSplitter(chunk_size=25, chunk_overlap=0),
        embed_model,
    ],
    vector_store=vector_store,
)

evaluator = FaithfulnessEvaluator(llm=llm)


def create_index():
    pipeline.run(documents=[Document.example()])


def query_index():
    response = query_engine.query("What is the meaning of life?")
    print(response)
    eval_result = evaluator.evaluate_response(response=response)
    print(eval_result.passing)


if __name__ == "__main__":
    create_index()
    query_index()
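
Because the Chroma client persists to ./alfred_chroma_db, create_index() re-ingests Document.example() on every run. A small guard (a sketch using chromadb's Collection.count()) would skip ingestion once the collection is populated:

# Only ingest when the persisted collection is empty
if chroma_collection.count() == 0:
    create_index()
query_index()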
pyproject.toml
CHANGED
@@ -6,4 +6,10 @@ readme = "README.md"
 requires-python = ">=3.12"
 dependencies = [
     "huggingface-hub[inference]>=0.34.4",
+    "langchain-openai>=0.3.31",
+    "langgraph>=0.6.6",
+    "llama-index-embeddings-huggingface>=0.6.0",
+    "llama-index-llms-huggingface-api>=0.6.0",
+    "llama-index-vector-stores-chroma>=0.5.2",
+    "python-dotenv>=1.1.1",
 ]
uv.lock
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
|