mishrabp committed on
Commit
49b2277
·
verified ·
1 Parent(s): 8aedc51

Upload folder using huggingface_hub

Browse files
pyproject.toml CHANGED
@@ -19,6 +19,7 @@ dependencies = [
19
  "langchain_google_genai>=3.0.3",
20
  "langchain_groq>=1.0.1",
21
 
 
22
  # =======================
23
  # LANGCHAIN / LANGGRAPH
24
  # =======================
@@ -30,6 +31,7 @@ dependencies = [
30
  "langchain-text-splitters>=1.0.0",
31
  "langchain-chroma>=1.0.0",
32
  "html2text>=2025.4.15",
 
33
 
34
  # =======================
35
  # VECTOR DB / INDEXING
@@ -119,10 +121,7 @@ dependencies = [
119
  # =======================
120
  # OBSERVABILITY
121
  # =======================
122
- "opentelemetry-api",
123
- "opentelemetry-sdk",
124
- "opentelemetry-exporter-otlp",
125
- "opentelemetry-instrumentation-openai",
126
  ]
127
 
128
  [dependency-groups]
 
19
  "langchain_google_genai>=3.0.3",
20
  "langchain_groq>=1.0.1",
21
 
22
+
23
  # =======================
24
  # LANGCHAIN / LANGGRAPH
25
  # =======================
 
31
  "langchain-text-splitters>=1.0.0",
32
  "langchain-chroma>=1.0.0",
33
  "html2text>=2025.4.15",
34
+ "langfuse>=2.0.0",
35
 
36
  # =======================
37
  # VECTOR DB / INDEXING
 
121
  # =======================
122
  # OBSERVABILITY
123
  # =======================
124
+
 
 
 
125
  ]
126
 
127
  [dependency-groups]
src/chatbot/aagents/orchestrator_agent.py CHANGED
@@ -8,6 +8,8 @@ from aagents.input_validation_agent import input_validation_guardrail
8
  from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool
9
  from openai import AsyncOpenAI
10
  from dotenv import load_dotenv
 
 
11
 
12
  load_dotenv()
13
 
@@ -15,6 +17,7 @@ load_dotenv()
15
  GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
16
  google_api_key = os.getenv("GOOGLE_API_KEY")
17
  gemini_client = AsyncOpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
 
18
  gemini_model = OpenAIChatCompletionsModel(
19
  model="gemini-2.0-flash",
20
  openai_client=gemini_client
 
8
  from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool
9
  from openai import AsyncOpenAI
10
  from dotenv import load_dotenv
11
+ from langsmith import wrappers
12
+
13
 
14
  load_dotenv()
15
 
 
17
  GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
18
  google_api_key = os.getenv("GOOGLE_API_KEY")
19
  gemini_client = AsyncOpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
20
+ gemini_client = wrappers.wrap_openai(gemini_client)
21
  gemini_model = OpenAIChatCompletionsModel(
22
  model="gemini-2.0-flash",
23
  openai_client=gemini_client
src/chatbot/app.py CHANGED
@@ -2,10 +2,24 @@ import os
2
  import glob
3
  import uuid
4
  import asyncio
 
 
5
  import streamlit as st
6
  from aagents.orchestrator_agent import orchestrator_agent
7
  from agents import Runner, trace, SQLiteSession
8
  from agents.exceptions import InputGuardrailTripwireTriggered
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  # -----------------------------
11
  # Configuration & Utils
@@ -16,63 +30,14 @@ st.set_page_config(
16
  page_icon="🤖"
17
  )
18
 
19
- # ------------------------------------------------------------------------------
20
- # OpenTelemetry Setup
21
- # ------------------------------------------------------------------------------
22
- from opentelemetry import trace as trace_api
23
- # from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
24
- from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
25
- from opentelemetry.instrumentation.openai import OpenAIInstrumentor
26
- from opentelemetry.sdk.resources import Resource
27
- from opentelemetry.sdk.trace import TracerProvider
28
- from opentelemetry.sdk.trace.export import BatchSpanProcessor
29
-
30
- otel_endpoint = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
31
-
32
- if otel_endpoint:
33
- enable_otel = True
34
- else:
35
- # Local development default
36
- otel_endpoint = "https://myotel.azurewebsites.net/v1/traces"
37
- enable_otel = True
38
-
39
- if enable_otel:
40
- try:
41
- # Monkeypatch the internals of the OpenAI instrumentation to fix "Invalid type Omit" error
42
- # This is necessary because the official instrumentation library doesn't handle 'Omit' types correctly
43
- # and blindly tries to set them as attributes, causing console errors.
44
- import opentelemetry.instrumentation.openai.shared
45
- original_set_span_attribute = opentelemetry.instrumentation.openai.shared._set_span_attribute
46
-
47
- def patched_set_span_attribute(span, name, value):
48
- # Check for Omit/NotGiven types by name to avoid importing internal types
49
- if value is not None and type(value).__name__ in ["Omit", "NotGiven"]:
50
- return
51
- original_set_span_attribute(span, name, value)
52
-
53
- opentelemetry.instrumentation.openai.shared._set_span_attribute = patched_set_span_attribute
54
-
55
- # Set up telemetry span exporter.
56
- # otel_exporter = OTLPSpanExporter(endpoint=otel_endpoint, insecure=True)
57
- otel_exporter = OTLPSpanExporter(endpoint=otel_endpoint)
58
- span_processor = BatchSpanProcessor(otel_exporter)
59
-
60
- # Set up telemetry trace provider.
61
- tracer_provider = TracerProvider(resource=Resource({"service.name": "chatbot"}))
62
- tracer_provider.add_span_processor(span_processor)
63
- trace_api.set_tracer_provider(tracer_provider)
64
-
65
- # Instrument the OpenAI Python library
66
- OpenAIInstrumentor().instrument()
67
- print(f"OpenTelemetry enabled with endpoint: {otel_endpoint}")
68
- except Exception as e:
69
- print(f"Failed to initialize OpenTelemetry: {e}")
70
- else:
71
- print("OpenTelemetry disabled (Running in HF Space with no configured endpoint).")
72
-
73
- # Get a tracer (works even if OTEL is disabled, returning a NoOp tracer)
74
- tracer = trace_api.get_tracer("chatbot")
75
 
 
 
 
 
76
 
77
  def load_prompts(folder="prompts"):
78
  prompts = []
@@ -83,7 +48,8 @@ def load_prompts(folder="prompts"):
83
  content = f.read().strip()
84
  if content:
85
  prompts.append(content)
86
- prompt_labels.append(os.path.basename(file_path).replace("_", " ").replace(".txt", "").title())
 
87
  return prompts, prompt_labels
88
 
89
  prompts, prompt_labels = load_prompts()
@@ -256,19 +222,18 @@ st.markdown("""
256
  # -----------------------------
257
  # Logic
258
  # -----------------------------
 
 
259
  async def get_ai_response(prompt: str) -> str:
260
  try:
261
  agent = orchestrator_agent
262
  # Ensure session is valid
263
  current_session = st.session_state.ai_session
264
  current_session = st.session_state.ai_session
265
- with trace("Chatbot Agent Run"): # Keep existing custom trace wrapper if it exists, or just use new tracer
266
- with tracer.start_as_current_span("get_ai_response") as span:
267
- span.set_attribute("input.prompt", prompt)
268
- # Run agent
269
- result = await Runner.run(agent, prompt, session=current_session)
270
- span.set_attribute("output.response", result.final_output)
271
- return result.final_output
272
  except InputGuardrailTripwireTriggered as e:
273
  reasoning = getattr(e, "reasoning", None) \
274
  or getattr(getattr(e, "output", None), "reasoning", None) \
@@ -333,6 +298,13 @@ if prompt := (st.chat_input("Type your message...") or selected_prompt):
333
  with st.spinner("Thinking..."):
334
  response_text = asyncio.run(get_ai_response(prompt))
335
  st.markdown(response_text)
 
 
 
 
 
 
 
336
 
337
  st.session_state.messages.append({"role": "assistant", "content": response_text})
338
 
 
2
  import glob
3
  import uuid
4
  import asyncio
5
+ import langfuse
6
+ import logging
7
  import streamlit as st
8
  from aagents.orchestrator_agent import orchestrator_agent
9
  from agents import Runner, trace, SQLiteSession
10
  from agents.exceptions import InputGuardrailTripwireTriggered
11
+ from langsmith import traceable
12
+
13
+ # Make Langfuse optional to avoid "Client will be disabled" errors
14
+ if os.environ.get("LANGFUSE_PUBLIC_KEY"):
15
+ from langfuse import observe
16
+ else:
17
+ # Dummy decorator if keys are missing
18
+ def observe(*args, **kwargs):
19
+ def decorator(func):
20
+ return func
21
+ return decorator
22
+
23
 
24
  # -----------------------------
25
  # Configuration & Utils
 
30
  page_icon="🤖"
31
  )
32
 
33
+ # Load environment variables explicitly
34
+ from dotenv import load_dotenv
35
+ load_dotenv(override=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
+ # Configure debug logging for Langfuse only if enabled
38
+ if os.environ.get("LANGFUSE_PUBLIC_KEY"):
39
+ logging.basicConfig()
40
+ logging.getLogger("langfuse").setLevel(logging.INFO)
41
 
42
  def load_prompts(folder="prompts"):
43
  prompts = []
 
48
  content = f.read().strip()
49
  if content:
50
  prompts.append(content)
51
+
52
+ prompt_labels.append(os.path.basename(file_path).replace("_", " ").replace(".txt", "").title())
53
  return prompts, prompt_labels
54
 
55
  prompts, prompt_labels = load_prompts()
 
222
  # -----------------------------
223
  # Logic
224
  # -----------------------------
225
+ @observe()
226
+ @traceable(name="Chatbot Interaction")
227
  async def get_ai_response(prompt: str) -> str:
228
  try:
229
  agent = orchestrator_agent
230
  # Ensure session is valid
231
  current_session = st.session_state.ai_session
232
  current_session = st.session_state.ai_session
233
+ with trace("Chatbot Agent Run"): # Keep existing custom trace wrapper
234
+ # Run agent
235
+ result = await Runner.run(agent, prompt, session=current_session)
236
+ return result.final_output
 
 
 
237
  except InputGuardrailTripwireTriggered as e:
238
  reasoning = getattr(e, "reasoning", None) \
239
  or getattr(getattr(e, "output", None), "reasoning", None) \
 
298
  with st.spinner("Thinking..."):
299
  response_text = asyncio.run(get_ai_response(prompt))
300
  st.markdown(response_text)
301
+
302
+ # Ensure traces are sent before the script may stop/rerun
303
+ if os.environ.get("LANGFUSE_PUBLIC_KEY"):
304
+ try:
305
+ langfuse.Langfuse().flush()
306
+ except:
307
+ pass
308
 
309
  st.session_state.messages.append({"role": "assistant", "content": response_text})
310
 
src/chatbot/test_trace.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Manual smoke test for Langfuse tracing.

Runs a single @observe-decorated function, then flushes the Langfuse client
so the trace is uploaded before the process exits. Requires the LANGFUSE_*
environment variables (loaded from a local .env file).
"""
import logging
import os
import sys
import time

from dotenv import load_dotenv

# Load LANGFUSE_* credentials before importing langfuse, which reads the
# environment at client initialisation.
load_dotenv(override=True)

# Surface langfuse's background-uploader activity on stdout for debugging.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger("langfuse").setLevel(logging.DEBUG)

from langfuse import Langfuse, observe  # noqa: E402  (env vars must be loaded first)

# 1. Print config so a missing/misread key is immediately visible.
print(f"Host: {os.environ.get('LANGFUSE_HOST')}")
print(f"Public Key: {os.environ.get('LANGFUSE_PUBLIC_KEY')}")


# 2. A trivial observed function: the decorator should emit exactly one trace.
@observe(name="test-trace-script")
def run_test() -> str:
    print("Executing observed function...")
    time.sleep(0.1)
    return "Test successful"


# 3. Run it.
run_test()

# 4. Flush explicitly instead of sleeping and hoping the background thread
# finishes in time: Langfuse() returns the process-wide client, and flush()
# blocks until the queued events have been sent.
try:
    client = Langfuse()
    client.flush()
    print("Flush called on client instance.")
except Exception as e:
    print(f"Flush failed: {e}")

print("Script finished.")
uv.lock CHANGED
@@ -35,6 +35,7 @@ dependencies = [
35
  { name = "langchain-ollama" },
36
  { name = "langchain-openai" },
37
  { name = "langchain-text-splitters" },
 
38
  { name = "langgraph" },
39
  { name = "langgraph-checkpoint-sqlite" },
40
  { name = "langsmith" },
@@ -45,10 +46,6 @@ dependencies = [
45
  { name = "openai" },
46
  { name = "openai-agents" },
47
  { name = "openai-whisper" },
48
- { name = "opentelemetry-api" },
49
- { name = "opentelemetry-exporter-otlp" },
50
- { name = "opentelemetry-instrumentation-openai" },
51
- { name = "opentelemetry-sdk" },
52
  { name = "playwright" },
53
  { name = "plotly" },
54
  { name = "polygon-api-client" },
@@ -108,6 +105,7 @@ requires-dist = [
108
  { name = "langchain-ollama", specifier = ">=1.0.0" },
109
  { name = "langchain-openai", specifier = ">=1.0.3" },
110
  { name = "langchain-text-splitters", specifier = ">=1.0.0" },
 
111
  { name = "langgraph", specifier = ">=1.0.3" },
112
  { name = "langgraph-checkpoint-sqlite", specifier = ">=3.0.0" },
113
  { name = "langsmith", specifier = ">=0.4.43" },
@@ -118,10 +116,6 @@ requires-dist = [
118
  { name = "openai", specifier = ">=2.8.1" },
119
  { name = "openai-agents", specifier = ">=0.5.1" },
120
  { name = "openai-whisper", specifier = ">=1.0.0" },
121
- { name = "opentelemetry-api" },
122
- { name = "opentelemetry-exporter-otlp" },
123
- { name = "opentelemetry-instrumentation-openai" },
124
- { name = "opentelemetry-sdk" },
125
  { name = "playwright", specifier = ">=1.51.0" },
126
  { name = "plotly", specifier = ">=6.5.0" },
127
  { name = "polygon-api-client", specifier = ">=1.16.3" },
@@ -1797,6 +1791,27 @@ wheels = [
1797
  { url = "https://files.pythonhosted.org/packages/1e/97/d362353ab04f865af6f81d4d46e7aa428734aa032de0017934b771fc34b7/langchain_text_splitters-1.0.0-py3-none-any.whl", hash = "sha256:f00c8219d3468f2c5bd951b708b6a7dd9bc3c62d0cfb83124c377f7170f33b2e", size = 33851, upload-time = "2025-10-17T14:33:40.46Z" },
1798
  ]
1799
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1800
  [[package]]
1801
  name = "langgraph"
1802
  version = "1.0.3"
@@ -2486,19 +2501,6 @@ wheels = [
2486
  { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" },
2487
  ]
2488
 
2489
- [[package]]
2490
- name = "opentelemetry-exporter-otlp"
2491
- version = "1.38.0"
2492
- source = { registry = "https://pypi.org/simple" }
2493
- dependencies = [
2494
- { name = "opentelemetry-exporter-otlp-proto-grpc" },
2495
- { name = "opentelemetry-exporter-otlp-proto-http" },
2496
- ]
2497
- sdist = { url = "https://files.pythonhosted.org/packages/c2/2d/16e3487ddde2dee702bd746dd41950a8789b846d22a1c7e64824aac5ebea/opentelemetry_exporter_otlp-1.38.0.tar.gz", hash = "sha256:2f55acdd475e4136117eff20fbf1b9488b1b0b665ab64407516e1ac06f9c3f9d", size = 6147, upload-time = "2025-10-16T08:35:52.53Z" }
2498
- wheels = [
2499
- { url = "https://files.pythonhosted.org/packages/fd/8a/81cd252b16b7d95ec1147982b6af81c7932d23918b4c3b15372531242ddd/opentelemetry_exporter_otlp-1.38.0-py3-none-any.whl", hash = "sha256:bc6562cef229fac8887ed7109fc5abc52315f39d9c03fd487bb8b4ef8fbbc231", size = 7018, upload-time = "2025-10-16T08:35:32.995Z" },
2500
- ]
2501
-
2502
  [[package]]
2503
  name = "opentelemetry-exporter-otlp-proto-common"
2504
  version = "1.38.0"
@@ -2562,21 +2564,6 @@ wheels = [
2562
  { url = "https://files.pythonhosted.org/packages/10/f5/7a40ff3f62bfe715dad2f633d7f1174ba1a7dd74254c15b2558b3401262a/opentelemetry_instrumentation-0.59b0-py3-none-any.whl", hash = "sha256:44082cc8fe56b0186e87ee8f7c17c327c4c2ce93bdbe86496e600985d74368ee", size = 33020, upload-time = "2025-10-16T08:38:31.463Z" },
2563
  ]
2564
 
2565
- [[package]]
2566
- name = "opentelemetry-instrumentation-openai"
2567
- version = "0.49.8"
2568
- source = { registry = "https://pypi.org/simple" }
2569
- dependencies = [
2570
- { name = "opentelemetry-api" },
2571
- { name = "opentelemetry-instrumentation" },
2572
- { name = "opentelemetry-semantic-conventions" },
2573
- { name = "opentelemetry-semantic-conventions-ai" },
2574
- ]
2575
- sdist = { url = "https://files.pythonhosted.org/packages/44/03/a04b74790ae3c5ea80aa257fae07698a9111ad1c58714ef78eb40f070414/opentelemetry_instrumentation_openai-0.49.8.tar.gz", hash = "sha256:2efe4efea59f2708ef3fc470a10d6db11eb7c48328a2729383d9adef89b6b2da", size = 32254, upload-time = "2025-12-11T20:32:53.415Z" }
2576
- wheels = [
2577
- { url = "https://files.pythonhosted.org/packages/85/e7/36e0d15a1dfb94faf5fcc70721c6706ccbcf58323b31395b857884c0eb91/opentelemetry_instrumentation_openai-0.49.8-py3-none-any.whl", hash = "sha256:2555694d0f009b2d43776d718a7467229d49e04bb2ab78e2a9880d52674b8393", size = 43003, upload-time = "2025-12-11T20:32:20.844Z" },
2578
- ]
2579
-
2580
  [[package]]
2581
  name = "opentelemetry-proto"
2582
  version = "1.38.0"
@@ -2616,15 +2603,6 @@ wheels = [
2616
  { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" },
2617
  ]
2618
 
2619
- [[package]]
2620
- name = "opentelemetry-semantic-conventions-ai"
2621
- version = "0.4.13"
2622
- source = { registry = "https://pypi.org/simple" }
2623
- sdist = { url = "https://files.pythonhosted.org/packages/ba/e6/40b59eda51ac47009fb47afcdf37c6938594a0bd7f3b9fadcbc6058248e3/opentelemetry_semantic_conventions_ai-0.4.13.tar.gz", hash = "sha256:94efa9fb4ffac18c45f54a3a338ffeb7eedb7e1bb4d147786e77202e159f0036", size = 5368, upload-time = "2025-08-22T10:14:17.387Z" }
2624
- wheels = [
2625
- { url = "https://files.pythonhosted.org/packages/35/b5/cf25da2218910f0d6cdf7f876a06bed118c4969eacaf60a887cbaef44f44/opentelemetry_semantic_conventions_ai-0.4.13-py3-none-any.whl", hash = "sha256:883a30a6bb5deaec0d646912b5f9f6dcbb9f6f72557b73d0f2560bf25d13e2d5", size = 6080, upload-time = "2025-08-22T10:14:16.477Z" },
2626
- ]
2627
-
2628
  [[package]]
2629
  name = "orjson"
2630
  version = "3.11.4"
 
35
  { name = "langchain-ollama" },
36
  { name = "langchain-openai" },
37
  { name = "langchain-text-splitters" },
38
+ { name = "langfuse" },
39
  { name = "langgraph" },
40
  { name = "langgraph-checkpoint-sqlite" },
41
  { name = "langsmith" },
 
46
  { name = "openai" },
47
  { name = "openai-agents" },
48
  { name = "openai-whisper" },
 
 
 
 
49
  { name = "playwright" },
50
  { name = "plotly" },
51
  { name = "polygon-api-client" },
 
105
  { name = "langchain-ollama", specifier = ">=1.0.0" },
106
  { name = "langchain-openai", specifier = ">=1.0.3" },
107
  { name = "langchain-text-splitters", specifier = ">=1.0.0" },
108
+ { name = "langfuse", specifier = ">=2.0.0" },
109
  { name = "langgraph", specifier = ">=1.0.3" },
110
  { name = "langgraph-checkpoint-sqlite", specifier = ">=3.0.0" },
111
  { name = "langsmith", specifier = ">=0.4.43" },
 
116
  { name = "openai", specifier = ">=2.8.1" },
117
  { name = "openai-agents", specifier = ">=0.5.1" },
118
  { name = "openai-whisper", specifier = ">=1.0.0" },
 
 
 
 
119
  { name = "playwright", specifier = ">=1.51.0" },
120
  { name = "plotly", specifier = ">=6.5.0" },
121
  { name = "polygon-api-client", specifier = ">=1.16.3" },
 
1791
  { url = "https://files.pythonhosted.org/packages/1e/97/d362353ab04f865af6f81d4d46e7aa428734aa032de0017934b771fc34b7/langchain_text_splitters-1.0.0-py3-none-any.whl", hash = "sha256:f00c8219d3468f2c5bd951b708b6a7dd9bc3c62d0cfb83124c377f7170f33b2e", size = 33851, upload-time = "2025-10-17T14:33:40.46Z" },
1792
  ]
1793
 
1794
+ [[package]]
1795
+ name = "langfuse"
1796
+ version = "3.10.6"
1797
+ source = { registry = "https://pypi.org/simple" }
1798
+ dependencies = [
1799
+ { name = "backoff" },
1800
+ { name = "httpx" },
1801
+ { name = "openai" },
1802
+ { name = "opentelemetry-api" },
1803
+ { name = "opentelemetry-exporter-otlp-proto-http" },
1804
+ { name = "opentelemetry-sdk" },
1805
+ { name = "packaging" },
1806
+ { name = "pydantic" },
1807
+ { name = "requests" },
1808
+ { name = "wrapt" },
1809
+ ]
1810
+ sdist = { url = "https://files.pythonhosted.org/packages/e6/70/4ff19dd1085bb4d5007f008a696c8cf989a0ad76eabc512a5cd19ee4a0b7/langfuse-3.10.6.tar.gz", hash = "sha256:fced9ca0416ba7499afa45fbedf831afc0ec824cb283719b9cf429bf5713f205", size = 223656, upload-time = "2025-12-12T13:29:24.048Z" }
1811
+ wheels = [
1812
+ { url = "https://files.pythonhosted.org/packages/ce/f0/fac7d56ce1136afbbebaddd1dc119fb1b94b5a7489944d0b4c2dcee99ed7/langfuse-3.10.6-py3-none-any.whl", hash = "sha256:36ca490cd64e372b1b94c28063b3fea39b1a8446cabd20172b524d01011a34e1", size = 399347, upload-time = "2025-12-12T13:29:22.462Z" },
1813
+ ]
1814
+
1815
  [[package]]
1816
  name = "langgraph"
1817
  version = "1.0.3"
 
2501
  { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" },
2502
  ]
2503
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2504
  [[package]]
2505
  name = "opentelemetry-exporter-otlp-proto-common"
2506
  version = "1.38.0"
 
2564
  { url = "https://files.pythonhosted.org/packages/10/f5/7a40ff3f62bfe715dad2f633d7f1174ba1a7dd74254c15b2558b3401262a/opentelemetry_instrumentation-0.59b0-py3-none-any.whl", hash = "sha256:44082cc8fe56b0186e87ee8f7c17c327c4c2ce93bdbe86496e600985d74368ee", size = 33020, upload-time = "2025-10-16T08:38:31.463Z" },
2565
  ]
2566
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2567
  [[package]]
2568
  name = "opentelemetry-proto"
2569
  version = "1.38.0"
 
2603
  { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" },
2604
  ]
2605
 
 
 
 
 
 
 
 
 
 
2606
  [[package]]
2607
  name = "orjson"
2608
  version = "3.11.4"