zh3036 commited on
Commit
1372721
·
1 Parent(s): 6450e99

initialize with ok pydantic ai

Browse files
.gitignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.13
README.md CHANGED
@@ -10,3 +10,40 @@ pinned: false
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
+
14
+ # Marvin AI 对话演示
15
+
16
+ 使用 Marvin AI 框架创建三个 AI 智能体进行对话:Alice(教练)、Bob(有问题的人)、Critic(评价者)。
17
+
18
+ ## 安装
19
+
20
+ ```bash
21
+ uv add marvin
22
+ ```
23
+
24
+ ## 设置 API 密钥
25
+
26
+ ```bash
27
+ export OPENAI_API_KEY="你的-openai-api-密钥"
28
+ ```
29
+
30
+ 获取 API 密钥:访问 [OpenAI 官网](https://platform.openai.com/) → 注册/登录 → API 部分 → 生成新密钥
31
+
32
+ ## 运行
33
+
34
+ ```bash
35
+ uv run marvin_test.py
36
+ ```
37
+
38
+ ## 工作流程
39
+
40
+ 1. Alice 和 Bob 进行 3 轮对话
41
+ 2. 每轮后按回车继续
42
+ 3. Critic 评价对话质量
43
+ 4. 结果保存到 `score.json`
44
+
45
+ ## 常见问题
46
+
47
+ - **API 密钥错误**:检查 `OPENAI_API_KEY` 是否正确设置
48
+ - **模块未找到**:运行 `uv add marvin`
49
+ - **速率限制**:等待片刻后重试
alice_agent.py ADDED
@@ -0,0 +1 @@
 
 
1
+
alice_gradio.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from marvin import Agent

# One stateless Marvin agent answers every chat turn.
bot = Agent("Jokester", instructions="Tell witty one-liners.")

async def talk(msg, history):
    """Return the agent's reply to *msg*.

    *history* is supplied by gr.ChatInterface but unused here — the
    agent keeps no per-session state.
    """
    # Bug fix: a Marvin Agent is not awaitable/callable directly, so the
    # original `await bot(msg)` raised TypeError.  Use the async run
    # entry point instead.
    # NOTE(review): assumes Marvin 3.x `Agent.run_async` — confirm the
    # pinned marvin version.
    return await bot.run_async(msg)

demo = gr.ChatInterface(talk, title="Jokester")
demo.queue()  # initialise the queue explicitly so the async fn is served
demo.launch()
convo-pai.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic_ai import Agent
import json
from typing import Dict, Any

# All three agents run on the same underlying model.
_MODEL = 'openai:gpt-4o'

# Alice — the coach who drives the conversation with questions.
alice = Agent(
    _MODEL,
    system_prompt="You are a coach, speak like coach, start with question if no convo history; you speak very concisely, one sentence at a time",
)

# Bob — the coachee who brings a problem to talk through.
bob = Agent(
    _MODEL,
    system_prompt="You have a problem with your kids. and speak very concisely, one sentence at a time",
)

# Critic — scores the finished dialogue and replies with a JSON verdict.
critic = Agent(
    _MODEL,
    system_prompt=(
        "Score the dialogue on insight (0-10) and civility (0-10); "
        "return JSON with fields 'insight', 'civility', 'comment'."
    ),
)
23
+
24
def format_conversation_history(conversation):
    """Render (speaker, message) pairs as 'Speaker: message' lines.

    Returns a placeholder sentence when there is no history yet.
    """
    if conversation:
        lines = [f"{who}: {said}" for who, said in conversation]
        return "\n".join(lines)
    return "No previous conversation."
29
+
30
import re  # hoisted here instead of importing inside the try body


def _default_score(comment):
    """Fallback result used whenever the critic's reply is not valid JSON.

    Deduplicates the two identical literal dicts the original built.
    """
    return {
        "insight": 7,  # Default values
        "civility": 8,
        "comment": comment,
    }


# Simple ping-pong conversation loop
conversation = []
print("Starting conversation between Alice (coach) and Bob (person with problems)...\n")

for round_num in range(3):
    print(f"=== Round {round_num + 1} ===")

    # Alice speaks (coach asking questions/helping).
    history_context = format_conversation_history(conversation)
    # Typo fix in the prompt: "Your want" -> "You want".
    alice_prompt = f"You want to ask bob questions, help bob. Conversation history:\n{history_context}"

    alice_result = alice.run_sync(alice_prompt)
    alice_msg = alice_result.output
    conversation.append(("Alice", alice_msg))
    print(f"Alice: {alice_msg}")

    # Bob responds with his problem.
    history_context = format_conversation_history(conversation)
    bob_prompt = f"Respond to Alice with your problem. Conversation history:\n{history_context}"

    bob_result = bob.run_sync(bob_prompt)
    bob_msg = bob_result.output
    conversation.append(("Bob", bob_msg))
    print(f"Bob: {bob_msg}")

    print()
    input("Press Enter to continue...")

# Convert conversation to a string format for the critic.
conversation_text = "\n\n".join(f"{speaker}: {message}" for speaker, message in conversation)

print("\n=== Critic Evaluation ===")
print("Getting evaluation from critic...")

# Third agent evaluates the conversation.
critic_prompt = f"Please evaluate this conversation:\n\n{conversation_text}"

try:
    critic_result = critic.run_sync(critic_prompt)
    critic_output = critic_result.output

    if isinstance(critic_output, str):
        # Extract the first JSON object embedded in the reply, if any.
        json_match = re.search(r'\{.*\}', critic_output, re.DOTALL)
        try:
            score = json.loads(json_match.group()) if json_match else _default_score(critic_output)
        except json.JSONDecodeError:
            score = _default_score(critic_output)
    else:
        # Non-string outputs are assumed to already be structured.
        score = critic_output
except Exception as e:
    # Broad catch is deliberate: this is a demo script and any model/API
    # failure should still produce a score.json file.
    print(f"Error getting critic evaluation: {e}")
    score = {
        "insight": 0,
        "civility": 0,
        "comment": f"Error occurred during evaluation: {str(e)}"
    }

print(f"Score: {score}")

# Write score to file.
try:
    with open('score.json', 'w') as f:
        json.dump(score, f, indent=2)
    print("\nResults saved to score.json")
except Exception as e:
    print(f"Error saving to file: {e}")

print("\nConversation completed!")
gradio_test.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr

def greet(name, intensity):
    """Greet *name* with int(intensity) exclamation marks."""
    bang_count = int(intensity)
    return "Hello, " + name + "!" * bang_count

# Text box + slider in, text out.
demo = gr.Interface(
    fn=greet,
    inputs=["text", "slider"],
    outputs=["text"],
)

demo.launch()
main.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
import marvin

# One-shot prompt: ask Marvin for a haiku and print the reply.
haiku = marvin.run("Write a haiku about artificial intelligence")
print(haiku)
mar_gr.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# file: marvin_gradio.py
import marvin
import gradio as gr

# 1. Define an agent (feel free to tweak name / persona)
assistant = marvin.Agent(
    name="DocBot",
    instructions="You are a concise, precise technical assistant."
)

# 2. A thread keeps context between turns -----------------
thread_holder: dict[str, marvin.Thread] = {}

def respond(user_msg: str, history):
    """Answer one chat turn, reusing a single Marvin thread for context.

    *history* is supplied by Gradio but unused: context lives in the
    marvin.Thread instead.
    """
    # Lazily create the shared thread on first use.
    if "t" not in thread_holder:
        thread_holder["t"] = marvin.Thread()
    conversation_thread = thread_holder["t"]
    with conversation_thread:
        reply = assistant.run(user_msg)  # sync call → str
    return reply  # Gradio expects a plain string

# 3. Spin up the UI ----------------------------------------------------
demo = gr.ChatInterface(
    fn=respond,
    title="Marvin + Gradio demo",
    description="Ask me anything about your code, data or life."
)

if __name__ == "__main__":
    demo.launch()  # localhost:7860 by default
marvin_gradio_examples.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import marvin
import gradio as gr
import asyncio

# Example 1: Using Thread for context management
def example_with_thread():
    """Build a ChatInterface whose turns share one marvin.Thread."""
    assistant = marvin.Agent(
        name="DocBot",
        instructions="You are a concise, precise technical assistant."
    )

    # Thread holder for context management (one thread per process).
    thread_holder: dict[str, marvin.Thread] = {}

    def respond(user_msg: str, history):
        # One Thread per browser tab (history is supplied by Gradio).
        thread = thread_holder.setdefault("t", marvin.Thread())
        with thread:
            # Use marvin.run() to ensure string output
            reply = marvin.run(user_msg, agents=[assistant])
        return str(reply)  # Ensure string output

    demo = gr.ChatInterface(
        fn=respond,
        title="Marvin + Gradio (Thread Example)",
        description="Ask me anything about your code, data or life."
    )
    return demo

# Example 2: Using async/await (Marvin v2 style)
def example_with_async():
    """Build a ChatInterface backed by an async handler."""
    bot = marvin.Agent(
        name="Jokester",
        instructions="Tell witty one-liners."
    )

    async def talk(msg, history):
        try:
            # Bug fix: marvin.run() is synchronous and returns a str, so
            # the original `await marvin.run(...)` raised TypeError.
            # Use the async entry point instead.
            # NOTE(review): assumes Marvin 3.x `run_async` — confirm.
            reply = await marvin.run_async(msg, agents=[bot])
            return str(reply)  # Ensure string output
        except Exception as e:
            print(f"Error in talk function: {e}")
            return f"Sorry, I encountered an error: {str(e)}"

    # Create the interface with proper queue configuration.
    demo = gr.ChatInterface(
        fn=talk,
        title="Marvin + Gradio (Async Example)",
        description="I'll tell you some jokes!"
    )

    # Configure queue with proper settings
    demo.queue(max_size=1)  # Limit queue size to 1
    return demo

if __name__ == "__main__":
    # Choose which example to run
    demo = example_with_thread()  # or example_with_async()
    # demo = example_with_async()

    # Launch with proper async support
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=True
    )
marvin_test.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import marvin
import json

# Define the two speakers.
# Typo fixes in the instructions: "concicely" -> "concisely".
alice = marvin.Agent(name="Alice", instructions="You are a coach, speak like coach, start with question if no convo history ; you speak very concisely, one sentence at a time")
bob = marvin.Agent(name="Bob", instructions="You have a problem with your kids. and speak very concisely, one sentence at a time")

# Simple ping-pong loop: three rounds of Alice -> Bob, pausing each round.
conversation = []
for _ in range(3):
    # Typo fix in the prompt: "Your want" -> "You want".
    alice_msg = marvin.run("You want to ask bob questions, help bob", agents=[alice], context={"history": conversation})
    conversation.append(("Alice", alice_msg))

    bob_msg = marvin.run("Respond to Alice with your problem", agents=[bob], context={"history": conversation})
    conversation.append(("Bob", bob_msg))
    print(conversation)
    input("Press Enter to continue...")

# Convert conversation to a string format for the critic.
conversation_text = "\n\n".join(f"{speaker}: {message}" for speaker, message in conversation)

# Third agent evaluates; result_type=dict asks marvin for a dict verdict.
critic = marvin.Agent(
    name="Critic",
    instructions=(
        "Score the dialogue on insight (0-10) and civility (0-10); "
        "return JSON with fields 'insight', 'civility', 'comment'."
    )
)
score = marvin.run(f"Please evaluate this conversation:\n\n{conversation_text}", agents=[critic], result_type=dict)
print(score)


# Write score to file.
with open('score.json', 'w') as f:
    json.dump(score, f, indent=2)
37
+
open2.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from openai import OpenAI

client = OpenAI()

# Single user turn sent as a content-part list; plain-text response.
_messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "what is weather now"},
        ],
    }
]

response = client.chat.completions.create(
    model="gpt-4.1-nano",
    messages=_messages,
    response_format={"type": "text"},
    temperature=1,
    max_completion_tokens=2048,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0,
)

# Print the raw response object for inspection.
print(response)
pa.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic_ai import Agent

# Minimal pydantic-ai example: one agent, one synchronous question.
agent = Agent(
    'openai:gpt-4o',
    system_prompt='Be concise, reply with one sentence.',
)

result = agent.run_sync('Where does "hello world" come from?')
print(result.output)
# Expected flavour of answer:
#   The first known use of "hello, world" was in a 1974 textbook about
#   the C programming language.
pa_fixed.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
from pydantic_ai import Agent

# Same minimal example as pa.py, with the corrected model name.
agent = Agent(
    'openai:gpt-4o',  # Fixed model name
    system_prompt='Be concise, reply with one sentence.',
)

result = agent.run_sync('Where does "hello world" come from?')
print(result.output)
pa_simple_proxy.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import httpx
from pydantic_ai import Agent

# httpx picks up http(s)_proxy from the environment by default
# (trust_env=True), so the plain Agent below should route through the
# system proxy without any extra configuration.

print("System proxy settings:")
print(f" http_proxy: {os.environ.get('http_proxy', 'Not set')}")
print(f" https_proxy: {os.environ.get('https_proxy', 'Not set')}")

# pydantic_ai builds its own httpx client internally; keep the simple
# construction and rely on environment-based proxying.
agent = Agent(
    'openai:gpt-4o',
    system_prompt='Be concise, reply with one sentence.',
)

print("\nRunning agent (httpx should use system proxy automatically)...")
try:
    result = agent.run_sync('Where does "hello world" come from?')
    print(f"Response: {result.output}")
except Exception as e:
    # Diagnostic script: report the failure class and keep going.
    print(f"Error: {type(e).__name__}: {e}")

# Extra hints for troubleshooting proxy problems.
print("\nDebugging info:")
print("- Make sure your proxy is running on 127.0.0.1:7890")
print("- The proxy needs to support HTTPS connections")
print("- Check if the proxy requires authentication")
pa_simple_test.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
from pydantic_ai import Agent
import os

# Set a longer timeout via environment variable if possible.
# NOTE(review): httpx does not document an HTTPX_TIMEOUT variable — this
# may have no effect; confirm before relying on it.
os.environ['HTTPX_TIMEOUT'] = '60'

async def test_agent():
    """Run one async query; return the result, or None on any failure."""
    try:
        agent = Agent(
            'openai:gpt-4o',
            system_prompt='Be concise, reply with one sentence.',
        )

        print("Running agent asynchronously...")
        result = await agent.run('Where does "hello world" come from?')
        print(f"Response: {result.output}")
    except Exception as e:
        print(f"Error: {type(e).__name__}: {e}")
        return None
    return result

# Drive the coroutine to completion.
result = asyncio.run(test_agent())
pa_with_client.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from openai import AsyncOpenAI
import httpx
import asyncio

# Custom transport stack: httpx client -> AsyncOpenAI -> OpenAIModel,
# all sharing a 60-second timeout.
http_client = httpx.AsyncClient(timeout=httpx.Timeout(60.0))

openai_client = AsyncOpenAI(
    http_client=http_client,
    timeout=60.0,
)

# NOTE(review): `openai_client=` is accepted by older pydantic-ai
# releases; newer ones take a provider argument instead — confirm the
# pinned version.
model = OpenAIModel('gpt-4o', openai_client=openai_client)

# Agent built on the custom model.
agent = Agent(
    model=model,
    system_prompt='Be concise, reply with one sentence.',
)

print("Running agent with custom timeout...")
result = agent.run_sync('Where does "hello world" come from?')
print(f"Response: {result.output}")
pa_with_proxy.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import httpx
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from openai import AsyncOpenAI

# Get proxy from environment (falls back to a common local proxy port).
proxy = os.environ.get('https_proxy', 'http://127.0.0.1:7890')
print(f"Using proxy: {proxy}")

# Route all OpenAI traffic through the proxy with a 30-second timeout.
http_client = httpx.AsyncClient(
    proxy=proxy,
    timeout=httpx.Timeout(30.0)
)

# Create OpenAI client with the custom http client.
openai_client = AsyncOpenAI(
    http_client=http_client
)

# Fix: pass the client by keyword, consistent with pa_with_client.py —
# OpenAIModel's second positional parameter is not the client, so the
# original positional call was wrong.
model = OpenAIModel('gpt-4o', openai_client=openai_client)

# Agent built on the proxied model.
agent = Agent(
    model=model,
    system_prompt='Be concise, reply with one sentence.',
)

print("Running agent with proxy configuration...")
try:
    result = agent.run_sync('Where does "hello world" come from?')
    print(f"Response: {result.output}")
except Exception as e:
    # Diagnostic script: report the failure class rather than crash.
    print(f"Error: {type(e).__name__}: {e}")
pa_with_timeout.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from pydantic_ai import Agent
import httpx

# Create a custom HTTP client with a longer (60-second) timeout.
http_client = httpx.AsyncClient(timeout=httpx.Timeout(60.0))

try:
    # NOTE(review): pydantic-ai's Agent may not accept an `http_client`
    # keyword (clients are normally configured on the model/provider);
    # if so, construction raises TypeError and is reported below.
    # Confirm against the pinned pydantic-ai version.
    agent = Agent(
        'openai:gpt-4o',
        system_prompt='Be concise, reply with one sentence.',
        http_client=http_client
    )

    print("Running agent with question: 'Where does \"hello world\" come from?'")
    result = agent.run_sync('Where does "hello world" come from?')
    print(f"Response: {result.output}")

except Exception as e:
    print(f"Error occurred: {type(e).__name__}: {e}")
    print("\nPossible causes:")
    for hint in (
        "1. Network connectivity issues",
        "2. Invalid API key",
        "3. Rate limiting",
        "4. Firewall/proxy blocking the connection",
    ):
        print(hint)

# Report whether an API key is configured, to aid debugging.
api_key = os.environ.get('OPENAI_API_KEY')
if api_key:
    print(f"\nAPI key is set (starts with: {api_key[:10]}...)")
else:
    print("\nNo OPENAI_API_KEY environment variable found!")
pydantic_gradio.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic_ai import Agent
import gradio as gr

# Create a Pydantic AI agent.
agent = Agent(
    'openai:gpt-4o',  # or your preferred model
    system_prompt='You are a helpful assistant. Be concise and clear in your responses.'
)

def respond(message: str, history: list):
    """
    Handle chat messages using the Pydantic AI agent.

    Args:
        message: The user's input message
        history: List of previous message pairs (from gr.ChatInterface)

    Returns:
        The agent's response text
    """
    # Bug fix: Agent.run_sync() has no `context` keyword, so the
    # original `run_sync(message, context=...)` raised TypeError on
    # every message.  Fold the chat history into the prompt text instead
    # so the model still sees prior turns.
    if history:
        transcript = "\n".join(str(turn) for turn in history)
        prompt = f"Conversation so far:\n{transcript}\n\nUser: {message}"
    else:
        prompt = message

    result = agent.run_sync(prompt)
    return result.output

# Create the Gradio interface.
demo = gr.ChatInterface(
    fn=respond,
    title="Pydantic AI Chat",
    description="Chat with a Pydantic AI-powered assistant."
)

if __name__ == "__main__":
    demo.launch()
pyproject.toml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "uv-pg"
3
+ version = "0.1.0"
4
+ description = "Demo scripts for Marvin and pydantic-ai agent conversations with Gradio UIs."
5
+ readme = "README.md"
6
+ requires-python = ">=3.13"
7
+ dependencies = [
8
+ "gradio>=5.33.0",
9
+ "httpx>=0.28.1",
10
+ "marvin>=3.1.1",
11
+ "pydantic-ai>=0.2.15",
12
+ ]
score.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "insight": 7,
3
+ "civility": 10,
4
+ "comment": "The conversation provides practical advice on creating a structured routine to balance homework and playtime. Alice offers a constructive suggestion, and Bob appreciates and accepts it. The dialogue is civil, respectful, and helpful, with a focus on problem-solving. However, the conversation could delve deeper into more detailed strategies or examples for setting routines or managing children's time effectively, which would enhance the level of insight."
5
+ }
test_openai_direct.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from openai import OpenAI
import httpx

# Test the OpenAI API directly (bypassing pydantic-ai / marvin layers).
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
    print("No OPENAI_API_KEY environment variable found!")
    exit(1)

print(f"Using API key: {api_key[:20]}...")

try:
    # 30-second timeout so a hung connection fails fast.
    client = OpenAI(
        api_key=api_key,
        timeout=30.0,
    )

    print("Testing OpenAI API connection...")

    # Fire a minimal two-message chat completion.
    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant. Be concise."},
        {"role": "user", "content": "Where does 'hello world' come from?"},
    ]
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=chat_messages,
        max_tokens=100
    )

    print(f"Success! Response: {response.choices[0].message.content}")

except Exception as e:
    print(f"Error: {type(e).__name__}: {e}")

    # Surface HTTP details when the SDK attached a response object.
    if hasattr(e, 'response'):
        print(f"Response status: {getattr(e.response, 'status_code', 'N/A')}")
        print(f"Response text: {getattr(e.response, 'text', 'N/A')}")
uv.lock ADDED
The diff for this file is too large to render. See raw diff