File size: 3,255 Bytes
1221662
 
 
 
05245c5
1221662
 
05245c5
 
 
 
1221662
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
from genmi_method import History, get_answer, get_answer_generator, get_answer_single, get_answers_parallel

import google.generativeai as genai
from google.generativeai import types
import os

# Configure the API key.  Accept the conventional GOOGLE_API_KEY variable,
# falling back to the legacy 'Gemini_API_key' name this script originally read.
# (Bug fix: the old error message claimed GOOGLE_API_KEY was checked while the
# code only ever read 'Gemini_API_key'.)
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY') or os.getenv('Gemini_API_key')
if not GOOGLE_API_KEY:
    raise ValueError(
        "Neither GOOGLE_API_KEY nor Gemini_API_key environment variable is set"
    )
genai.configure(api_key=GOOGLE_API_KEY)

# Initialize the shared model instance used by every test below.
model = genai.GenerativeModel('gemini-2.5-flash')

def test_basic_response():
    """Send one combined system+user prompt and print the full reply."""
    print("\n=== Testing Basic Response ===")
    prompt = (
        "You are a helpful teaching assistant."
        "\n\n"
        "What are the key principles of effective teaching?"
    )
    reply = model.generate_content(prompt)
    print("\nResponse:", reply.text)

def test_streaming_response():
    """Request a streamed reply and print each chunk as it arrives."""
    print("\n=== Testing Streaming Response ===")
    prompt = "{}\n\n{}".format(
        "You are a helpful teaching assistant.",
        "Explain the concept of active learning in 3 points.",
    )
    print("Streaming response:")
    for piece in model.generate_content(prompt, stream=True):
        print(piece.text, end="")

def test_single_response():
    """Ask a single assessment question and print the complete answer."""
    print("\n=== Testing Single Response ===")
    persona = "You are a helpful teaching assistant."
    question = "What is the difference between formative and summative assessment?"
    reply = model.generate_content(persona + "\n\n" + question)
    print("\nResponse:", reply.text)

def test_parallel_responses():
    """Issue the same prompt twice and print both answers.

    NOTE(review): despite the name, the two requests run sequentially here —
    confirm whether true parallelism was intended.
    """
    print("\n=== Testing Parallel Responses ===")
    prompt = "\n\n".join([
        "You are a helpful teaching assistant.",
        "Give me 3 tips for classroom management.",
    ])
    answers = [model.generate_content(prompt).text for _ in range(2)]

    print("\nResponses:")
    for idx, text in enumerate(answers, 1):
        print(f"\nResponse {idx}:", text)

def test_conversation_history():
    """Simulate a two-turn conversation by feeding turn 1's answer into turn 2."""
    print("\n=== Testing Conversation History ===")
    persona = "You are a helpful teaching assistant."

    # Turn 1: initial question.
    first = model.generate_content(
        persona + "\n\n" + "What is the importance of lesson planning?"
    )
    print("\nFirst response:", first.text)

    # Turn 2: follow-up, carrying the first answer along as inline context.
    followup_prompt = (
        f"{persona}\n\nPrevious context: {first.text}\n\n"
        "How can I improve my lesson planning based on your previous answer?"
    )
    second = model.generate_content(followup_prompt)
    print("\nFollow-up response:", second.text)

if __name__ == "__main__":
    print("Starting Gemini API Tests...")
    
    try:
        test_basic_response()
        test_streaming_response()
        test_single_response()
        test_parallel_responses()
        test_conversation_history()
        
        print("\nAll tests completed successfully!")
    except Exception as e:
        print(f"\nError during testing: {str(e)}")