from genmi_method import History, get_answer, get_answer_generator, get_answer_single, get_answers_parallel  # local helper module; not exercised by these standalone tests
import google.generativeai as genai
from google.generativeai import types
import os

# Configure the API
GOOGLE_API_KEY = os.getenv('Gemini_API_key')
if not GOOGLE_API_KEY:
    raise ValueError("Gemini_API_key environment variable is not set")
genai.configure(api_key=GOOGLE_API_KEY)

# Initialize the model
model = genai.GenerativeModel('gemini-2.5-flash')
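

# Optional configuration sketch (not part of the original tests): shows how sampling
# parameters and a dedicated system instruction could be attached to the model. This
# assumes an SDK version that accepts the `system_instruction` argument and exposes
# `types.GenerationConfig`; the temperature and token limit are illustrative values.
def build_configured_model():
    return genai.GenerativeModel(
        'gemini-2.5-flash',
        system_instruction="You are a helpful teaching assistant.",
        generation_config=types.GenerationConfig(
            temperature=0.7,
            max_output_tokens=1024,
        ),
    )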

def test_basic_response():
    print("\n=== Testing Basic Response ===")
    system_prompt = "You are a helpful teaching assistant."
    user_prompt = "What are the key principles of effective teaching?"
    full_prompt = f"{system_prompt}\n\n{user_prompt}"
    response = model.generate_content(full_prompt)
    print("\nResponse:", response.text)

def test_streaming_response():
    print("\n=== Testing Streaming Response ===")
    system_prompt = "You are a helpful teaching assistant."
    user_prompt = "Explain the concept of active learning in 3 points."
    full_prompt = f"{system_prompt}\n\n{user_prompt}"
    print("Streaming response:")
    response = model.generate_content(full_prompt, stream=True)
    for chunk in response:
        print(chunk.text, end="")

def test_single_response():
    print("\n=== Testing Single Response ===")
    system_prompt = "You are a helpful teaching assistant."
    user_prompt = "What is the difference between formative and summative assessment?"
    full_prompt = f"{system_prompt}\n\n{user_prompt}"
    response = model.generate_content(full_prompt)
    print("\nResponse:", response.text)

def test_parallel_responses():
    print("\n=== Testing Parallel Responses ===")
    system_prompt = "You are a helpful teaching assistant."
    user_prompt = "Give me 3 tips for classroom management."
    full_prompt = f"{system_prompt}\n\n{user_prompt}"
    responses = []
    # Note: the two requests below are issued sequentially, one after the other.
    for _ in range(2):
        response = model.generate_content(full_prompt)
        responses.append(response.text)
    print("\nResponses:")
    for i, response in enumerate(responses, 1):
        print(f"\nResponse {i}:", response)

def test_conversation_history():
    print("\n=== Testing Conversation History ===")
    system_prompt = "You are a helpful teaching assistant."

    # First message
    user_prompt1 = "What is the importance of lesson planning?"
    full_prompt1 = f"{system_prompt}\n\n{user_prompt1}"
    response1 = model.generate_content(full_prompt1)
    print("\nFirst response:", response1.text)

    # Follow-up message with context
    user_prompt2 = "How can I improve my lesson planning based on your previous answer?"
    full_prompt2 = f"{system_prompt}\n\nPrevious context: {response1.text}\n\n{user_prompt2}"
    response2 = model.generate_content(full_prompt2)
    print("\nFollow-up response:", response2.text)

if __name__ == "__main__":
    print("Starting Gemini API Tests...")
    try:
        test_basic_response()
        test_streaming_response()
        test_single_response()
        test_parallel_responses()
        test_conversation_history()
        print("\nAll tests completed successfully!")
    except Exception as e:
        print(f"\nError during testing: {str(e)}")