# TRuCAL — tests/test_ollama.py
"""
Test script to verify Ollama integration.
"""
from components.llm_integration import CustomLLMResponder
def test_ollama():
    """Smoke-test the Ollama backend of CustomLLMResponder.

    Initializes the responder against a local Ollama server, sends a single
    ethical-dilemma prompt, and prints the generated response.

    Returns:
        The generated response string on success, or None if the Ollama
        server could not be reached or generation failed.
    """
    # Initialize the LLM with Ollama
    print("Initializing LLM with Ollama...")
    llm = CustomLLMResponder(
        use_ollama=True,
        ollama_model="llama2",  # Using llama2 which has a smaller memory footprint
        ollama_base_url="http://localhost:11434",
    )

    # Test prompt
    test_prompt = """
Analyze the following ethical dilemma from multiple perspectives:
A self-driving car must choose between hitting a pedestrian crossing illegally or swerving and risking the passenger's life.
Please provide a detailed ethical analysis considering different frameworks.
"""

    print("\nSending request to Ollama...")
    try:
        # Use smaller values for max_length to reduce memory usage
        response = llm.generate(test_prompt, max_length=100, temperature=0.7)
    except Exception as exc:
        # Most likely cause: the Ollama server is not running on localhost:11434.
        # Report it clearly instead of dumping a raw traceback from deep inside
        # the HTTP stack, so the smoke test is actionable.
        print(f"\nFailed to get a response from Ollama: {exc}")
        print("Is the Ollama server running at http://localhost:11434 ?")
        return None

    print("\nResponse from Ollama:")
    print("-" * 80)
    print(response)
    print("-" * 80)
    return response


if __name__ == "__main__":
    test_ollama()