"""
Test script for Pathora Colposcopy API endpoints
Demonstrates how to use both AI model and LLM endpoints
"""

try:
    import requests
except ImportError:
    raise SystemExit("Please install requests: pip install requests")
import json
import base64

# API Configuration
BASE_URL = "http://localhost:8000"  # Change to your deployment URL
API_KEY = "your_gemini_api_key_here"  # For local testing
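
# NOTE: API_KEY is defined for local testing but is never sent by the requests
# below. If your deployment enforces authentication, you would attach the key
# yourself; the header name here is a placeholder, not a confirmed one:
#   requests.post(url, headers={"X-API-Key": API_KEY}, ...)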


def test_health_check():
    """Test the health check endpoint"""
    print("=" * 60)
    print("Testing Health Check Endpoint")
    print("=" * 60)

    response = requests.get(f"{BASE_URL}/health")
    print(f"Status Code: {response.status_code}")
    print(f"Response: {json.dumps(response.json(), indent=2)}")
    print()


def test_acetowhite_detection(image_path: str):
    """Test acetowhite contour detection"""
    print("=" * 60)
    print("Testing Acetowhite Contour Detection")
    print("=" * 60)

    # Keep the file open for the duration of the upload
    with open(image_path, 'rb') as f:
        files = {'file': f}
        data = {'conf_threshold': 0.5}
        response = requests.post(
            f"{BASE_URL}/api/infer-aw-contour",
            files=files,
            data=data
        )

    print(f"Status Code: {response.status_code}")
    result = response.json()

    # Print without the base64 image for readability
    print(f"Status: {result.get('status')}")
    print(f"Detections: {result.get('detections')}")
    print(f"Contours: {len(result.get('contours', []))}")
    print(f"Confidence Threshold: {result.get('confidence_threshold')}")

    # Save the annotated result image if one was returned
    if result.get('result_image'):
        output_path = "test_output_aw.png"
        img_data = base64.b64decode(result['result_image'])
        with open(output_path, 'wb') as out:
            out.write(img_data)
        print(f"Result image saved to: {output_path}")
    print()


def test_cervix_detection(image_path: str):
    """Test cervix bounding box detection"""
    print("=" * 60)
    print("Testing Cervix Bounding Box Detection")
    print("=" * 60)

    with open(image_path, 'rb') as f:
        files = {'file': f}
        data = {'conf_threshold': 0.4}
        response = requests.post(
            f"{BASE_URL}/api/infer-cervix-bbox",
            files=files,
            data=data
        )

    print(f"Status Code: {response.status_code}")
    result = response.json()
    print(f"Status: {result.get('status')}")
    print(f"Detections: {result.get('detections')}")
    print(f"Bounding Boxes: {json.dumps(result.get('bounding_boxes', []), indent=2)}")

    # Save the annotated result image if one was returned
    if result.get('result_image'):
        output_path = "test_output_cervix.png"
        img_data = base64.b64decode(result['result_image'])
        with open(output_path, 'wb') as out:
            out.write(img_data)
        print(f"Result image saved to: {output_path}")
    print()


def test_batch_inference(image_paths: list):
    """Test batch inference on multiple images"""
    print("=" * 60)
    print("Testing Batch Inference")
    print("=" * 60)

    files = [('files', open(img, 'rb')) for img in image_paths]
    data = {'conf_threshold': 0.5}
    try:
        response = requests.post(
            f"{BASE_URL}/api/batch-infer",
            files=files,
            data=data
        )
    finally:
        # Close file handles even if the request fails
        for _, f in files:
            f.close()

    print(f"Status Code: {response.status_code}")
    result = response.json()
    print(f"Status: {result.get('status')}")
    print(f"Total Files: {result.get('total_files')}")
    for i, res in enumerate(result.get('results', [])):
        print(f"\nImage {i + 1}: {res.get('filename')}")
        print(f"  Status: {res.get('status')}")
        print(f"  Detections: {res.get('detections')}")
    print()


def test_chat():
    """Test LLM chat endpoint"""
    print("=" * 60)
    print("Testing Chat Endpoint")
    print("=" * 60)

    payload = {
        "message": "What are the typical signs of a high-grade squamous intraepithelial lesion (HSIL) on colposcopy?",
        "history": []
    }
    response = requests.post(
        f"{BASE_URL}/api/chat",
        json=payload
    )

    print(f"Status Code: {response.status_code}")
    if response.status_code == 200:
        result = response.json()
        print(f"Status: {result.get('status')}")
        print(f"Model: {result.get('model')}")
        print(f"Response:\n{result.get('response')}")
    else:
        print(f"Error: {response.json()}")
    print()
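
# For reference, the fields read above imply a chat response shaped roughly like
#   {"status": ..., "model": ..., "response": "..."}
# (inferred from what this script reads, not from the API's actual schema).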


def test_chat_with_history():
    """Test chat with conversation history"""
    print("=" * 60)
    print("Testing Chat with History")
    print("=" * 60)

    payload = {
        "message": "What about low-grade lesions?",
        "history": [
            {
                "role": "user",
                "text": "What are high-grade lesions?"
            },
            {
                "role": "bot",
                "text": "High-grade lesions (HSIL) show dense acetowhite epithelium, coarse punctation, and sharp borders."
            }
        ]
    }
    response = requests.post(
        f"{BASE_URL}/api/chat",
        json=payload
    )

    print(f"Status Code: {response.status_code}")
    if response.status_code == 200:
        result = response.json()
        print(f"Response:\n{result.get('response')}")
    else:
        print(f"Error: {response.json()}")
    print()


def test_report_generation():
    """Test report generation endpoint"""
    print("=" * 60)
    print("Testing Report Generation")
    print("=" * 60)

    payload = {
        "patient_data": {
            "age": 35,
            "gravida": 2,
            "para": 2,
            "lmp": "2024-02-01",
            "indication": "Abnormal Pap smear - ASCUS",
            "menstrual_status": "Regular"
        },
        "exam_findings": {
            "native": {
                "cervix_visible": True,
                "transformation_zone": "Type 1 (fully visible)",
                "ectropion": "Mild",
                "discharge": "None"
            },
            "acetic_acid": {
                "acetowhite_lesions": True,
                "location": "6-9 o'clock position",
                "density": "Dense white",
                "borders": "Sharp, well-defined",
                "size": "Moderate (covering 2 quadrants)"
            },
            "green_filter": {
                "vascular_patterns": "Coarse punctation",
                "mosaic": "Present",
                "atypical_vessels": "None"
            },
            "lugol": {
                "iodine_uptake": "Partial iodine negative area",
                "pattern": "Corresponds to acetowhite area"
            }
        }
    }
    response = requests.post(
        f"{BASE_URL}/api/generate-report",
        json=payload
    )

    print(f"Status Code: {response.status_code}")
    if response.status_code == 200:
        result = response.json()
        print(f"Status: {result.get('status')}")
        print(f"Model: {result.get('model')}")
        print(f"\nGenerated Report:\n{'-' * 60}")
        print(result.get('report'))
        print('-' * 60)
    else:
        print(f"Error: {response.json()}")
    print()


def main():
    """Run all tests"""
    print("\n" + "=" * 60)
    print("PATHORA COLPOSCOPY API TEST SUITE")
    print("=" * 60 + "\n")

    # Test health check
    test_health_check()

    # Test AI model endpoints (you'll need to provide actual image paths).
    # Uncomment and add your image paths:
    # test_acetowhite_detection("path/to/your/image.jpg")
    # test_cervix_detection("path/to/your/image.jpg")
    # test_batch_inference(["image1.jpg", "image2.jpg"])

    # Test LLM endpoints
    test_chat()
    test_chat_with_history()
    test_report_generation()

    print("\n" + "=" * 60)
    print("ALL TESTS COMPLETED")
    print("=" * 60 + "\n")


if __name__ == "__main__":
    # The requests dependency is already checked at import time above
    main()