# Spaces: Sleeping — Hugging Face Spaces status banner captured during scraping;
# kept as a comment so the file parses as Python.
import requests
import json
from typing import List, Dict, Any

# Endpoint URL (assuming the FastAPI server is running locally on port 7860)
BASE_URL = "http://localhost:7860"
def create_sample_transactions(num_transactions: int = 3) -> List[Dict[str, Any]]:
    """
    Generate sample transaction data for testing the /llm-analyse endpoint.

    Each transaction carries all 23 fields the endpoint expects: fraud_score,
    STATUS, cc_num, merchant, category, amt, gender, state, zip, lat, long,
    city_pop, job, unix_time, merch_lat, merch_long, is_fraud, age, trans_hour,
    trans_day, trans_month, trans_weekday, distance.
    (The original docstring said "22 required fields" but listed and built 23.)

    Args:
        num_transactions: Number of sample transactions to generate.

    Returns:
        A list of transaction dicts with deterministic, index-derived values:
        indices 0-1 are "approved"/non-fraud, index 2 onward "declined"/fraud.
    """
    return [
        {
            "fraud_score": round(10 + (i * 20), 2),  # varies: 10, 30, 50, ...
            "STATUS": "approved" if i < 2 else "declined",  # mix statuses
            "cc_num": 4532015112830366 + i,  # fake CC numbers
            "merchant": f"merchant_{i+1}",
            "category": ["gas", "grocery", "entertainment"][i % 3],
            "amt": round(50 + (i * 100), 2),  # increasing amounts: 50, 150, 250
            "gender": "F" if i % 2 == 0 else "M",
            "state": ["NY", "CA", "TX"][i % 3],
            "zip": 10001 + i * 100,
            "lat": 40.7128 + (i * 0.1),
            "long": -74.0060 + (i * 0.1),
            "city_pop": 8000000 - (i * 1000000),
            "job": ["Lawyer", "Doctor", "Engineer"][i % 3],
            "unix_time": 1640995200 + (i * 3600),  # sequential hours
            "merch_lat": 40.7589 + (i * 0.05),
            "merch_long": -73.9851 + (i * 0.05),
            "is_fraud": 0 if i < 2 else 1,
            "age": 30 + i * 5,
            "trans_hour": (12 + i) % 24,
            "trans_day": i + 1,
            "trans_month": 12,
            "trans_weekday": (i % 7) + 1,
            "distance": round(5 + (i * 10), 2),  # increasing distance
        }
        for i in range(num_transactions)
    ]
def test_llm_analyse():
    """
    Test the /llm-analyse endpoint by sending sample transactions and printing
    the response.

    Posts three generated transactions to {BASE_URL}/llm-analyse, pretty-prints
    the JSON reply, and — when the reply contains "fraud_score" and
    "explanation" — prints a coarse risk assessment (fraud_score is treated as
    a value in [0, 1]). Network and JSON errors are caught and reported rather
    than raised, so the script always exits cleanly.
    """
    endpoint = f"{BASE_URL}/llm-analyse"

    # Prepare payload
    payload = {
        "transactions": create_sample_transactions(3)
    }

    print("📤 Sending request to /llm-analyse...")
    print(json.dumps(payload, indent=2))
    print("-" * 50)

    try:
        # Explicit timeout so a hung/sleeping server cannot block forever
        # (requests has no default timeout).
        response = requests.post(endpoint, json=payload, timeout=60)
        response.raise_for_status()  # Raise an HTTPError for bad responses

        result = response.json()
        print("✅ Response received:")
        print(json.dumps(result, indent=2))

        # Additional checks
        if "fraud_score" in result and "explanation" in result:
            fraud_score = result["fraud_score"]
            explanation = result["explanation"]
            print(f"\n📊 Overall Fraud Score: {fraud_score} ({fraud_score * 100:.1f}%)")
            print(f"💡 Explanation: {explanation}")

            # Simple categorization (assumes fraud_score in [0, 1] — TODO confirm
            # against the server; sample inputs use a 0-100 style score)
            if fraud_score < 0.5:
                print("🟢 Assessment: Good (Low Risk)")
            elif 0.5 <= fraud_score <= 0.6:
                print("🟡 Assessment: Uncertain")
            else:
                print("🔴 Assessment: Suspicious/Critical")
        else:
            print("⚠️ Unexpected response format.")

    except requests.exceptions.RequestException as e:
        print(f"❌ Request failed: {e}")
        # e.response is None for connection-level failures; guard explicitly.
        if e.response is not None and hasattr(e.response, 'text'):
            print(f"Server response: {e.response.text}")
    except json.JSONDecodeError as e:
        # Only reachable after response.json(), so `response` is bound here.
        print(f"❌ Failed to parse JSON response: {e}")
        print(f"Raw response: {response.text}")
if __name__ == "__main__":
    # Run the test (script entry point; does nothing when imported)
    test_llm_analyse()