# arfox-ai / client.py
# Provenance: uploaded by exorcist123 — "feat: Add application code and models via Git LFS" (commit c072ec7)
import requests
import pprint
# The URL where your local FastAPI server is running.
# 127.0.0.1:8000 is uvicorn's default dev address; the path must match the
# route exposed by the server app (started via `uvicorn main:app --reload`).
API_URL: str = "http://127.0.0.1:8000/run-prediction-batch"
def trigger_prediction_job():
    """
    Send a POST request to the API to start the prediction batch job.

    Returns:
        The decoded JSON response from the server on success, or ``None``
        if the request failed or the body was not valid JSON.  (Returning
        the payload is backward-compatible: the original always returned
        ``None`` and existing callers ignore the result.)
    """
    print(f"🚀 Sending request to API endpoint: {API_URL}")
    response = None
    try:
        # The request doesn't need a body for this endpoint.
        response = requests.post(API_URL, timeout=300)  # 5-minute timeout
        # Raise an exception if the request returned an unsuccessful status code.
        response.raise_for_status()
        # Decode BEFORE announcing success: the original printed the success
        # banner first, so a 200 response with a non-JSON body printed
        # "✅ API request successful!" followed by an error.
        payload = response.json()
    except requests.exceptions.HTTPError as http_err:
        print(f"❌ HTTP error occurred: {http_err}")
        print(f"Response Body: {response.text}")
    except requests.exceptions.RequestException as req_err:
        print(f"❌ A critical request error occurred: {req_err}")
        print("Is the FastAPI server running? Start it with 'uvicorn main:app --reload'")
    except Exception as e:
        # Last-resort boundary for a CLI script (e.g. ValueError from a
        # non-JSON response body); report rather than crash.
        print(f"❌ An unexpected error occurred: {e}")
    else:
        print("\n✅ API request successful!")
        print("--- API Response ---")
        # Pretty-print the JSON response from the server.
        pprint.pprint(payload)
        return payload
    return None
if __name__ == "__main__":
# How to use:
# 1. In one terminal, run your FastAPI server: uvicorn main:app --reload
# 2. In a second terminal (with venv activated), run this script: python client.py
trigger_prediction_job()