# File size: 1,512 bytes
# Revision: c072ec7
import requests  # third-party HTTP client used for the POST call
import pprint    # stdlib pretty-printer for the server's JSON response
# The URL where your local FastAPI server is running
API_URL = "http://127.0.0.1:8000/run-prediction-batch"
def trigger_prediction_job(api_url=None, timeout=300):
    """
    Send a POST request to the API to start the prediction batch job.

    Args:
        api_url: Endpoint to POST to. Defaults to the module-level
            ``API_URL`` when not given (``None`` sentinel so the default
            is resolved at call time, not import time).
        timeout: Seconds to wait for the server before giving up
            (default 300 = 5 minutes, matching a long-running batch).

    Returns:
        The decoded JSON response (typically a dict) on success, or
        ``None`` when any error occurred. Errors are reported to stdout
        rather than raised — this is a CLI convenience script.
    """
    if api_url is None:
        api_url = API_URL
    print(f"🚀 Sending request to API endpoint: {api_url}")
    try:
        # The request doesn't need a body for this endpoint
        response = requests.post(api_url, timeout=timeout)
        # Raise an exception if the request returned an unsuccessful status code
        response.raise_for_status()
        print("\n✅ API request successful!")
        print("--- API Response ---")
        # Pretty-print the JSON response from the server
        payload = response.json()
        pprint.pprint(payload)
        return payload
    except requests.exceptions.HTTPError as http_err:
        # `response` is guaranteed bound here: raise_for_status() can only
        # raise after the POST itself has returned a response object.
        print(f"❌ HTTP error occurred: {http_err}")
        print(f"Response Body: {response.text}")
    except requests.exceptions.RequestException as req_err:
        # Connection refused, DNS failure, timeout, etc. — the server is
        # most likely not running at all.
        print(f"❌ A critical request error occurred: {req_err}")
        print("Is the FastAPI server running? Start it with 'uvicorn main:app --reload'")
    except Exception as e:
        # Last-resort boundary for a CLI script (e.g. invalid JSON in the
        # response body); report instead of crashing.
        print(f"❌ An unexpected error occurred: {e}")
    return None
if __name__ == "__main__":
    # Usage:
    #   Terminal 1 — start the FastAPI server:  uvicorn main:app --reload
    #   Terminal 2 — (venv activated) run this client:  python client.py
    trigger_prediction_job()