surfiniaburger commited on
Commit
215dd01
·
1 Parent(s): f3aebb2

Setup Monte Carlo MCP Server with Git LFS

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +34 -0
  3. README.md +11 -4
  4. mcp_server/README.md +71 -0
  5. mcp_server/__pycache__/gcs_utils.cpython-313.pyc +0 -0
  6. mcp_server/__pycache__/main.cpython-313.pyc +0 -0
  7. mcp_server/__pycache__/monte_carlo_simulation.cpython-313.pyc +0 -0
  8. mcp_server/data_handling/__pycache__/data_downloader.cpython-313.pyc +0 -0
  9. mcp_server/data_handling/__pycache__/data_loader.cpython-313.pyc +0 -0
  10. mcp_server/data_handling/__pycache__/telemetry_parser.cpython-313.pyc +0 -0
  11. mcp_server/data_handling/data_downloader.py +34 -0
  12. mcp_server/data_handling/data_loader.py +28 -0
  13. mcp_server/data_handling/data_stream_simulator.py +44 -0
  14. mcp_server/data_handling/telemetry_parser.py +81 -0
  15. mcp_server/gcs_utils.py +44 -0
  16. mcp_server/main.py +103 -0
  17. mcp_server/mcp_server.egg-info/PKG-INFO +13 -0
  18. mcp_server/mcp_server.egg-info/SOURCES.txt +18 -0
  19. mcp_server/mcp_server.egg-info/dependency_links.txt +0 -0
  20. mcp_server/mcp_server.egg-info/requires.txt +9 -0
  21. mcp_server/mcp_server.egg-info/top_level.txt +4 -0
  22. mcp_server/mock_request.py +57 -0
  23. mcp_server/models/__pycache__/fuel_consumption_model.cpython-313.pyc +0 -0
  24. mcp_server/models/__pycache__/pace_prediction_model.cpython-313.pyc +0 -0
  25. mcp_server/models/__pycache__/tire_degradation_model.cpython-313.pyc +0 -0
  26. mcp_server/models/fuel_consumption_model.py +48 -0
  27. mcp_server/models/pace_prediction_model.py +51 -0
  28. mcp_server/models/tire_degradation_model.py +49 -0
  29. mcp_server/models/train_models.py +48 -0
  30. mcp_server/monte_carlo_simulation.py +187 -0
  31. mcp_server/pipeline/__pycache__/pipeline.cpython-313.pyc +0 -0
  32. mcp_server/pipeline/evaluator.py +49 -0
  33. mcp_server/pipeline/module.py +127 -0
  34. mcp_server/pipeline/pipeline.py +112 -0
  35. mcp_server/prepare_data.py +45 -0
  36. mcp_server/pyproject.toml +27 -0
  37. mcp_server/requirements.txt +7 -0
  38. mcp_server/run_server_local.py +106 -0
  39. mcp_server/tests/test_prepare_data.py +66 -0
  40. mcp_server/tests/test_trainer_module.py +79 -0
  41. mcp_server/trained_models/fuel_consumption_model.pkl +3 -0
  42. mcp_server/trained_models/pace_prediction_model.pkl +3 -0
  43. mcp_server/trained_models/tire_degradation_model.pkl +3 -0
  44. mcp_server/unzipped_data/barber-motorsports-park/barber/.DS_Store +0 -0
  45. mcp_server/unzipped_data/barber-motorsports-park/barber/03_Provisional Results_Race 1_Anonymized.CSV +3 -0
  46. mcp_server/unzipped_data/barber-motorsports-park/barber/03_Provisional Results_Race 2_Anonymized.CSV +3 -0
  47. mcp_server/unzipped_data/barber-motorsports-park/barber/03_Results GR Cup Race 2 Official_Anonymized.CSV +3 -0
  48. mcp_server/unzipped_data/barber-motorsports-park/barber/05_Results by Class GR Cup Race 1 Official_Anonymized.CSV +3 -0
  49. mcp_server/unzipped_data/barber-motorsports-park/barber/23_AnalysisEnduranceWithSections_Race 1_Anonymized.CSV +3 -0
  50. mcp_server/unzipped_data/barber-motorsports-park/barber/99_Best 10 Laps By Driver_Race 1_Anonymized.CSV +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.csv filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use the official Python image
FROM python:3.10-slim

# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user

# Switch to the "user" user
USER user

# Set home to the user's home directory
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Copy only the requirements first so the dependency-install layer is cached
# across source-code changes (full COPY below would invalidate it every build)
COPY --chown=user requirements.txt $HOME/app/requirements.txt

# Install requirements
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the current directory contents into the container at $HOME/app setting the owner to the user
COPY --chown=user . $HOME/app

# Expose port 7860
EXPOSE 7860

# Set environment variables
ENV PORT=7860
ENV PYTHONUNBUFFERED=1

# Run the application
# We run main.py which uses FastMCP.
# We need to make sure it listens on 0.0.0.0 and the correct port.
# The main.py reads PORT env var.
CMD ["python", "mcp_server/main.py"]
README.md CHANGED
@@ -1,10 +1,17 @@
1
  ---
2
  title: Monte Carlo Sim
3
- emoji: 📈
4
- colorFrom: gray
5
- colorTo: purple
6
  sdk: docker
7
  pinned: false
 
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
1
  ---
2
  title: Monte Carlo Sim
3
+ emoji: 🏎️
4
+ colorFrom: red
5
+ colorTo: black
6
  sdk: docker
7
  pinned: false
8
+ app_port: 7860
9
  ---
10
 
11
+ # Monte Carlo Simulation MCP Server
12
+
13
+ This is a FastMCP server for Monte Carlo simulations of racing data.
14
+
15
+ ## Deployment
16
+
17
+ Deployed on Hugging Face Spaces using Docker.
mcp_server/README.md ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MCP Server: Monte Carlo Simulation with a TFX Pipeline
2
+
3
+ This directory contains the `mcp_server`, a tool for running Monte Carlo simulations for race strategy, now enhanced with a production-grade MLOps workflow using TensorFlow Extended (TFX).
4
+
5
+ ## Overview
6
+
7
+ The server provides race strategy insights by running simulations based on models for tire degradation, fuel consumption, and race pace. The ML models are now managed by a TFX pipeline that automates the process of data preparation, training, evaluation, and deployment.
8
+
9
+ ## Environment Setup
10
+
11
+ **IMPORTANT:** This project uses the `tfx` library, which currently requires **Python 3.11 or lower**. Please ensure you are using a compatible Python version to run the pipeline and tests.
12
+
13
+ 1. **Create a Virtual Environment:**
14
+ ```bash
15
+ python3.11 -m venv venv
16
+ source venv/bin/activate
17
+ ```
18
+
19
+ 2. **Install Dependencies:**
20
+ First, install TFX, Apache Beam, and other core dependencies:
21
+ ```bash
22
+ pip install "tfx>=1.15.0" "apache-beam[interactive]>=2.46.0" pytest
23
+ ```
24
+ Then, install the project-specific dependencies:
25
+ ```bash
26
+ pip install -e .
27
+ ```
28
+
29
+ ## MLOps Workflow
30
+
31
+ The ML model lifecycle is managed by a series of scripts that should be run in order.
32
+
33
+ ### 1. Prepare Data
34
+
35
+ This script downloads the raw race data, processes it into a unified CSV file, and places it in the correct directory for the TFX pipeline to use.
36
+
37
+ ```bash
38
+ python3 mcp_server/prepare_data.py
39
+ ```
40
+
41
+ ### 2. Run the TFX Pipeline
42
+
43
+ This script orchestrates the end-to-end ML workflow. It will ingest the prepared data, train the models, evaluate their performance, and (if they pass a quality threshold) deploy them to the serving directory.
44
+
45
+ ```bash
46
+ python3 mcp_server/pipeline/pipeline.py
47
+ ```
48
+
49
+ ### 3. Visualize Model Performance
50
+
51
+ After the pipeline has run, you can generate a visual report of the model performance metrics. This script reads the latest `evaluation_metrics.json` and creates a bar chart saved as `evaluation_results.png` in the latest model's directory.
52
+
53
+ ```bash
54
+ python3 mcp_server/visualize_metrics.py
55
+ ```
56
+
57
+ ### 4. Start the Server
58
+
59
+ Once the pipeline has successfully run and a model has been deployed, you can start the MCP server. The server will automatically load the latest blessed model from the pipeline.
60
+
61
+ ```bash
62
+ python3 mcp_server/main.py
63
+ ```
64
+
65
+ ## Testing
66
+
67
+ To verify the correctness of the pipeline components, run the unit tests using `pytest`. Make sure you are in the compatible Python 3.11 environment with all dependencies installed.
68
+
69
+ ```bash
70
+ pytest mcp_server/tests/
71
+ ```
mcp_server/__pycache__/gcs_utils.cpython-313.pyc ADDED
Binary file (2.81 kB). View file
 
mcp_server/__pycache__/main.cpython-313.pyc ADDED
Binary file (4.7 kB). View file
 
mcp_server/__pycache__/monte_carlo_simulation.cpython-313.pyc ADDED
Binary file (8.75 kB). View file
 
mcp_server/data_handling/__pycache__/data_downloader.cpython-313.pyc ADDED
Binary file (1.88 kB). View file
 
mcp_server/data_handling/__pycache__/data_loader.cpython-313.pyc ADDED
Binary file (1.73 kB). View file
 
mcp_server/data_handling/__pycache__/telemetry_parser.cpython-313.pyc ADDED
Binary file (4.91 kB). View file
 
mcp_server/data_handling/data_downloader.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
import os

def download_and_move_files(data_dir="data"):
    """
    Downloads the hackathon race-data zip archives into ``data_dir``.

    Args:
        data_dir (str): Destination directory; created if it doesn't exist.

    Raises:
        requests.HTTPError: if any download returns an error status.
    """
    # Create the directory if it doesn't exist (race-free idiom)
    os.makedirs(data_dir, exist_ok=True)

    # List of file URLs to download (one archive per track)
    urls = [
        "https://trddev.com/hackathon-2025/barber-motorsports-park.zip",
        "https://trddev.com/hackathon-2025/circuit-of-the-americas.zip",
        "https://trddev.com/hackathon-2025/indianapolis.zip",
        "https://trddev.com/hackathon-2025/road-america.zip",
        "https://trddev.com/hackathon-2025/sebring.zip",
        "https://trddev.com/hackathon-2025/sonoma.zip",
        "https://trddev.com/hackathon-2025/virginia-international-raceway.zip"
    ]

    # Stream each archive to disk in chunks to keep memory usage bounded
    for url in urls:
        filename = os.path.join(data_dir, os.path.basename(url))
        with requests.get(url, stream=True) as r:
            r.raise_for_status()  # fail fast on HTTP errors
            with open(filename, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
            # Fixed: original f-string had no placeholder ("Downloaded (unknown)")
            print(f"Downloaded {filename}")

if __name__ == '__main__':
    download_and_move_files()
mcp_server/data_handling/data_loader.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import zipfile

def unzip_data(data_dir="data", output_dir="unzipped_data"):
    """
    Unzips all .zip files in the data_dir to the output_dir.

    Each archive is extracted into a subdirectory of ``output_dir`` named
    after the archive (without the ".zip" suffix).

    Args:
        data_dir (str): The directory containing the .zip files.
        output_dir (str): The directory to extract the files to.
    """
    os.makedirs(output_dir, exist_ok=True)

    for filename in os.listdir(data_dir):
        if filename.endswith(".zip"):
            zip_path = os.path.join(data_dir, filename)
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                # Create a directory with the name of the zip file (minus ".zip")
                extract_path = os.path.join(output_dir, filename[:-4])
                os.makedirs(extract_path, exist_ok=True)
                # Fixed: original f-strings printed the literal "(unknown)"
                # instead of interpolating the archive name
                print(f"Extracting {filename} to {extract_path}...")
                zip_ref.extractall(extract_path)
                print(f"Extracted {filename} successfully.")

if __name__ == "__main__":
    unzip_data()
mcp_server/data_handling/data_stream_simulator.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import pandas as pd
import time
import os
# Fixed: was `from pilot.data_handling...` — no `pilot` package exists in this
# project (top_level.txt lists data_handling/main/models/monte_carlo_simulation);
# main.py imports this module family as `data_handling.*`.
from data_handling.telemetry_parser import parse_telemetry

def stream_simulator(race_data_dir, delay_multiplier=1.0):
    """
    Simulates a real-time data stream from a telemetry CSV file.

    Args:
        race_data_dir (str): The path to the directory containing the race data.
        delay_multiplier (float): A factor to speed up or slow down the simulation.

    Yields:
        dict: A dictionary representing a single row of telemetry data.
    """
    # Find the main telemetry file (assuming it's the largest CSV) — used for
    # the log message; parse_telemetry repeats this search internally.
    csv_files = [f for f in os.listdir(race_data_dir) if f.endswith('.csv')]
    if not csv_files:
        print(f"No CSV files found in {race_data_dir}")
        return

    main_telemetry_file = max(csv_files, key=lambda f: os.path.getsize(os.path.join(race_data_dir, f)))
    file_path = os.path.join(race_data_dir, main_telemetry_file)

    print(f"Streaming data from {file_path}...")
    # Fixed: parse_telemetry expects the race-data *directory* (it calls
    # os.listdir on its argument), not a single file path.
    df = parse_telemetry(race_data_dir)

    if df is None:
        return

    # Assuming 'timestamp' is in milliseconds and represents the ECU time.
    # We calculate the time difference between consecutive timestamps to simulate the delay.
    if 'timestamp' in df.columns:
        df = df.sort_values(by='timestamp').reset_index(drop=True)
        df['time_diff'] = df['timestamp'].diff().fillna(0) / 1000.0  # Convert to seconds
    else:
        # If no timestamp, use a fixed delay
        df['time_diff'] = 0.1

    for index, row in df.iterrows():
        delay = row['time_diff'] * delay_multiplier
        time.sleep(delay)
        yield row.to_dict()
mcp_server/data_handling/telemetry_parser.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import pandas as pd
import os

def parse_telemetry(race_data_dir):
    """
    Parses telemetry data and merges it with lap times and other features.

    Reads the largest CSV in the directory as raw long-format telemetry, joins
    lap times from the "Analysis" export, then derives proxy features
    ('fuel_consumption', 'relative_pace').

    Args:
        race_data_dir (str): The path to the directory containing the race data.

    Returns:
        pandas.DataFrame: A cleaned and merged DataFrame with telemetry data,
        or None if required files are missing or any step fails.
    """
    try:
        # Find the main telemetry file (assuming it's the largest CSV)
        csv_files = [f for f in os.listdir(race_data_dir) if f.lower().endswith('.csv')]
        if not csv_files:
            print(f"No CSV files found in {race_data_dir}")
            return None

        main_telemetry_file = max(csv_files, key=lambda f: os.path.getsize(os.path.join(race_data_dir, f)))
        telemetry_file_path = os.path.join(race_data_dir, main_telemetry_file)

        # Find the analysis file (lap-time summary exported alongside telemetry)
        analysis_file = [f for f in csv_files if 'Analysis' in f]
        if not analysis_file:
            print(f"Analysis file not found in {race_data_dir}")
            return None
        analysis_file_path = os.path.join(race_data_dir, analysis_file[0])

        # Read the analysis data (semicolon-delimited export)
        df_analysis = pd.read_csv(analysis_file_path, delimiter=';')
        df_analysis.columns = df_analysis.columns.str.strip()

        # Rename columns in analysis data for merging
        df_analysis = df_analysis.rename(columns={'LAP_NUMBER': 'lap', 'DRIVER_NUMBER': 'driver_number', 'LAP_TIME': 'lap_time'})

        # Convert "MM:SS.sss"-style lap_time strings to seconds: each
        # ':'-separated field is weighted by 60**position counted from the right
        df_analysis['lap_time'] = df_analysis['lap_time'].apply(lambda x: sum(float(t) * 60**i for i, t in enumerate(reversed(str(x).split(':')))))

        # Create a lap -> lap_time mapping.
        # NOTE(review): set_index('lap') keeps only the LAST row's time per lap
        # number when several drivers share a lap — confirm this is intended.
        lap_time_map = df_analysis.set_index('lap')['lap_time'].to_dict()

        # Process telemetry data in chunks to bound memory usage
        df_telemetry_chunks = pd.read_csv(telemetry_file_path, chunksize=100000)

        processed_chunks = []
        for chunk in df_telemetry_chunks:
            # Handle the erroneous lap count in telemetry data:
            # 32768 appears to be a sentinel/overflow value; blank it out and
            # forward-fill from the previous valid lap number
            if 'lap' in chunk.columns:
                chunk['lap'] = chunk['lap'].replace(32768, pd.NA).ffill()

            # Pivot long-format rows (telemetry_name/telemetry_value pairs)
            # into one column per telemetry channel
            chunk = chunk.pivot_table(index=['timestamp', 'lap', 'vehicle_id'], columns='telemetry_name', values='telemetry_value').reset_index()

            # Map lap times onto each telemetry row
            chunk['lap_time'] = chunk['lap'].map(lap_time_map)
            processed_chunks.append(chunk)

        df_merged = pd.concat(processed_chunks)

        # Feature Engineering: Fuel Consumption Proxy.
        # Normalize aps (throttle, assumed 0-100 — TODO confirm) and nmot
        # (engine rpm) to a 0-1 scale, then blend 70/30. This is a heuristic
        # proxy, not a measured consumption value.
        df_merged['aps_norm'] = df_merged['aps'] / 100.0
        df_merged['nmot_norm'] = df_merged['nmot'] / df_merged['nmot'].max()
        df_merged['fuel_consumption'] = (0.7 * df_merged['aps_norm']) + (0.3 * df_merged['nmot_norm'])

        # Feature Engineering: Relative Pace — lap_time delta against the
        # fastest lap_time recorded for the same lap number
        fastest_laps = df_merged.groupby('lap')['lap_time'].min().reset_index()
        fastest_laps.rename(columns={'lap_time': 'fastest_lap'}, inplace=True)
        df_merged = pd.merge(df_merged, fastest_laps, on='lap', how='left')
        df_merged['relative_pace'] = df_merged['lap_time'] - df_merged['fastest_lap']

        return df_merged

    except FileNotFoundError:
        print(f"Error: A file was not found in {race_data_dir}")
        return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
mcp_server/gcs_utils.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
from google.cloud import storage
import sys

def download_blob(bucket_name, source_blob_name, destination_file_name):
    """Downloads a blob from the bucket.

    Args:
        bucket_name (str): Name of the GCS bucket.
        source_blob_name (str): Object path within the bucket.
        destination_file_name (str): Local path to write the object to.

    Raises:
        Exception: re-raises any GCS/IO error after logging it to stderr.
    """
    try:
        storage_client = storage.Client()
        bucket = storage_client.bucket(bucket_name)
        blob = bucket.blob(source_blob_name)
        blob.download_to_filename(destination_file_name)
        print(f"Downloaded storage object {source_blob_name} from bucket {bucket_name} to local file {destination_file_name}.", file=sys.stderr)
    except Exception as e:
        print(f"Failed to download {source_blob_name} from {bucket_name}: {e}", file=sys.stderr)
        raise

def download_directory(bucket_name, prefix, local_dir):
    """Downloads every object under ``prefix`` from the bucket into ``local_dir``.

    The blob hierarchy relative to ``prefix`` is recreated on the local
    filesystem; "directory" placeholder blobs (names ending in "/") are skipped.

    Args:
        bucket_name (str): Name of the GCS bucket.
        prefix (str): Object-name prefix to list under.
        local_dir (str): Local root directory; created if missing.

    Raises:
        Exception: re-raises any GCS/IO error after logging it to stderr.
    """
    try:
        storage_client = storage.Client()
        bucket = storage_client.bucket(bucket_name)
        blobs = bucket.list_blobs(prefix=prefix)

        # exist_ok avoids the check-then-create race of the explicit exists test
        os.makedirs(local_dir, exist_ok=True)

        for blob in blobs:
            if blob.name.endswith("/"):
                continue  # skip "directory" placeholder objects

            # Remove the prefix from the local path to keep the structure relative
            relative_path = os.path.relpath(blob.name, prefix)
            local_path = os.path.join(local_dir, relative_path)

            os.makedirs(os.path.dirname(local_path), exist_ok=True)

            blob.download_to_filename(local_path)
            print(f"Downloaded {blob.name} to {local_path}", file=sys.stderr)

    except Exception as e:
        print(f"Failed to download directory {prefix} from {bucket_name}: {e}", file=sys.stderr)
        raise
mcp_server/main.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import sys
import asyncio
import logging
from fastmcp import FastMCP

# Add the current directory to sys.path to ensure modules can be imported
# when this file is run as a script (e.g. `python mcp_server/main.py`)
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from monte_carlo_simulation import MonteCarloSimulation
from data_handling.telemetry_parser import parse_telemetry
from gcs_utils import download_directory

# Configure logging
logging.basicConfig(format="[%(levelname)s]: %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize FastMCP
mcp = FastMCP("Monte Carlo MCP Server 🏎️")

# Constants
PIPELINE_NAME = "mcp-server-pipeline"
# NOTE(review): SERVING_MODEL_DIR is defined but never referenced in this
# module — confirm whether other code consumes it before removing.
SERVING_MODEL_DIR = os.path.join("mcp_server", "serving_model", PIPELINE_NAME)

# Global simulation instance (stays None until initialize_simulation succeeds)
mc_simulation = None

def initialize_simulation():
    """Initializes the Monte Carlo simulation with models and data.

    Asset resolution order:
      1. If the GCS_BUCKET_NAME env var is set, download "trained_models" and
         "data/barber" from that bucket into ./downloaded_assets.
      2. Otherwise, use the local assets shipped next to this file.

    Returns:
        bool: True on success; False on download failure, unparsable race
        data, or a simulation construction error.
    """
    global mc_simulation

    # Check for GCS configuration
    gcs_bucket = os.environ.get("GCS_BUCKET_NAME")

    if gcs_bucket:
        logger.info(f"GCS_BUCKET_NAME found: {gcs_bucket}. Downloading assets from GCS...")
        try:
            # Define local paths for downloads
            local_assets_dir = os.path.join(os.getcwd(), "downloaded_assets")
            local_model_dir = os.path.join(local_assets_dir, "trained_models")
            local_data_dir = os.path.join(local_assets_dir, "data", "barber")

            # Download models
            logger.info("Downloading models...")
            download_directory(gcs_bucket, "trained_models", local_model_dir)

            # Download race data
            logger.info("Downloading race data...")
            download_directory(gcs_bucket, "data/barber", local_data_dir)

            # Update paths to point to downloaded assets
            model_dir_to_use = local_model_dir
            race_data_dir = local_data_dir

        except Exception as e:
            logger.error(f"Failed to download assets from GCS: {e}")
            return False
    else:
        logger.info("GCS_BUCKET_NAME not set. Using local assets.")
        script_dir = os.path.dirname(os.path.abspath(__file__))
        model_dir_to_use = os.path.join(script_dir, "trained_models")
        race_data_dir = os.path.join(script_dir, 'unzipped_data', 'barber-motorsports-park', 'barber')

    logger.info(f"Loading models from: {model_dir_to_use}")

    # Load race data (parse_telemetry returns None on failure, never raises)
    race_data = parse_telemetry(race_data_dir)
    if race_data is None:
        logger.error("Failed to load race data.")
        return False

    # Initialize Simulation
    try:
        mc_simulation = MonteCarloSimulation(race_data, model_dir=model_dir_to_use)
        logger.info("Monte Carlo Simulation initialized successfully.")
        return True
    except Exception as e:
        logger.error(f"Failed to initialize simulation: {e}")
        return False

# Initialize on startup (runs at import time); tools degrade gracefully
# by returning an error string if this failed.
if not initialize_simulation():
    logger.warning("Simulation failed to initialize. Tools may not work.")

@mcp.tool()
def find_optimal_pit_window() -> str:
    """
    Uses Monte Carlo simulation to find the optimal pit stop strategy.
    Returns a string describing the best strategy and its average time.
    """
    if mc_simulation is None:
        return "Error: Simulation not initialized."

    return mc_simulation.find_optimal_pit_window()

if __name__ == "__main__":
    # Cloud Run sets PORT environment variable
    port = int(os.getenv("PORT", 8080))
    logger.info(f"🚀 MCP server starting on port {port}")

    # Run the server
    # FastMCP handles the HTTP server loop
    mcp.run(transport="sse", port=port, host="0.0.0.0")
mcp_server/mcp_server.egg-info/PKG-INFO ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: mcp-server
3
+ Version: 0.1.0
4
+ Summary: MCP Server for the Hack the Track Monte Carlo Simulation
5
+ Requires-Python: <3.13,>=3.11
6
+ Requires-Dist: google-adk>=1.18.0
7
+ Requires-Dist: numpy
8
+ Requires-Dist: pandas
9
+ Requires-Dist: scikit-learn>=1.7.2
10
+ Requires-Dist: joblib
11
+ Requires-Dist: requests
12
+ Provides-Extra: test
13
+ Requires-Dist: pytest>=8.4.2; extra == "test"
mcp_server/mcp_server.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ main.py
2
+ monte_carlo_simulation.py
3
+ pyproject.toml
4
+ data_handling/data_downloader.py
5
+ data_handling/data_loader.py
6
+ data_handling/data_stream_simulator.py
7
+ data_handling/telemetry_parser.py
8
+ mcp_server.egg-info/PKG-INFO
9
+ mcp_server.egg-info/SOURCES.txt
10
+ mcp_server.egg-info/dependency_links.txt
11
+ mcp_server.egg-info/requires.txt
12
+ mcp_server.egg-info/top_level.txt
13
+ models/fuel_consumption_model.py
14
+ models/pace_prediction_model.py
15
+ models/tire_degradation_model.py
16
+ models/train_models.py
17
+ tests/test_prepare_data.py
18
+ tests/test_trainer_module.py
mcp_server/mcp_server.egg-info/dependency_links.txt ADDED
File without changes
mcp_server/mcp_server.egg-info/requires.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ google-adk>=1.18.0
2
+ numpy
3
+ pandas
4
+ scikit-learn>=1.7.2
5
+ joblib
6
+ requests
7
+
8
+ [test]
9
+ pytest>=8.4.2
mcp_server/mcp_server.egg-info/top_level.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ data_handling
2
+ main
3
+ models
4
+ monte_carlo_simulation
mcp_server/mock_request.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import subprocess
import sys
import time
import requests
import os
import signal

def run_mock_request():
    """Smoke-test the MCP server: spawn it, poll the SSE endpoint, shut it down.

    Starts `mcp_server/main.py` as a subprocess with GCS/port env vars set,
    polls http://localhost:8080/sse until it answers 200 (or a retry budget
    is exhausted), then always terminates the child process.
    """
    # Path to the local server runner
    server_script = "mcp_server/main.py"

    # Set environment variables for the child process
    env = os.environ.copy()
    env["GCS_BUCKET_NAME"] = "monte-carlo-mcp-assets"
    env["PORT"] = "8080"

    # Start the server process
    print("Starting server process...")
    process = subprocess.Popen(
        [sys.executable, server_script],
        env=env,
        stdout=sys.stdout,
        stderr=sys.stderr,
        text=True
    )

    # Wait for server to start
    print("Waiting for server to start...")
    server_url = "http://localhost:8080/sse"
    max_retries = 30

    try:
        for i in range(max_retries):
            try:
                response = requests.get(server_url, stream=True, timeout=1)
                if response.status_code == 200:
                    print("Server is up and running! (SSE endpoint accessible)")
                    break
            # Fixed: catch RequestException, not just ConnectionError — a read
            # timeout ("not ready yet") used to escape the loop and abort the test
            except requests.exceptions.RequestException:
                time.sleep(2)
                print(f"Waiting... ({i+1}/{max_retries})")
        else:
            print("Server failed to start within timeout.")
            return

        print("Test passed: Server started and is listening on port 8080.")

    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        print("Terminating server process...")
        # Popen.terminate() is portable; os.kill(pid, SIGTERM) has POSIX-only
        # semantics and behaves differently on Windows
        process.terminate()
        process.wait()
        print("Server process terminated.")

if __name__ == "__main__":
    run_mock_request()
mcp_server/models/__pycache__/fuel_consumption_model.cpython-313.pyc ADDED
Binary file (1.76 kB). View file
 
mcp_server/models/__pycache__/pace_prediction_model.cpython-313.pyc ADDED
Binary file (1.99 kB). View file
 
mcp_server/models/__pycache__/tire_degradation_model.cpython-313.pyc ADDED
Binary file (1.85 kB). View file
 
mcp_server/models/fuel_consumption_model.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np

def train_fuel_consumption_model(data):
    """
    Trains a linear model to predict fuel consumption per lap.

    Args:
        data (pd.DataFrame): DataFrame containing telemetry data.
            It must include 'nmot', 'aps', and 'fuel_consumption'.
            (Docstring fixed: previously claimed 'ath', but the code uses 'aps'.)

    Returns:
        LinearRegression: the fitted model (held-out MSE is printed).
    """
    # Feature Engineering
    features = ['nmot', 'aps']
    target = 'fuel_consumption'  # Assuming 'fuel_consumption' is the target variable

    X = data[features]
    y = data[target]

    # 80/20 split with a fixed seed so the reported MSE is reproducible
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = LinearRegression()
    model.fit(X_train, y_train)

    # Evaluate the model
    predictions = model.predict(X_test)
    mse = mean_squared_error(y_test, predictions)
    print(f"Fuel Consumption Model MSE: {mse}")

    return model

def predict_fuel_consumption(model, live_data):
    """
    Predicts the fuel consumption for a given lap.

    Args:
        model: The trained fuel consumption model.
        live_data (pd.DataFrame): A DataFrame with the live telemetry data
            (same feature columns used at training time).

    Returns:
        float: The predicted fuel consumption for the first row.
    """
    return model.predict(live_data)[0]
mcp_server/models/pace_prediction_model.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import numpy as np

def train_pace_prediction_model(data):
    """
    Trains a random-forest model to predict the car's pace relative to competitors.

    Args:
        data (pd.DataFrame): DataFrame containing telemetry data. It must include
            'speed', 'gear', 'nmot', 'aps', 'pbrake_f', 'pbrake_r',
            'accx_can', 'accy_can', 'Steering_Angle', 'traffic',
            and 'relative_pace'.
            (Docstring fixed: previously listed 'Speed', 'Gear' and 'ath',
            which do not match the feature columns the code selects.)

    Returns:
        RandomForestRegressor: the fitted model (held-out MSE is printed).
    """
    # Feature Engineering — column names must match the parsed telemetry schema
    features = ['speed', 'gear', 'nmot', 'aps', 'pbrake_f', 'pbrake_r',
                'accx_can', 'accy_can', 'Steering_Angle', 'traffic']
    target = 'relative_pace'  # Assuming 'relative_pace' is the target variable

    X = data[features]
    y = data[target]

    # 80/20 split with a fixed seed so the reported MSE is reproducible
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = RandomForestRegressor(n_estimators=100, random_state=42)
    model.fit(X_train, y_train)

    # Evaluate the model
    predictions = model.predict(X_test)
    mse = mean_squared_error(y_test, predictions)
    print(f"Pace Prediction Model MSE: {mse}")

    return model

def predict_pace(model, live_data):
    """
    Predicts the car's pace based on live data.

    Args:
        model: The trained pace prediction model.
        live_data (pd.DataFrame): A DataFrame with the live telemetry data
            (same feature columns used at training time).

    Returns:
        float: The predicted pace for the first row.
    """
    return model.predict(live_data)[0]
mcp_server/models/tire_degradation_model.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np

def train_tire_degradation_model(data):
    """
    Fit a linear model of lap time against tire-wear indicators.

    Args:
        data (pd.DataFrame): Telemetry frame containing the columns 'lap',
            'accx_can', 'accy_can', 'Steering_Angle' and the target 'lap_time'.

    Returns:
        LinearRegression: the fitted model (a held-out MSE is printed).
    """
    feature_cols = ['lap', 'accx_can', 'accy_can', 'Steering_Angle']

    # 80/20 split with a fixed seed so the reported MSE is reproducible
    X_train, X_test, y_train, y_test = train_test_split(
        data[feature_cols], data['lap_time'], test_size=0.2, random_state=42
    )

    model = LinearRegression().fit(X_train, y_train)

    # Report error on the held-out split
    mse = mean_squared_error(y_test, model.predict(X_test))
    print(f"Tire Degradation Model MSE: {mse}")

    return model

def predict_lap_time_dropoff(model, live_data):
    """
    Predict the lap-time drop-off for a live telemetry sample.

    Args:
        model: The trained tire degradation model.
        live_data (pd.DataFrame): One or more rows of live telemetry features.

    Returns:
        float: The prediction for the first row.
    """
    predictions = model.predict(live_data)
    return predictions[0]
mcp_server/models/train_models.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import os
3
+ import joblib
4
+ from pilot.data_handling.telemetry_parser import parse_telemetry
5
+ from pilot.models.tire_degradation_model import train_tire_degradation_model
6
+ from pilot.models.fuel_consumption_model import train_fuel_consumption_model
7
+ from pilot.models.pace_prediction_model import train_pace_prediction_model
8
+
9
def train_and_save_models(race_data_dir, model_dir="trained_models"):
    """
    Trains all the predictive models and saves them to disk.

    Args:
        race_data_dir (str): The path to the directory containing the race data.
        model_dir (str): The directory to save the trained models to.
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(model_dir, exist_ok=True)

    # Parse the data
    data = parse_telemetry(race_data_dir)
    if data is None:
        print("Failed to parse data. Aborting training.")
        return

    # The sklearn models cannot handle NaNs; drop incomplete rows up front.
    data.dropna(inplace=True)

    # Train and save the tire degradation model
    print("Training Tire Degradation Model...")
    tire_model = train_tire_degradation_model(data)
    joblib.dump(tire_model, os.path.join(model_dir, "tire_degradation_model.pkl"))
    print("Tire Degradation Model saved.")

    # Train and save the fuel consumption model
    print("\nTraining Fuel Consumption Model...")
    fuel_model = train_fuel_consumption_model(data)
    joblib.dump(fuel_model, os.path.join(model_dir, "fuel_consumption_model.pkl"))
    print("Fuel Consumption Model saved.")

    # Train and save the pace prediction model
    print("\nTraining Pace Prediction Model...")
    # The pace model requires a 'traffic' feature; default it when absent.
    if 'traffic' not in data.columns:
        data['traffic'] = 0
    pace_model = train_pace_prediction_model(data)
    joblib.dump(pace_model, os.path.join(model_dir, "pace_prediction_model.pkl"))
    print("Pace Prediction Model saved.")
mcp_server/monte_carlo_simulation.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+ import joblib
4
+ import os
5
+ import sys
6
+
7
+ from models.tire_degradation_model import predict_lap_time_dropoff
8
+ from models.fuel_consumption_model import predict_fuel_consumption
9
+ from models.pace_prediction_model import predict_pace
10
+
11
+ SAFETY_CAR_TIME_LOSS_SECONDS = 30
12
+
13
class MonteCarloSimulation:
    """Lap-by-lap race simulator driven by three pre-trained models.

    Loads the tire-degradation, fuel-consumption and pace models from
    *model_dir* and replays a race using the average driver inputs observed
    in *race_data*, so alternative pit-stop strategies can be compared.
    """

    def __init__(self, race_data, model_dir, num_simulations=1000):
        """
        Args:
            race_data (pd.DataFrame): Historical telemetry; must contain every
                column averaged into avg_driver_inputs below.
            model_dir (str): Directory holding the three ``.pkl`` model files.
            num_simulations (int): Number of runs per strategy evaluation.
        """
        self.race_data = race_data
        self.num_simulations = num_simulations
        self.tire_model = joblib.load(os.path.join(model_dir, "tire_degradation_model.pkl"))
        self.fuel_model = joblib.load(os.path.join(model_dir, "fuel_consumption_model.pkl"))
        self.pace_model = joblib.load(os.path.join(model_dir, "pace_prediction_model.pkl"))

        # Calculate average driver inputs
        # (used as a stand-in for live telemetry on every simulated lap)
        self.avg_driver_inputs = {
            'accx_can': self.race_data['accx_can'].mean(),
            'accy_can': self.race_data['accy_can'].mean(),
            'Steering_Angle': self.race_data['Steering_Angle'].mean(),
            'nmot': self.race_data['nmot'].mean(),
            'aps': self.race_data['aps'].mean(),
            'pbrake_f': self.race_data['pbrake_f'].mean(),
            'pbrake_r': self.race_data['pbrake_r'].mean(),
            'speed': self.race_data['speed'].mean(),
            'gear': self.race_data['gear'].mean(),
        }


    def simulate_lap(self, live_data):
        """Simulates a single lap of the race.

        Returns:
            tuple: (predicted lap time, predicted fuel consumed) for the lap.
        """

        tire_features = ['lap', 'accx_can', 'accy_can', 'Steering_Angle']
        fuel_features = ['nmot', 'aps']
        # NOTE(review): pace_features (and self.pace_model) are defined but
        # never used here — predict_pace is imported at module level yet not
        # called; confirm whether pace should factor into the lap time.
        pace_features = ['speed', 'gear', 'nmot', 'aps', 'pbrake_f', 'pbrake_r',
                         'accx_can', 'accy_can', 'Steering_Angle', 'traffic']

        lap_time = predict_lap_time_dropoff(self.tire_model, live_data[tire_features])
        fuel_consumed = predict_fuel_consumption(self.fuel_model, live_data[fuel_features])

        return lap_time, fuel_consumed

    def run_strategy_simulation(self, strategy, safety_car_lap=None):
        """
        Runs a full race simulation for a given pit stop strategy.

        Args:
            strategy (list): A list of lap numbers for pit stops.
            safety_car_lap (int): The lap on which a safety car is deployed.

        Returns:
            float: The total race time for the simulated strategy, or
            ``float('inf')`` when the car runs out of fuel (invalid strategy).
        """
        total_race_time = 0
        tire_wear = 0
        fuel_level = 100 # start with a full tank

        # DataFrame membership (`in`) tests column names, so this falls back
        # to a 60-lap race when the data has no 'total_laps' column.
        total_laps = self.race_data['total_laps'].iloc[0] if 'total_laps' in self.race_data else 60
        pit_stop_time = 25 # seconds

        for lap in range(1, total_laps + 1):
            if lap in strategy:
                # Pit stop (applied before the lap is driven: fresh tires and
                # full tank take effect on this very lap)
                total_race_time += pit_stop_time
                tire_wear = 0 # Fresh tires
                fuel_level = 100 # Refuel

            # Synthetic "live" telemetry: average driver inputs plus the
            # current tire age.
            live_data = pd.DataFrame({
                'lap': [tire_wear], # Use tire_wear as the feature for the model
                'accx_can': [self.avg_driver_inputs['accx_can']],
                'accy_can': [self.avg_driver_inputs['accy_can']],
                'Steering_Angle': [self.avg_driver_inputs['Steering_Angle']],
                'nmot': [self.avg_driver_inputs['nmot']],
                'aps': [self.avg_driver_inputs['aps']],
                'pbrake_f': [self.avg_driver_inputs['pbrake_f']],
                'pbrake_r': [self.avg_driver_inputs['pbrake_r']],
                'speed': [self.avg_driver_inputs['speed']],
                'gear': [self.avg_driver_inputs['gear']],
                'traffic': [0] # Placeholder for traffic data
            })

            lap_time, fuel_consumed = self.simulate_lap(live_data)

            total_race_time += lap_time
            # Safety-car time loss only applies when we did not pit this lap.
            if safety_car_lap and lap == safety_car_lap and lap not in strategy:
                total_race_time += SAFETY_CAR_TIME_LOSS_SECONDS

            tire_wear += 1
            fuel_level -= fuel_consumed

            if fuel_level <= 0:
                # Ran out of fuel, this strategy is invalid
                return float('inf')

        return total_race_time

    def find_optimal_pit_window(self) -> str:
        """
        Uses Monte Carlo simulation to find the optimal pit stop strategy.

        Returns:
            str: Human-readable summary of the best strategy and its time.
        """
        # For simplicity, we'll test a few pre-defined strategies.
        # A more advanced implementation would generate these dynamically.
        strategies = {
            "1-stop": [[28]],
            "2-stop": [[20, 40]],
            "3-stop": [[15, 30, 45]]
        }

        best_strategy = None
        best_time = float('inf')

        # NOTE(review): run_strategy_simulation is deterministic here (average
        # inputs, no random draws), so repeating it num_simulations times
        # yields identical samples — confirm whether noise should be injected.
        for name, pit_stops in strategies.items():
            simulation_times = []
            for _ in range(self.num_simulations):
                total_time = self.run_strategy_simulation(pit_stops[0])
                simulation_times.append(total_time)

            avg_time = np.mean(simulation_times)

            print(f"Strategy: {name}, Avg. Time: {avg_time:.2f}s")

            if avg_time < best_time:
                best_time = avg_time
                best_strategy = name

        return f"Best Strategy: {best_strategy}, Time: {best_time:.2f}s"

    def analyze_undercut_overcut(self, competitor_pit_lap):
        """
        Analyzes the outcome of undercutting or overcutting a competitor.

        Args:
            competitor_pit_lap (int): Lap on which the competitor pits.

        Returns:
            dict: Average time deltas ('undercut_diff', 'overcut_diff')
            relative to matching the competitor's stop.
        """
        print("\n--- Undercut/Overcut Analysis ---")
        # Simulate pitting 1 lap before, on the same lap, and 1 lap after
        undercut_strategy = [competitor_pit_lap - 1]
        match_strategy = [competitor_pit_lap]
        overcut_strategy = [competitor_pit_lap + 1]

        undercut_time = np.mean([self.run_strategy_simulation(undercut_strategy) for _ in range(self.num_simulations)])
        match_time = np.mean([self.run_strategy_simulation(match_strategy) for _ in range(self.num_simulations)])
        overcut_time = np.mean([self.run_strategy_simulation(overcut_strategy) for _ in range(self.num_simulations)])

        print(f"Undercut (pit at lap {competitor_pit_lap - 1}): {undercut_time:.2f}s")
        print(f"Match (pit at lap {competitor_pit_lap}): {match_time:.2f}s")
        print(f"Overcut (pit at lap {competitor_pit_lap + 1}): {overcut_time:.2f}s")

        # Return the time difference compared to matching the competitor
        return {
            "undercut_diff": undercut_time - match_time,
            "overcut_diff": overcut_time - match_time
        }

    def react_to_safety_car(self, current_lap):
        """
        Simulates the outcome of pitting during a safety car period.

        Args:
            current_lap (int): Lap on which the safety car is deployed.

        Returns:
            tuple: (avg time if pitting now, avg time if staying out).
        """
        print(f"\n--- Safety Car at Lap {current_lap} ---")

        # Scenario 1: Pit now
        pit_now_strategy = [current_lap]

        # Scenario 2: Stay out (assume a 1-stop strategy, pitting later)
        stay_out_strategy = [35]

        # Simulate with adjusted lap times for the safety car period
        # (This is a simplified assumption)

        # Scenario 1: Pit now
        pit_now_time = np.mean([self.run_strategy_simulation(pit_now_strategy, safety_car_lap=current_lap) for _ in range(self.num_simulations)])


        # Scenario 2: Stay out
        stay_out_time = np.mean([self.run_strategy_simulation(stay_out_strategy, safety_car_lap=current_lap) for _ in range(self.num_simulations)])

        print(f"Pit Now: {pit_now_time:.2f}s")
        print(f"Stay Out: {stay_out_time:.2f}s")

        if pit_now_time < stay_out_time:
            print("Recommendation: PIT NOW")
        else:
            print("Recommendation: STAY OUT")

        return pit_now_time, stay_out_time
mcp_server/pipeline/__pycache__/pipeline.cpython-313.pyc ADDED
Binary file (4.39 kB). View file
 
mcp_server/pipeline/evaluator.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from tfx.dsl.component.experimental.decorators import component
4
+ from tfx.dsl.component.experimental.annotations import InputArtifact, OutputArtifact
5
+ from tfx.types.standard_artifacts import Model, ModelBlessing
6
+
7
+ # Define the path to the metrics file produced by the Trainer
8
+ METRICS_FILE = "evaluation_metrics.json"
9
+
10
+ # Define the quality threshold for the model
11
+ # In a real-world scenario, this would be more sophisticated.
12
+ MSE_THRESHOLD = 5.0 # Example threshold
13
+
14
@component
def CustomEvaluator(
    model: InputArtifact[Model],
    blessing: OutputArtifact[ModelBlessing]
):
    """
    A custom TFX component that evaluates the trained models and blesses them
    if they meet the quality threshold.

    Args:
        model: Input Model artifact whose URI must contain the
            evaluation_metrics.json written by the Trainer module.
        blessing: Output ModelBlessing artifact; marked blessed or not based
            on the average MSE across all recorded models.

    Raises:
        FileNotFoundError: If the Trainer did not produce the metrics file.
    """
    model_dir = model.uri
    metrics_path = os.path.join(model_dir, METRICS_FILE)

    if not os.path.exists(metrics_path):
        raise FileNotFoundError(f"Metrics file not found at {metrics_path}")

    with open(metrics_path, 'r') as f:
        metrics = json.load(f)

    # For this example, we'll check if the average MSE is below the threshold.
    # NOTE(review): assumes the metrics dict is non-empty — an empty file
    # would raise ZeroDivisionError here; confirm the Trainer always writes
    # all three keys.
    avg_mse = sum(metrics.values()) / len(metrics)
    print(f"Average MSE: {avg_mse}")

    if avg_mse < MSE_THRESHOLD:
        print("Model passed quality threshold. Blessing model.")
        blessing.uri = os.path.join(model.uri, "blessed")
        blessing.set_int_custom_property("blessed", 1)
        # Create an empty file to signify the blessing
        with open(blessing.uri, "w") as f:
            f.write("")
    else:
        print("Model did not pass quality threshold. Not blessing model.")
        blessing.uri = os.path.join(model.uri, "not_blessed")
        blessing.set_int_custom_property("blessed", 0)
        # Create an empty file to signify the rejection
        with open(blessing.uri, "w") as f:
            f.write("")
mcp_server/pipeline/module.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import joblib
4
+ import pandas as pd
5
+ from typing import List, Text
6
+
7
+ from sklearn.ensemble import RandomForestRegressor
8
+ from sklearn.linear_model import LinearRegression
9
+ from sklearn.metrics import mean_squared_error
10
+
11
+ import tensorflow as tf
12
+ from tensorflow_transform.tf_metadata import schema_utils
13
+
14
+ from tfx import v1 as tfx
15
+ from tfx_bsl.public import tfxio
16
+
17
+ # --- Feature and Target Definitions ---
18
+
19
+ TIRE_FEATURES = ['lap', 'accx_can', 'accy_can', 'Steering_Angle']
20
+ FUEL_FEATURES = ['nmot', 'aps']
21
+ PACE_FEATURES = ['speed', 'gear', 'nmot', 'aps', 'pbrake_f', 'pbrake_r',
22
+ 'accx_can', 'accy_can', 'Steering_Angle', 'traffic']
23
+
24
+ TIRE_TARGET = 'lap_time'
25
+ FUEL_TARGET = 'fuel_consumption'
26
+ PACE_TARGET = 'relative_pace'
27
+
28
+ ALL_FEATURES = list(set(TIRE_FEATURES + FUEL_FEATURES + PACE_FEATURES))
29
+ ALL_TARGETS = [TIRE_TARGET, FUEL_TARGET, PACE_TARGET]
30
+
31
+ METRICS_FILE = "evaluation_metrics.json"
32
+
33
+ # --- Data Loading and Conversion ---
34
+
35
def _input_fn(file_pattern: List[Text],
              data_accessor: tfx.components.DataAccessor,
              schema: tfx.proto.Schema,
              batch_size: int = 200) -> tf.data.Dataset:
    """Build a repeating, batched tf.data.Dataset over *file_pattern*."""
    dataset_options = tfxio.TensorFlowDatasetOptions(
        batch_size=batch_size, label_key=PACE_TARGET
    )
    dataset = data_accessor.tf_dataset_factory(
        file_pattern, dataset_options, schema=schema
    )
    return dataset.repeat()
44
+
45
def _get_raw_feature_spec(schema):
    """Derive the raw feature-spec dict from a schema proto."""
    feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
    return feature_spec
48
+
49
def _dataset_to_pandas(dataset: tf.data.Dataset, schema: tfx.proto.Schema) -> pd.DataFrame:
    """Materialize a bounded sample of a tf.data.Dataset as a DataFrame."""
    feature_spec = _get_raw_feature_spec(schema)
    rows = []
    # Cap at 1000 records so a repeating/infinite dataset still terminates.
    for record_batch in dataset.take(1000):
        parsed = tf.io.parse_example(record_batch, feature_spec)
        rows.append(
            {name: tensor.numpy().squeeze() for name, tensor in parsed.items()}
        )
    return pd.DataFrame(rows)
61
+
62
+ # --- Model Training and Evaluation Logic ---
63
+
64
def _train_and_evaluate_model(model, train_df: pd.DataFrame, eval_df: pd.DataFrame,
                              features: List[Text], target: Text, model_path: Text):
    """Fit *model* on the train split, score it on the eval split, persist it.

    Returns:
        float: Mean squared error on the eval split.
    """
    model.fit(train_df[features], train_df[target])

    eval_predictions = model.predict(eval_df[features])
    mse = mean_squared_error(eval_df[target], eval_predictions)
    print(f"Model for '{target}' MSE: {mse}")

    # Persist right after scoring so the saved model always matches the
    # reported metric.
    joblib.dump(model, model_path)
    print(f"Model for '{target}' saved to {model_path}")
    return mse
80
+
81
+ # --- TFX Trainer run_fn ---
82
+
83
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the three models and evaluate them.

    TFX Trainer entry point: reads train/eval examples via fn_args,
    fits the tire/fuel/pace models on pandas conversions of the data,
    saves the .pkl files plus an evaluation_metrics.json into
    fn_args.serving_model_dir for the downstream CustomEvaluator.
    """
    schema = tfx.utils.parse_pbtxt_file(fn_args.schema_path, tfx.proto.Schema())

    # 1. Load and convert train and eval data
    train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor, schema)
    eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor, schema)

    train_df = _dataset_to_pandas(train_dataset, schema)
    eval_df = _dataset_to_pandas(eval_dataset, schema)

    # Simple data imputation: create any missing columns as 0.0 and fill
    # NaNs with 0 so every model sees its full feature/target set.
    for col in ALL_FEATURES + ALL_TARGETS:
        if col not in train_df.columns:
            train_df[col] = 0.0
        if col not in eval_df.columns:
            eval_df[col] = 0.0
    train_df.fillna(0, inplace=True)
    eval_df.fillna(0, inplace=True)

    # 2. Define models
    tire_model = LinearRegression()
    fuel_model = LinearRegression()
    pace_model = RandomForestRegressor(n_estimators=100, random_state=42)

    # 3. Define paths for saved models
    tire_model_path = os.path.join(fn_args.serving_model_dir, "tire_degradation_model.pkl")
    fuel_model_path = os.path.join(fn_args.serving_model_dir, "fuel_consumption_model.pkl")
    pace_model_path = os.path.join(fn_args.serving_model_dir, "pace_prediction_model.pkl")

    # 4. Train and evaluate each model
    tire_mse = _train_and_evaluate_model(tire_model, train_df, eval_df, TIRE_FEATURES, TIRE_TARGET, tire_model_path)
    fuel_mse = _train_and_evaluate_model(fuel_model, train_df, eval_df, FUEL_FEATURES, FUEL_TARGET, fuel_model_path)
    pace_mse = _train_and_evaluate_model(pace_model, train_df, eval_df, PACE_FEATURES, PACE_TARGET, pace_model_path)

    # 5. Save evaluation metrics (consumed by CustomEvaluator for blessing)
    metrics = {
        'tire_degradation_model_mse': tire_mse,
        'fuel_consumption_model_mse': fuel_mse,
        'pace_prediction_model_mse': pace_mse
    }
    metrics_path = os.path.join(fn_args.serving_model_dir, METRICS_FILE)
    with open(metrics_path, 'w') as f:
        json.dump(metrics, f)
    print(f"Evaluation metrics saved to {metrics_path}")
mcp_server/pipeline/pipeline.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Text
3
+ from absl import logging
4
+
5
+ from tfx import v1 as tfx
6
+ from tfx.orchestration import pipeline
7
+ from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
8
+
9
+ from mcp_server.pipeline.evaluator import CustomEvaluator
10
+
11
+ # --- Pipeline Configuration ---
12
+
13
+ PIPELINE_NAME = "mcp-server-pipeline"
14
+ PIPELINE_ROOT = os.path.join('mcp_server', 'pipelines', PIPELINE_NAME)
15
+ DATA_ROOT = os.path.join('mcp_server', 'unzipped_data', 'barber-motorsports-park', 'barber')
16
+ MODULE_FILE = os.path.join('mcp_server', 'pipeline', 'module.py')
17
+ SERVING_MODEL_DIR = os.path.join('mcp_server', 'serving_model', PIPELINE_NAME)
18
+
19
+ METADATA_PATH = os.path.join('mcp_server', 'metadata', PIPELINE_NAME, 'metadata.db')
20
+
21
+ # --- Pipeline Definition ---
22
+
23
def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_root: Text,
    module_file: Text,
    serving_model_dir: Text,
    metadata_path: Text,
) -> pipeline.Pipeline:
    """Creates a TFX pipeline.

    Args:
        pipeline_name: Name registered with the orchestrator.
        pipeline_root: Root directory for pipeline artifacts.
        data_root: Directory of input CSV files for ExampleGen.
        module_file: Path to the Trainer module file (defines run_fn).
        serving_model_dir: Destination directory for blessed, pushed models.
        metadata_path: SQLite file backing the ML Metadata store.

    Returns:
        The assembled pipeline: ExampleGen -> StatisticsGen -> SchemaGen ->
        Trainer -> CustomEvaluator -> Pusher.
    """

    # 1. Data Ingestion (ExampleGen)
    # 4:1 hash buckets => roughly 80% train / 20% eval split.
    output_config = tfx.proto.Output(
        split_config=tfx.proto.SplitConfig(splits=[
            tfx.proto.SplitConfig.Split(name='train', hash_buckets=4),
            tfx.proto.SplitConfig.Split(name='eval', hash_buckets=1)
        ])
    )
    example_gen = tfx.components.CsvExampleGen(
        input_base=data_root,
        output_config=output_config
    )

    # 2. Data Validation (StatisticsGen, SchemaGen)
    statistics_gen = tfx.components.StatisticsGen(
        examples=example_gen.outputs['examples']
    )

    schema_gen = tfx.components.SchemaGen(
        statistics=statistics_gen.outputs['statistics'],
        infer_feature_shape=False
    )

    # 3. Model Training (Trainer)
    # NOTE(review): the custom_executor_spec path
    # (tfx.extensions.google_cloud_ai_platform.ExecutorSpec with a
    # python_executor kwarg) does not match the usual TFX v1 Trainer API —
    # confirm this constructs on the pinned tfx version.
    trainer = tfx.components.Trainer(
        module_file=module_file,
        examples=example_gen.outputs['examples'],
        schema=schema_gen.outputs['schema'],
        custom_executor_spec=tfx.extensions.google_cloud_ai_platform.ExecutorSpec(
            python_executor=tfx.components.trainer.executor.GenericExecutor
        )
    )

    # 4. Model Evaluation (CustomEvaluator)
    evaluator = CustomEvaluator(
        model=trainer.outputs['model']
    )

    # 5. Model Pushing (Pusher) - Conditional on Blessing
    pusher = tfx.components.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'], # This is the critical connection
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir
            )
        )
    )

    components = [
        example_gen,
        statistics_gen,
        schema_gen,
        trainer,
        evaluator,
        pusher,
    ]

    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(metadata_path),
        enable_cache=True,
    )
97
+
98
if __name__ == '__main__':
    logging.set_verbosity(logging.INFO)

    # To run this pipeline, you would typically use a TFX orchestrator like
    # Kubeflow or a local runner. The BeamDagRunner is for local execution.
    # We are including this to make the pipeline runnable directly.
    # Bound to a fresh name: assigning to `pipeline` here would shadow the
    # imported `tfx.orchestration.pipeline` module used above.
    tfx_pipeline = create_pipeline(
        pipeline_name=PIPELINE_NAME,
        pipeline_root=PIPELINE_ROOT,
        data_root=DATA_ROOT,
        module_file=MODULE_FILE,
        serving_model_dir=SERVING_MODEL_DIR,
        metadata_path=METADATA_PATH,
    )
    BeamDagRunner().run(tfx_pipeline)
mcp_server/prepare_data.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pandas as pd
3
+
4
+ from data_handling.data_downloader import download_and_move_files
5
+ from data_handling.data_loader import unzip_data
6
+ from data_handling.telemetry_parser import parse_telemetry
7
+
8
def main():
    """Prepare the data for the TFX pipeline.

    Downloads and unzips the raw race archives if needed, parses the
    telemetry, and writes the merged frame to data.csv for CsvExampleGen.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    raw_dir = os.path.join(base_dir, 'data')
    extracted_dir = os.path.join(base_dir, 'unzipped_data')
    track_dir = os.path.join(extracted_dir, 'barber-motorsports-park', 'barber')

    # 1. Download and unzip data only when not already present.
    if os.path.exists(raw_dir):
        print("Data directory already exists.")
    else:
        print("Downloading data...")
        download_and_move_files(raw_dir)

    if os.path.exists(extracted_dir):
        print("Unzipped data directory already exists.")
    else:
        print("Unzipping data...")
        unzip_data(raw_dir, extracted_dir)

    # 2. Parse the race data
    print("Parsing telemetry data...")
    race_data = parse_telemetry(track_dir)

    if race_data is None:
        print("Failed to parse race data. Data preparation failed.")
        return

    # 3. Save the parsed data to a CSV file
    csv_output_path = os.path.join(track_dir, 'data.csv')
    print(f"Saving parsed data to {csv_output_path}...")
    race_data.to_csv(csv_output_path, index=False)
    print("Data preparation complete.")
43
+
44
+ if __name__ == "__main__":
45
+ main()
mcp_server/pyproject.toml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "mcp-server"
3
+ version = "0.1.0"
4
+ description = "MCP Server for the Hack the Track Monte Carlo Simulation"
5
+ requires-python = ">=3.11,<3.13"
6
+ dependencies = [
7
+ "google-adk>=1.18.0",
8
+ "numpy",
9
+ "pandas",
10
+ "scikit-learn>=1.7.2",
11
+ "joblib",
12
+ "requests"
13
+ ]
14
+
15
+ [project.optional-dependencies]
16
+ test = [
17
+ "pytest>=8.4.2",
18
+ ]
19
+ dev = [
20
+ "matplotlib>=3.8.0",
21
+ ]
22
+
23
+ [tool.setuptools]
24
+ py-modules = ["main", "monte_carlo_simulation"]
25
+
26
+ [tool.setuptools.packages.find]
27
+ include = ["data_handling*", "models*"]
mcp_server/requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ pandas
2
+ numpy
3
+ scikit-learn
4
+ joblib
5
+ requests
6
+ fastmcp==2.12.4
7
+ google-cloud-storage
mcp_server/run_server_local.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import os
3
+ import sys
4
+
5
+ # Add the current directory to sys.path so we can import modules
6
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
7
+
8
+ from mcp import types as mcp_types
9
+ from mcp.server.lowlevel import Server
10
+ from mcp.server.models import InitializationOptions
11
+ import mcp.server.stdio as mcp_stdio
12
+
13
+ from google.adk.tools.function_tool import FunctionTool
14
+ from google.adk.tools.mcp_tool import adk_to_mcp_tool_type
15
+
16
+ from mcp_server.data_handling.telemetry_parser import parse_telemetry
17
+ from mcp_server.monte_carlo_simulation import MonteCarloSimulation
18
+
19
+ # --- Globals ---
20
+ app = Server(name="MonteCarloServer")
21
+ adk_tool_to_expose = None
22
+ mc_simulation = None
23
+
24
+ # --- MCP Server Handlers ---
25
@app.list_tools()
async def list_mcp_tools() -> list[mcp_types.Tool]:
    """MCP handler advertising the tools this server exposes."""
    # Nothing to advertise until main() has configured the ADK tool.
    if not adk_tool_to_expose:
        return []
    return [adk_to_mcp_tool_type(adk_tool_to_expose)]
34
+
35
@app.call_tool()
async def call_mcp_tool(
    name: str, arguments: dict
) -> list[mcp_types.Content]:
    """MCP handler to execute a tool call requested by an MCP client.

    Args:
        name: Name of the tool the client wants to invoke.
        arguments: Client-supplied arguments (ignored — the single exposed
            tool takes none).

    Returns:
        A one-element content list with either the tool result or a
        not-found message.
    """
    # Diagnostics go to stderr: stdout carries the MCP stdio protocol.
    print(f"MCP Server: Received call_tool request for '{name}' with args: {arguments}", file=sys.stderr)
    if adk_tool_to_expose and name == adk_tool_to_expose.name:
        # We are not passing any arguments to the function, so we can just call it.
        result = adk_tool_to_expose.func()
        # NOTE(review): in the mcp package, Content is usually a union of
        # concrete types (e.g. TextContent requiring type="text") — confirm
        # this constructor works with the pinned mcp/fastmcp version.
        return [mcp_types.Content(text=f"Tool {name} completed with result: {result}")]
    return [mcp_types.Content(text=f"Tool {name} not found.")]
46
+
47
+
48
+ # --- Server Initialization ---
49
async def run_mcp_stdio_server():
    """Serve the MCP app over stdin/stdout until the client disconnects."""
    async with mcp_stdio.stdio_server() as (read_stream, write_stream):
        await app.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name="MonteCarloServer",
                server_version="0.1.0",
                capabilities={},
            ),
        )
58
+
59
+
60
def main():
    """Initializes and runs the MCP server locally.

    Expects pre-trained models in ./trained_models and unzipped race data in
    ./unzipped_data (produced by prepare_data.py); exits early with a message
    on stderr when either is missing.
    """
    # Handlers above read these module-level globals.
    global adk_tool_to_expose, mc_simulation

    script_dir = os.path.dirname(os.path.abspath(__file__))

    # 1. Use local trained models
    model_dir = os.path.join(script_dir, 'trained_models')
    if not os.path.exists(model_dir):
        print(f"Model directory not found: {model_dir}", file=sys.stderr)
        return

    print(f"Loading models from: {model_dir}", file=sys.stderr)

    # 2. Load the race data
    race_data_dir = os.path.join(script_dir, 'unzipped_data', 'barber-motorsports-park', 'barber')
    if not os.path.exists(race_data_dir):
        print(f"Race data directory not found: {race_data_dir}. Please run prepare_data.py first.", file=sys.stderr)
        return

    race_data = parse_telemetry(race_data_dir)

    if race_data is None:
        print("Failed to load race data. Exiting.", file=sys.stderr)
        return

    # 3. Initialize the Monte Carlo simulation
    mc_simulation = MonteCarloSimulation(race_data, model_dir=model_dir)

    # 4. Create the ADK tool to expose
    adk_tool_to_expose = FunctionTool(
        func=mc_simulation.find_optimal_pit_window,
    )

    # 5. Run the MCP server
    # Diagnostics go to stderr: stdout is reserved for the stdio protocol.
    print("Starting MCP Server...", file=sys.stderr)
    try:
        asyncio.run(run_mcp_stdio_server())
    except KeyboardInterrupt:
        print("\nMCP Server (stdio) stopped by user.", file=sys.stderr)
    except Exception as e:
        print(f"MCP Server (stdio) encountered an error: \n{e}", file=sys.stderr)

    print("MCP Server (stdio) process exiting.", file=sys.stderr)
104
+
105
+ if __name__ == "__main__":
106
+ main()
mcp_server/tests/test_prepare_data.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pandas as pd
3
+ import pytest
4
+ from unittest.mock import patch, MagicMock
5
+
6
+ # Since the prepare_data script is not in a package, we need to add its directory to the path
7
+ import sys
8
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
9
+
10
+ from prepare_data import main as prepare_data_main
11
+
12
@pytest.fixture
def temp_data_dir(tmp_path):
    """Build a throwaway mcp_server/ directory tree with dummy race files."""
    server_root = tmp_path / "mcp_server"
    raw_data_dir = server_root / "data"
    track_dir = server_root / "unzipped_data" / "barber-motorsports-park" / "barber"
    track_dir.mkdir(parents=True, exist_ok=True)
    raw_data_dir.mkdir(parents=True, exist_ok=True)

    # Minimal one-row analysis (semicolon-separated) and telemetry CSVs.
    analysis_rows = {
        'LAP_NUMBER': [1], 'LAP_TIME': ['1:30.5'], 'DRIVER_NUMBER': [1]
    }
    telemetry_rows = {
        'timestamp': [100], 'lap': [1], 'vehicle_id': [1],
        'telemetry_name': ['speed'], 'telemetry_value': [150]
    }
    pd.DataFrame(analysis_rows).to_csv(track_dir / "Test_Analysis.csv", index=False, sep=';')
    pd.DataFrame(telemetry_rows).to_csv(track_dir / "Test_Telemetry.csv", index=False)

    return server_root
33
+
34
+
35
@patch('prepare_data.download_and_move_files')
@patch('prepare_data.unzip_data')
@patch('prepare_data.parse_telemetry')
def test_prepare_data_main(mock_parse_telemetry, mock_unzip_data, mock_download_files, temp_data_dir):
    """Tests the main data preparation script with all I/O mocked out.

    download/unzip/parse are patched so no network or real race data is
    touched; we only verify that main() writes the parsed frame to the
    expected CSV location inside the temp tree.
    """
    # Mock the functions that download and unzip data
    mock_download_files.return_value = None
    mock_unzip_data.return_value = None

    # Mock the telemetry parser to return a simple DataFrame
    mock_parsed_df = pd.DataFrame({'lap': [1], 'speed': [150]})
    mock_parse_telemetry.return_value = mock_parsed_df

    # Work inside the temp tree so relative path assertions resolve there.
    original_cwd = os.getcwd()
    os.chdir(temp_data_dir)
    try:
        # main() derives its base path from os.path.dirname(os.path.abspath(__file__)).
        # Patching a 'prepare_data.script_dir' attribute cannot work: the module
        # has no such attribute (patch would raise AttributeError) and main()
        # uses a local variable anyway. Redirect dirname instead so every
        # output path lands under temp_data_dir.
        with patch('prepare_data.os.path.dirname', return_value=str(temp_data_dir)):
            prepare_data_main()

        # Check that the output CSV was created
        output_csv_path = "unzipped_data/barber-motorsports-park/barber/data.csv"
        assert os.path.exists(output_csv_path)

        # Check the content of the CSV
        result_df = pd.read_csv(output_csv_path)
        pd.testing.assert_frame_equal(result_df, mock_parsed_df)
    finally:
        # Restore the working directory even if an assertion fails.
        os.chdir(original_cwd)
mcp_server/tests/test_trainer_module.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import joblib
4
+ import pytest
5
+ import tempfile
6
+ import pandas as pd
7
+ from unittest.mock import MagicMock
8
+
9
+ # Add the pipeline directory to the path to import the module
10
+ import sys
11
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'pipeline')))
12
+
13
+ from module import run_fn
14
+
15
+ # --- Mock Objects to replace TFX dependencies ---
16
+
17
class MockFnArgs:
    """Stand-in for TFX's FnArgs carrying just the fields run_fn reads."""

    def __init__(self, train_files, eval_files, schema_path, serving_model_dir, data_accessor):
        # Mirror every constructor argument onto an attribute of the same name.
        for attr_name, attr_value in (
            ('train_files', train_files),
            ('eval_files', eval_files),
            ('schema_path', schema_path),
            ('serving_model_dir', serving_model_dir),
            ('data_accessor', data_accessor),
        ):
            setattr(self, attr_name, attr_value)
25
+
26
class MockDataAccessor:
    """Stand-in for the TFX DataAccessor; produces no records."""

    def tf_dataset_factory(self, file_pattern, tfxio_options, schema):
        # An empty iterable suffices: the test monkeypatches the
        # dataset-to-DataFrame conversion anyway.
        empty_dataset = []
        return empty_dataset
30
+
31
+ # --- Test Fixtures ---
32
+
33
@pytest.fixture
def fn_args():
    """Yield a MockFnArgs backed by a throwaway directory tree.

    The temporary directory (and everything run_fn writes into it) is
    removed automatically once the test finishes.
    """
    with tempfile.TemporaryDirectory() as workdir:
        model_out_dir = os.path.join(workdir, 'serving_model')
        os.makedirs(model_out_dir, exist_ok=True)

        # run_fn parses this path as a schema proto; empty is sufficient here.
        empty_schema_path = os.path.join(workdir, 'schema.pbtxt')
        with open(empty_schema_path, 'w') as schema_file:
            schema_file.write('')

        yield MockFnArgs(
            train_files=['train_dir'],
            eval_files=['eval_dir'],
            schema_path=empty_schema_path,
            serving_model_dir=model_out_dir,
            data_accessor=MockDataAccessor(),
        )
51
+
52
+ # --- Tests ---
53
+
54
def test_run_fn_creates_models_and_metrics(fn_args, monkeypatch):
    """Tests that the run_fn trains and saves all models and the metrics file.

    The TFX dataset-to-DataFrame conversion is monkeypatched to return a
    small fixed frame, so run_fn exercises only the sklearn training and
    artifact-writing paths.
    """
    # Three rows covering every feature and target column run_fn expects.
    dummy_data = {
        'lap': [1, 2, 3], 'accx_can': [0.1, 0.2, 0.3], 'accy_can': [0.1, 0.2, 0.3],
        'Steering_Angle': [10, 15, 20], 'lap_time': [90, 91, 92],
        'nmot': [8000, 8100, 8200], 'aps': [50, 55, 60], 'fuel_consumption': [0.5, 0.55, 0.6],
        'speed': [150, 151, 152], 'gear': [4, 4, 5], 'pbrake_f': [0, 0, 1], 'pbrake_r': [0, 0, 0],
        'traffic': [0, 1, 0], 'relative_pace': [1.0, 1.1, 1.2]
    }
    dummy_df = pd.DataFrame(dummy_data)
    # Both train and eval conversions receive the same dummy frame.
    monkeypatch.setattr('module._dataset_to_pandas', lambda a, b: dummy_df)

    run_fn(fn_args)

    # All three model pickles must land in the serving directory.
    assert os.path.exists(os.path.join(fn_args.serving_model_dir, "tire_degradation_model.pkl"))
    assert os.path.exists(os.path.join(fn_args.serving_model_dir, "fuel_consumption_model.pkl"))
    assert os.path.exists(os.path.join(fn_args.serving_model_dir, "pace_prediction_model.pkl"))

    # The metrics file feeds CustomEvaluator; all three keys must be present.
    metrics_path = os.path.join(fn_args.serving_model_dir, "evaluation_metrics.json")
    assert os.path.exists(metrics_path)

    with open(metrics_path, 'r') as f:
        metrics = json.load(f)
    assert 'tire_degradation_model_mse' in metrics
    assert 'fuel_consumption_model_mse' in metrics
    assert 'pace_prediction_model_mse' in metrics
mcp_server/trained_models/fuel_consumption_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9589f7bdd46e0053d08b8ded1a25d94946de384a8e5e86c67302e3245bcd006b
3
+ size 857
mcp_server/trained_models/pace_prediction_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9102c6190fe6a55461b35f760943cd0397602ad2b1177ac268c0bc9b114bcce
3
+ size 40065
mcp_server/trained_models/tire_degradation_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d1c10157ac28bdb6c9993ed234c98009289866a3d9fe45c5df5178411e1a712
3
+ size 921
mcp_server/unzipped_data/barber-motorsports-park/barber/.DS_Store ADDED
Binary file (6.15 kB). View file
 
mcp_server/unzipped_data/barber-motorsports-park/barber/03_Provisional Results_Race 1_Anonymized.CSV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5278b828d7c88bb505b9c727f442140488a077451089d49594e02cbaab45068d
3
+ size 2462
mcp_server/unzipped_data/barber-motorsports-park/barber/03_Provisional Results_Race 2_Anonymized.CSV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:038a61038a8b2377561a3b0afd66fb385684a4d1be81e7705c06554d651462b3
3
+ size 2455
mcp_server/unzipped_data/barber-motorsports-park/barber/03_Results GR Cup Race 2 Official_Anonymized.CSV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:038a61038a8b2377561a3b0afd66fb385684a4d1be81e7705c06554d651462b3
3
+ size 2455
mcp_server/unzipped_data/barber-motorsports-park/barber/05_Results by Class GR Cup Race 1 Official_Anonymized.CSV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b02f2186e58956c26a6c793436ddb2a0657efdb083d43a3143106c0ad0b37385
3
+ size 1611
mcp_server/unzipped_data/barber-motorsports-park/barber/23_AnalysisEnduranceWithSections_Race 1_Anonymized.CSV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11abe9a38bfee35412ae8243cbf9a852ca120c680b161a892b1f4d8fda81f566
3
+ size 142657
mcp_server/unzipped_data/barber-motorsports-park/barber/99_Best 10 Laps By Driver_Race 1_Anonymized.CSV ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4132edff314583f21bfd7deda0b693b4476a66a6215bb389ff934164d4d94ab4
3
+ size 3547