IDS75912 committed on
Commit
89af506
·
1 Parent(s): 927c939

initial commit

Browse files
Files changed (5) hide show
  1. .dockerignore +11 -0
  2. Dockerfile +31 -0
  3. compose.yaml +9 -0
  4. main.py +145 -0
  5. requirements.txt +90 -0
.dockerignore ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ignore the AI model directory
2
+ animal-classification/
3
+
4
+ # Ignore Python virtual environments and cache
5
+ venv/
6
+ __pycache__/
7
+ *.pyc
8
+
9
+ # Ignore Git and other development artifacts
10
+ .git/
11
+ .vscode/
Dockerfile ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Builder Stage ---
# Use a full Python image (with build toolchain) to build dependencies.
# FIX: `as` -> `AS` — lowercase casing triggers BuildKit's FromAsCasing warning.
FROM python:3.12 AS builder

WORKDIR /app

# Install dependencies into a virtual environment so they can be copied
# wholesale into the slim runner stage.
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# --- Runner Stage ---
# Use a slim image for the final application
FROM python:3.12-slim

WORKDIR /app

# Copy the virtual environment from the builder stage
COPY --from=builder /opt/venv /opt/venv

# Copy the application code (.dockerignore excludes venv/, .git/, model dir)
COPY . .

# Set the path to use the virtual environment
ENV PATH="/opt/venv/bin:$PATH"

# Expose the port and run the app
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
compose.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# NOTE: the top-level `version:` key is obsolete under the Compose
# Specification (docker compose v2 ignores it and prints a warning),
# so it is intentionally omitted.
services:
  app:
    build: .
    ports:
      - "8000:8000"
    volumes:
      # Mount the local model directory into the container at the path the app expects
      - ./animal-classification:/app/animal-classification
main.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""FastAPI service exposing an animal-classification model (Cat/Dog/Panda).

Interactive docs: http://localhost:8000/docs
"""

# FIX: consolidated the import prelude. The original imported `typing`
# twice, `tensorflow` both unconditionally AND inside a try/except guard
# (the unconditional import ran first, so a missing TensorFlow crashed the
# module before the guard could catch it), and printed the starlette
# version unconditionally while also guarding the starlette import below.
import os
import pkgutil
from typing import Any, Dict

import uvicorn

import fastapi
from fastapi import FastAPI, Request, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import numpy as np
from PIL import Image

from huggingface_hub import hf_hub_download

# starlette is a FastAPI dependency; import if available
try:
    import starlette
    print('starlette', starlette.__version__)
except Exception:
    starlette = None

# TensorFlow can be large or absent in some envs; guard the import so
# importing this module doesn't crash tests or other tooling.
try:
    import tensorflow as tf
    from tensorflow import keras

    TF_AVAILABLE = True
    print("tensorflow version: ", tf.__version__)
    print("keras version: ", keras.__version__)
    gpus = tf.config.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpus))
    if gpus:
        print(gpus[0].device_type, gpus[0].name)
except Exception:
    tf = None
    keras = None
    TF_AVAILABLE = False
    print("tensorflow not available")
50
+
51
+
52
app = FastAPI(title="1.3 - AI Model Deployment")
# Interactive docs: browse to http://localhost:8000/docs
# (was a stray string-literal statement; a comment is the right tool)

# FIX: removed the redundant second
# `from fastapi.middleware.cors import CORSMiddleware` — the name is already
# imported at the top of the file.
# Wide-open CORS is a development convenience; tighten allow_origins for prod.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
63
+
64
+
65
+
66
+
67
+
68
+
69
# Labels in the order of the model's output units (the classes it was trained on).
ANIMALS = ['Cat', 'Dog', 'Panda']

# 1) Download the SavedModel artifacts from the Hub:
# refer to the repository where your model is, not the one for the space!
repo_id = "IDS75912/masterclass-2025"

# FIX: the original ran download + load unguarded at import time, even though
# the file defines TF_AVAILABLE and predict_stub() checks `model is None` —
# with TensorFlow absent this crashed on `tf.keras` (tf is None). Guard it
# and fall back to model=None so the endpoints' None-checks work as intended.
model = None
if TF_AVAILABLE:
    try:
        for fname in ("config.json", "metadata.json", "model.weights.h5"):
            hf_hub_download(repo_id, filename=fname, repo_type="model",
                            local_dir="./model")
        # 2) load it
        # NOTE(review): load_model("./model") expects a SavedModel / .keras
        # artifact; the files downloaded above are a config + .h5 weights pair,
        # which may instead need model_from_json + load_weights — confirm.
        model = tf.keras.models.load_model("./model")
    except Exception as exc:
        print("model download/load failed:", exc)
else:
    print("skipping model download/load: TensorFlow unavailable")
80
+
81
+
82
+
83
+
84
+
85
@app.post('/upload/image')
async def uploadImage(img: UploadFile = File(...)):
    """Classify an uploaded image and return the label, e.g. "Dog"."""
    original_image = Image.open(img.file)  # Read the bytes and process as an image
    # GENERALIZED: the original only converted RGBA. Palette ('P'),
    # grayscale ('L'), CMYK, etc. would reach the model with the wrong
    # channel count; convert anything that isn't already 3-channel RGB.
    if original_image.mode != 'RGB':
        original_image = original_image.convert('RGB')
    resized_image = original_image.resize((64, 64))  # model's expected input size
    # The model wants a batch of images; expand to shape (1, 64, 64, 3).
    images_to_predict = np.expand_dims(np.array(resized_image), axis=0)
    # Result is a list of per-class scores in one-hot-like form: [[0 1 0]]
    predictions = model.predict(images_to_predict)
    classifications = predictions.argmax(axis=1)  # index of the highest score
    # First (only) item of the batch, mapped back to its label.
    return ANIMALS[classifications.tolist()[0]]
97
+
98
@app.get("/")
def read_root() -> Dict[str, Any]:
    """Root endpoint: static greeting used as a liveness check."""
    greeting = "Hello from FastAPI in the 'aai9' conda env"
    return {"message": greeting}
102
+
103
+
104
+ # @app.post("/echo")
105
+ # async def echo(payload: Dict[str, Any]) -> Dict[str, Any]:
106
+ # """Echo back the received JSON payload."""
107
+ # return {"echo": payload}
108
+
109
+
110
@app.get("/version")
def versions() -> Dict[str, Any]:
    """Return key package versions and whether TensorFlow is available."""
    info: Dict[str, Any] = {"fastapi": fastapi.__version__}
    # starlette / tf may be None (guarded imports) — getattr keeps this safe.
    info["starlette"] = getattr(starlette, "__version__", None)
    info["tensorflow_available"] = TF_AVAILABLE
    info["tensorflow_version"] = getattr(tf, "__version__", None)
    return info
119
+
120
+
121
@app.get("/predict")
def predict_stub() -> Dict[str, Any]:
    """A tiny predict stub that demonstrates how to expose model inference.

    If TensorFlow isn't available or no model is loaded this returns a helpful
    message.
    """
    if not TF_AVAILABLE or model is None:
        return {
            "prediction": "N/A",
            "info": "TensorFlow not available or model not loaded.",
        }
    # BUG FIX: the original referenced an undefined name `model_path`, so this
    # branch raised NameError whenever a model actually was loaded. Report the
    # literal path the model is loaded from instead.
    return {
        "prediction": "stub, we're not doing a real prediction",
        "model_path": "./model",
    }
135
+
136
+
137
+
138
+
139
+
140
+
141
if __name__ == "__main__":
    # Dev entry point; equivalent to:
    #   conda run -n gradio uvicorn main:app --reload
    import uvicorn

    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
requirements.txt ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.3.1
2
+ annotated-types==0.6.0
3
+ anyio==4.10.0
4
+ asttokens==3.0.0
5
+ astunparse==1.6.3
6
+ bottleneck==1.4.2
7
+ certifi==2025.10.5
8
+ charset-normalizer==3.4.3
9
+ click==8.2.1
10
+ comm==0.2.3
11
+ debugpy==1.8.16
12
+ decorator==5.2.1
13
+ exceptiongroup==1.3.0
14
+ executing==2.2.1
15
+ fastapi==0.116.1
16
+ flatbuffers==25.9.23
17
+ gast==0.6.0
18
+ google-pasta==0.2.0
19
+ grpcio==1.75.1
20
+ h11==0.16.0
21
+ h5py==3.14.0
22
+ idna==3.10
23
+ importlib-metadata==8.7.0
24
+ ipykernel==6.30.1
25
+ ipython==9.6.0
26
+ ipython_pygments_lexers==1.1.1
27
+ jedi==0.19.2
28
+ jupyter-client==8.6.3
29
+ jupyter-core==5.8.1
30
+ keras==3.11.3
31
+ libclang==18.1.1
32
+ markupsafe==3.0.3
33
+ markdown==3.9
34
+ markdown-it-py==4.0.0
35
+ matplotlib-inline==0.1.7
36
+ mdurl==0.1.2
37
+ mkl-service==2.5.2
38
+ mkl_fft==1.3.11
39
+ mkl_random==1.2.8
40
+ ml-dtypes==0.5.3
41
+ namex==0.1.0
42
+ nest-asyncio==1.6.0
43
+ numexpr==2.11.0
44
+ numpy
45
+ opt-einsum==3.4.0
46
+ optree==0.17.0
47
+ packaging==25.0
48
+ pandas==2.3.3
49
+ parso==0.8.5
50
+ pexpect==4.9.0
51
+ pickleshare==0.7.5
52
+ pillow==11.3.0
53
+ pip==25.2
54
+ platformdirs==4.5.0
55
+ prompt-toolkit==3.0.52
56
+ protobuf==6.32.1
57
+ psutil==7.0.0
58
+ ptyprocess==0.7.0
59
+ pure_eval==0.2.3
60
+ pydantic==2.11.9
61
+ pydantic-core==2.33.2
62
+ pygments==2.19.2
63
+ python-dateutil==2.9.0post0
64
+ python-multipart==0.0.20
65
+ pytz==2025.2
66
+ pyzmq==27.1.0
67
+ requests==2.32.5
68
+ rich==14.2.0
69
+ setuptools==72.1.0
70
+ six==1.17.0
71
+ sniffio==1.3.0
72
+ stack_data==0.6.3
73
+ starlette==0.47.3
74
+ tensorboard==2.20.0
75
+ tensorboard-data-server==0.7.2
76
+ tensorflow==2.20.0
77
+ termcolor==3.1.0
78
+ tornado==6.5.1
79
+ traitlets==5.14.3
80
+ typing-extensions==4.15.0
81
+ typing-inspection==0.4.0
82
+ urllib3==2.5.0
83
+ uvicorn==0.35.0
84
+ wcwidth==0.2.14
85
+ werkzeug==3.1.3
86
+ wheel==0.45.1
87
+ wrapt==1.17.3
88
+ zipp==3.23.0
89
+ gradio
90
+ huggingface_hub