mutarisi committed on
Commit
641b34a
·
1 Parent(s): e014c8e

final update

Browse files
Files changed (4) hide show
  1. Dockerfile +19 -17
  2. lettersController.py +9 -9
  3. test_startup.py +0 -27
  4. wordsController.py +8 -15
Dockerfile CHANGED
@@ -1,35 +1,37 @@
1
- # Use Python 3.11, as indicated by your traceback
2
  FROM python:3.11-slim
3
 
 
 
4
  ENV MPLCONFIGDIR /tmp/.matplotlib
5
 
6
  # Explicitly force CPU and suppress CUDA warnings (if not using a GPU space)
7
  ENV CUDA_VISIBLE_DEVICES=""
8
- # Install system dependencies required by OpenCV (cv2)
9
- # The 'libgl1-mesa-glx' package provides libGL.so.1
 
 
 
 
 
 
10
  RUN apt-get update && \
11
  apt-get install -y libgl1 libglib2.0-0 libsm6 libxext6 && \
12
  rm -rf /var/lib/apt/lists/*
13
 
14
- # Set environment variables for better output and the internal port
15
- ENV PYTHONUNBUFFERED True
16
- ENV PORT 5000
17
-
18
- # Create the working directory inside the container
19
  WORKDIR /app
20
 
21
- # Copy requirements file and install dependencies first (for faster caching)
22
  COPY requirements.txt .
23
  RUN pip install --no-cache-dir -r requirements.txt
24
 
25
- # Copy the rest of the application code
26
  COPY . .
27
 
28
- # Expose the application port
29
- ENV PYTHONUNBUFFERED True
30
- EXPOSE 5000
31
 
32
- WORKDIR /app
33
- # Define the command to start the Uvicorn server
34
- # This runs your application: `app` instance in the file `app.py`
35
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
+ # 1. Base Image Declaration (Corrected 'slim' tag)
2
  FROM python:3.11-slim
3
 
4
+ # 2. Environment Variables & Cleanup
5
+ # Suppress Matplotlib cache permission warning
6
  ENV MPLCONFIGDIR /tmp/.matplotlib
7
 
8
  # Explicitly force CPU and suppress CUDA warnings (if not using a GPU space)
9
  ENV CUDA_VISIBLE_DEVICES=""
10
+
11
+ # Ensure Python output is immediately available in logs
12
+ ENV PYTHONUNBUFFERED True
13
+
14
+ # Set the port required by Hugging Face Spaces/Uvicorn
15
+ ENV PORT 7860
16
+
17
+ # 3. System Dependencies (Required by OpenCV/cv2)
18
  RUN apt-get update && \
19
  apt-get install -y libgl1 libglib2.0-0 libsm6 libxext6 && \
20
  rm -rf /var/lib/apt/lists/*
21
 
22
+ # 4. Working Directory
 
 
 
 
23
  WORKDIR /app
24
 
25
+ # 5. Dependency Installation (Leverages Docker caching)
26
  COPY requirements.txt .
27
  RUN pip install --no-cache-dir -r requirements.txt
28
 
29
+ # 6. Application Code
30
  COPY . .
31
 
32
+ # 7. Port Documentation (EXPOSE is optional but good practice)
33
+ EXPOSE 7860
 
34
 
35
+ # 8. Define the command to start the Uvicorn server
36
+ # This runs the 'app' object in the 'app.py' module, listening on port 7860.
37
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
lettersController.py CHANGED
@@ -4,17 +4,17 @@ import pickle
4
  import tensorflow as tf
5
  import mediapipe as mp
6
 
7
- # lettersModel = tf.keras.models.load_model('ai_model/models/detectLettersModel.keras')
8
- # with open('ai_model/models/labelEncoder.pickle', 'rb') as f:
9
- # labelEncoder = pickle.load(f)
10
 
11
- # lettersModel2 = tf.keras.models.load_model('ai_model/jz_model/JZModel.keras')
12
- # with open('ai_model/jz_model/labelEncoder.pickle', 'rb') as f:
13
- # labelEncoder2 = pickle.load(f)
14
 
15
- # numbersModel = tf.keras.models.load_model('ai_model/models/detectNumbersModel.keras')
16
- # with open('ai_model/models/numLabelEncoder.pickle', 'rb') as f:
17
- # numLabelEncoder = pickle.load(f)
18
 
19
  sequenceNum = 20
20
  hands = mp.solutions.hands.Hands(static_image_mode=True)
 
4
  import tensorflow as tf
5
  import mediapipe as mp
6
 
7
+ lettersModel = tf.keras.models.load_model('ai_model/models/detectLettersModel.keras')
8
+ with open('ai_model/models/labelEncoder.pickle', 'rb') as f:
9
+ labelEncoder = pickle.load(f)
10
 
11
+ lettersModel2 = tf.keras.models.load_model('ai_model/jz_model/JZModel.keras')
12
+ with open('ai_model/jz_model/labelEncoder.pickle', 'rb') as f:
13
+ labelEncoder2 = pickle.load(f)
14
 
15
+ numbersModel = tf.keras.models.load_model('ai_model/models/detectNumbersModel.keras')
16
+ with open('ai_model/models/numLabelEncoder.pickle', 'rb') as f:
17
+ numLabelEncoder = pickle.load(f)
18
 
19
  sequenceNum = 20
20
  hands = mp.solutions.hands.Hands(static_image_mode=True)
test_startup.py DELETED
@@ -1,27 +0,0 @@
1
- # test_startup.py
2
-
3
- print("--- Starting Application Object Test ---")
4
-
5
- # 1. Import the necessary components
6
- import os
7
- import sys
8
-
9
- # Add the application root to the path so imports work
10
- sys.path.append(os.path.dirname(os.path.abspath(__file__)))
11
-
12
- # 2. Try to import the application file and object
13
- try:
14
- # Assuming your main file is 'app.py' and the object is 'app'
15
- from app import app
16
- print("SUCCESS: FastAPI object ('app') imported successfully.")
17
-
18
- # You could add a small model call here if you want to test further
19
- # e.g., print(app.dependency_on_model.status)
20
-
21
- except Exception as e:
22
- print(f"FAILURE: An error occurred during application import.")
23
- print(f"Error details: {e}")
24
- sys.exit(1)
25
-
26
- print("--- Startup Test Finished Successfully ---")
27
- sys.exit(0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
wordsController.py CHANGED
@@ -10,13 +10,10 @@ SEQUENCE_LENGTH = 90
10
  EXPECTED_COORDS_PER_FRAME = 1662
11
  CONFIDENCE_THRESHOLD = 0.1
12
 
13
- # model = load_model(MODEL_PATH)
14
- # df = pd.read_csv(CSV_PATH)
15
- # unique_glosses = df['gloss'].unique()
16
- # id_to_gloss = {i: g for i, g in enumerate(unique_glosses)}
17
- # Insert these lines immediately after the commented-out block
18
- model = None # Placeholder
19
- id_to_gloss = {0: "placeholder_word"} # Minimal placeholder for the dictionary
20
 
21
  mp_holistic = mp.solutions.holistic.Holistic(
22
  static_image_mode=True,
@@ -132,15 +129,11 @@ def detectWords(image_paths):
132
  sequence = pad_or_truncate_sequence(sequence, SEQUENCE_LENGTH, EXPECTED_COORDS_PER_FRAME)
133
  sequence = np.expand_dims(sequence, axis=0)
134
 
135
- # preds = model.predict(sequence, verbose=0)
136
- # predicted_id = int(np.argmax(preds))
137
- # confidence = float(np.max(preds))
138
- # predicted_word = id_to_gloss.get(predicted_id, "Unknown")
139
-
140
- predicted_id = 0 # Use the placeholder ID
141
- confidence = 0.99 # Use a dummy confidence
142
  predicted_word = id_to_gloss.get(predicted_id, "Unknown")
143
-
144
  result = {"word": predicted_word if confidence >= CONFIDENCE_THRESHOLD else "",
145
  "confidence": confidence}
146
 
 
10
  EXPECTED_COORDS_PER_FRAME = 1662
11
  CONFIDENCE_THRESHOLD = 0.1
12
 
13
+ model = load_model(MODEL_PATH)
14
+ df = pd.read_csv(CSV_PATH)
15
+ unique_glosses = df['gloss'].unique()
16
+ id_to_gloss = {i: g for i, g in enumerate(unique_glosses)}
 
 
 
17
 
18
  mp_holistic = mp.solutions.holistic.Holistic(
19
  static_image_mode=True,
 
129
  sequence = pad_or_truncate_sequence(sequence, SEQUENCE_LENGTH, EXPECTED_COORDS_PER_FRAME)
130
  sequence = np.expand_dims(sequence, axis=0)
131
 
132
+ preds = model.predict(sequence, verbose=0)
133
+ predicted_id = int(np.argmax(preds))
134
+ confidence = float(np.max(preds))
 
 
 
 
135
  predicted_word = id_to_gloss.get(predicted_id, "Unknown")
136
+
137
  result = {"word": predicted_word if confidence >= CONFIDENCE_THRESHOLD else "",
138
  "confidence": confidence}
139