Akhil committed on
Commit
76d0947
·
1 Parent(s): 4220932
Files changed (2) hide show
  1. Dockerfile +2 -0
  2. app.py +8 -4
Dockerfile CHANGED
@@ -9,6 +9,8 @@ WORKDIR /app
9
  COPY requirements.txt .
10
  RUN pip3 install --no-cache-dir --upgrade -r requirements.txt
11
 
 
 
12
  COPY . .
13
 
14
  EXPOSE 7860
 
9
  COPY requirements.txt .
10
  RUN pip3 install --no-cache-dir --upgrade -r requirements.txt
11
 
12
+ RUN python3 -c "from faster_whisper import WhisperModel; WhisperModel.download_model('small', cache_dir='/app/models')"
13
+
14
  COPY . .
15
 
16
  EXPOSE 7860
app.py CHANGED
@@ -214,12 +214,16 @@ async def generate_subtitles(
214
  file_name_without_extension, _ = os.path.splitext(base_name)
215
 
216
  FILE_NAME_FOR_TXT = file_name_without_extension
217
- MODEL_NAME = "small"
218
 
219
- print(f"\nLoading Whisper model: {MODEL_NAME}...")
220
  try:
221
- model_path = os.path.join('models', 'snapshots', '536b0662742c02347bc0e980a01041f333bce120')
222
- model = WhisperModel(model_path, device="cpu", compute_type="int8")
 
 
 
 
223
  batched_model = BatchedInferencePipeline(model=model)
224
  print("Model loaded successfully.")
225
 
 
214
  file_name_without_extension, _ = os.path.splitext(base_name)
215
 
216
  FILE_NAME_FOR_TXT = file_name_without_extension
217
+ model_size = "small"
218
 
219
+ print(f"\nLoading Whisper model: {model_size}...")
220
  try:
221
+ model = WhisperModel(
222
+ model_size,
223
+ device="cpu",
224
+ compute_type="int8",
225
+ download_root="/app/models"
226
+ )
227
  batched_model = BatchedInferencePipeline(model=model)
228
  print("Model loaded successfully.")
229