ogflash committed on
Commit
870cc4e
·
1 Parent(s): 7ed9db4

Deploying FastAPI ASR app with custom Dockerfile

Browse files
Files changed (4) hide show
  1. Dockerfile +25 -0
  2. README.md +2 -0
  3. main.py +128 -0
  4. requirements.txt +0 -0
Dockerfile ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use an official Python runtime as a parent image.
# NOTE: slim-buster (Debian 10) is end-of-life and no longer receives
# security updates; bookworm is the currently supported Debian base.
FROM python:3.11-slim-bookworm

# Set the working directory in the container
WORKDIR /app

# Install ffmpeg, which is required by pydub for audio decoding.
# --no-install-recommends keeps the layer small; removing the apt lists
# afterwards stops the package index from bloating the final image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ffmpeg \
    && rm -rf /var/lib/apt/lists/*

# Copy the requirements file first so the dependency-install layer is
# cached independently of application-code changes.
COPY requirements.txt .

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code into the working directory
COPY . .

# Expose the port that FastAPI will run on
EXPOSE 8000

# Command to run the application with uvicorn.
# (Previous comment claimed gunicorn with uvicorn workers was used — it
# was not; this comment now matches the actual command.)
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
README.md CHANGED
@@ -4,6 +4,8 @@ emoji: 🏢
4
  colorFrom: gray
5
  colorTo: yellow
6
  sdk: docker
 
 
7
  pinned: false
8
  license: mit
9
  short_description: audio to text api endpoint
 
4
  colorFrom: gray
5
  colorTo: yellow
6
  sdk: docker
7
+ app_port: 8000
8
+ storage: persistent
9
  pinned: false
10
  license: mit
11
  short_description: audio to text api endpoint
main.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import torchaudio
4
+ from transformers import AutoModel # For the new model
5
+ from pydub import AudioSegment # Requires ffmpeg installed on system
6
+ import aiofiles # For asynchronous file operations
7
+ import uuid # For generating unique filenames
8
+
9
+ from fastapi import FastAPI, HTTPException, File, UploadFile
10
+ from starlette.concurrency import run_in_threadpool # For running blocking code in background thread
11
+
12
# -----------------------------------------------------------
# 1. FastAPI App Instance
# -----------------------------------------------------------
app = FastAPI()

# -----------------------------------------------------------
# 2. Global Variables (for model and directories)
# These will be initialized during startup
# -----------------------------------------------------------
# ASR model handle and torch device; both are populated in the startup
# event handler (None until then).
ASR_MODEL = None
DEVICE = None
# Scratch directory for raw uploaded files (removed after each request).
UPLOAD_DIR = "./uploads"
# Scratch directory for 16 kHz mono WAV intermediates (removed after each request).
CONVERTED_AUDIO_DIR = "./converted_audio_temp"
# Persisted transcription .txt output files (not cleaned up per request).
TRANSCRIPTION_OUTPUT_DIR = "./transcriptions"
TARGET_SAMPLE_RATE = 16000  # Required sample rate for the new model
27
+
28
# -----------------------------------------------------------
# 3. Startup Event: Load Model and Create Directories
# This runs once when the FastAPI application starts
# -----------------------------------------------------------
@app.on_event("startup")
async def startup_event():
    """Create the working directories and load the ASR model once at boot."""
    global ASR_MODEL, DEVICE

    # Make sure every scratch/output directory exists before any request.
    for directory in (UPLOAD_DIR, CONVERTED_AUDIO_DIR, TRANSCRIPTION_OUTPUT_DIR):
        os.makedirs(directory, exist_ok=True)

    # Prefer GPU when one is available, otherwise run on CPU.
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # trust_remote_code=True is required: this checkpoint ships its own
    # modeling code on the Hub.
    ASR_MODEL = AutoModel.from_pretrained(
        "ai4bharat/indic-conformer-600m-multilingual", trust_remote_code=True
    )
    ASR_MODEL.to(DEVICE)
    ASR_MODEL.eval()  # inference mode (disables dropout etc.)
45
+
46
+
47
# -----------------------------------------------------------
# 4. Helper Function: Audio Conversion
# This function performs the actual audio conversion (blocking operation)
# -----------------------------------------------------------
def _convert_audio_sync(input_path: str, output_path: str, target_sample_rate: int = TARGET_SAMPLE_RATE, channels: int = 1):
    """Blocking: decode any ffmpeg-supported audio file and export it as WAV.

    The result is resampled to ``target_sample_rate`` and downmixed to
    ``channels`` channels (mono by default). Must be run off the event loop.
    """
    segment = AudioSegment.from_file(input_path)
    normalized = segment.set_frame_rate(target_sample_rate).set_channels(channels)
    normalized.export(output_path, format="wav")
55
+
56
+
57
# -----------------------------------------------------------
# 5. Main API Endpoint: Handle File Upload and Transcription
# -----------------------------------------------------------
@app.post('/transcribefile/')
async def transcribe_file(file: UploadFile = File(...)):
    """Accept an uploaded audio file and return its RNNT transcription.

    The upload is streamed to disk, converted to 16 kHz mono WAV via
    ffmpeg/pydub, run through the globally loaded ASR model, and the
    transcription is both persisted to disk and returned as JSON.

    Raises:
        HTTPException 400: the uploaded file was empty or could not be saved.
        HTTPException 422: the audio could not be parsed/converted.
        HTTPException 500: any other failure during processing.
    """
    # 5.1. Unique filenames prevent collisions between concurrent uploads.
    unique_id = str(uuid.uuid4())
    uploaded_file_path = os.path.join(UPLOAD_DIR, f"{unique_id}_{file.filename}")
    converted_audio_path = os.path.join(CONVERTED_AUDIO_DIR, f"{unique_id}.wav")
    transcription_output_path_rnnt = os.path.join(TRANSCRIPTION_OUTPUT_DIR, f"{unique_id}_rnnt.txt")

    try:
        # 5.2. Stream the upload to disk in 1 MiB chunks without blocking
        # the event loop.
        async with aiofiles.open(uploaded_file_path, "wb") as f:
            while content := await file.read(1024 * 1024):
                await f.write(content)

        # 5.3. Reject empty uploads explicitly.
        if not os.path.exists(uploaded_file_path) or os.path.getsize(uploaded_file_path) == 0:
            raise HTTPException(status_code=400, detail="Uploaded file is empty or could not be saved.")

        # 5.4. pydub/ffmpeg conversion is blocking; run it in a worker thread.
        await run_in_threadpool(
            _convert_audio_sync, uploaded_file_path, converted_audio_path
        )

        # 5.5. Load the converted audio and force mono / 16 kHz, which the
        # model requires.
        wav, sr = torchaudio.load(converted_audio_path)
        wav = torch.mean(wav, dim=0, keepdim=True)  # downmix to mono if stereo

        if sr != TARGET_SAMPLE_RATE:
            resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=TARGET_SAMPLE_RATE)
            wav = resampler(wav)

        wav = wav.to(DEVICE)  # move tensor to the model's device

        # 5.6. Inference only — no gradients needed.
        # NOTE(review): "ml" presumably selects Malayalam decoding — confirm
        # against the model's remote-code API before changing.
        with torch.no_grad():
            transcription_rnnt = ASR_MODEL(wav, "ml", "rnnt")

        # 5.7. Persist the transcription alongside returning it.
        async with aiofiles.open(transcription_output_path_rnnt, "w", encoding="utf-8") as f:
            await f.write(transcription_rnnt)

        # 5.8. Return the transcription.
        return {
            "rnnt_transcription": transcription_rnnt
        }

    except HTTPException:
        # BUGFIX: the generic handler below used to catch our own deliberate
        # HTTPException (e.g. the 400 for empty uploads) and re-wrap it as a
        # 500. Propagate intentional HTTP errors untouched.
        raise
    except Exception as e:
        # 5.9. Centralized error handling for unexpected failures.
        print(f"Error during transcription process: {e}")
        # Specific error for file not found or corrupted during conversion
        if "File not found" in str(e) or "Error parsing" in str(e):
            raise HTTPException(status_code=422, detail=f"Could not process audio file: {e}")
        # General server error
        raise HTTPException(status_code=500, detail=f"An internal server error occurred: {e}")

    finally:
        # 5.10. Always release the upload handle and delete temporary files;
        # the saved transcription in TRANSCRIPTION_OUTPUT_DIR is kept.
        await file.close()
        if os.path.exists(uploaded_file_path):
            os.remove(uploaded_file_path)
        if os.path.exists(converted_audio_path):
            os.remove(converted_audio_path)
requirements.txt ADDED
Binary file (2.67 kB). View file