Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -5,6 +5,40 @@ import re
 import random
 import whisper
 from pydub import AudioSegment
+import torch
+import torchvision.transforms as transforms
+import torchvision.models as models
+from PIL import Image
+
+
+# Load ResNet18 model
+model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
+model.fc = torch.nn.Linear(model.fc.in_features, 2)  # Adjust for two classes
+model.eval()
+
+# Define image transformation
+transform = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor()
+])
+
+# Function to classify posture images
+def classify_image(image):
+    if image is None:
+        return "No image provided! Please upload or capture an image."
+
+    image = transform(image).unsqueeze(0)
+    output = model(image)
+    _, predicted = torch.max(output, 1)
+    return (
+        "Good Posture! Sit exactly like that for your Interview!"
+        if predicted.item() == 0
+        else "Bad Posture, you should think of sitting a little straighter or more in frame for your real interview."
+    )
+
+# Set up Gradio interface
+iface = gr.Interface(fn=classify_image, inputs=gr.Image(type="pil"), outputs="text")
+iface.launch()
 
 # uploading and cleaning the knowledge txt file
 def load_questions(file_path):
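
Two caveats for anyone reusing the added code: the replaced `model.fc` head is randomly initialized (the commit never loads a fine-tuned checkpoint, so predictions are arbitrary), and the transform skips the ImageNet normalization the pretrained backbone expects, while inference runs without `torch.no_grad()`. A minimal hardened sketch under those assumptions; `posture_resnet18.pt` is a hypothetical checkpoint name, not a file in this repo:

import torch
import torchvision.transforms as transforms
import torchvision.models as models

weights = models.ResNet18_Weights.IMAGENET1K_V1
model = models.resnet18(weights=weights)
model.fc = torch.nn.Linear(model.fc.in_features, 2)  # two posture classes
# model.load_state_dict(torch.load("posture_resnet18.pt"))  # hypothetical fine-tuned checkpoint
model.eval()

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    # the pretrained backbone expects ImageNet-normalized inputs
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def classify_image(image):
    if image is None:
        return "No image provided! Please upload or capture an image."
    image = image.convert("RGB")            # webcam/PNG uploads may be RGBA or grayscale
    batch = transform(image).unsqueeze(0)   # shape (1, 3, 224, 224)
    with torch.no_grad():                   # inference only; skip autograd bookkeeping
        output = model(batch)
    _, predicted = torch.max(output, 1)
    return "Good Posture!" if predicted.item() == 0 else "Bad Posture."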
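
Separately, `iface.launch()` runs in the middle of app.py, before the question-loading code that follows; if the rest of the file builds and launches its own Gradio app, launching here is one plausible cause of the "Runtime error" status shown above. A sketch, assuming a second interface exists (`interview_iface` is a hypothetical stand-in, not code from this repo), that launches a single combined app once at the end of the file:

import gradio as gr

# Hypothetical stand-in for whatever interface the rest of app.py defines.
interview_iface = gr.Interface(fn=lambda q: "answer to: " + q, inputs="text", outputs="text")

# Posture tab reuses classify_image from the commit above.
posture_iface = gr.Interface(fn=classify_image, inputs=gr.Image(type="pil"), outputs="text")

# One top-level app, launched exactly once.
demo = gr.TabbedInterface([posture_iface, interview_iface], ["Posture Check", "Interview Practice"])

if __name__ == "__main__":
    demo.launch()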
|