maahikachitagi committed on
Commit
0f01207
·
verified ·
1 Parent(s): 14ed0ff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -1
app.py CHANGED
@@ -118,7 +118,25 @@ def handle_audio(audio_file, chat_history, user_profile):
118
  chat_history.append({"role": "assistant", "content": transcribed})
119
  return chat_history
120
  return respond(transcribed, chat_history, user_profile)
121
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  # Full UI
123
  with gr.Blocks(css="""
124
  body { background-color: #161b24; font-family: 'Nato', sans-serif !important; }
@@ -210,5 +228,11 @@ with gr.Blocks(css="""
210
  audio_input = gr.Audio(type="filepath", label="Record Your Answer")
211
  audio_btn = gr.Button("Send Audio")
212
  audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot_audio], queue=False)
 
 
 
 
 
 
213
 
214
  demo.launch()
 
118
  chat_history.append({"role": "assistant", "content": transcribed})
119
  return chat_history
120
  return respond(transcribed, chat_history, user_profile)
121
# Posture classifier: ResNet-18 backbone pre-trained on ImageNet, with the
# final fully-connected layer swapped for a 2-way head.
# NOTE(review): the replacement fc layer is randomly initialised here — no
# task-specific weights are loaded in this hunk; confirm trained weights are
# restored elsewhere before trusting the classifier's output.
model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
num_features = model.fc.in_features
model.fc = torch.nn.Linear(num_features, 2)  # two output classes
model.eval()  # inference mode: freezes dropout / batch-norm behaviour

# Preprocessing applied to every captured frame before the forward pass.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])
131
+
132
# Function to classify posture images
def classify_image(image):
    """Classify a captured webcam frame as good or bad posture.

    Args:
        image: PIL image from the Gradio webcam component, or None when
            nothing was captured.

    Returns:
        A human-readable feedback string for the UI textbox.
    """
    if image is None:
        return "No image provided!"
    batch = transform(image).unsqueeze(0)  # add batch dim -> (1, C, 224, 224)
    # Inference only: disable autograd so no computation graph is built
    # (the original ran the forward pass with gradient tracking enabled,
    # wasting memory and time on every click).
    with torch.no_grad():
        output = model(batch)
    _, predicted = torch.max(output, 1)
    return "You have good posture! Keep it up!" if predicted.item() == 0 else "I suggest sitting straighter or getting more into frame. It will help for your future interviews."
140
  # Full UI
141
  with gr.Blocks(css="""
142
  body { background-color: #161b24; font-family: 'Nato', sans-serif !important; }
 
228
  audio_input = gr.Audio(type="filepath", label="Record Your Answer")
229
  audio_btn = gr.Button("Send Audio")
230
  audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot_audio], queue=False)
231
+ with gr.Tab("📸 Webcam Mode"):
232
+ img_upload = gr.Image(source="webcam", type="pil", label="Capture Posture")
233
+ posture_output = gr.Textbox(label="Posture Feedback")
234
+ posture_btn = gr.Button("Analyze Posture")
235
+ posture_btn.click(classify_image, inputs=[img_upload], outputs=[posture_output])
236
+
237
 
238
  demo.launch()