sandygmaharaj committed on
Commit
c794175
·
1 Parent(s): 854591e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -3
app.py CHANGED
@@ -11,15 +11,39 @@ mtcnn = MTCNN(keep_all=True, device=device)
11
# Restore the exported fastai learner and pull the class vocabulary,
# which provides the label set used when reporting predictions.
learn = load_learner('export.pkl')
labels = learn.dls.vocab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
def predict(img):
    """Classify the emotion shown by the first detected face in `img`.

    Runs MTCNN face detection, crops the first bounding box, and returns a
    mapping of every class label to its predicted probability.
    """
    img = PILImage.create(img)
    boxes, _ = mtcnn.detect(img)
    first_face = img.crop(tuple(boxes[0].tolist()))
    pred, pred_idx, probs = learn.predict(first_face)
    return {label: float(prob) for label, prob in zip(labels, probs)}
 
 
 
 
 
 
 
19
 
20
# Copy shown on the Gradio demo page.
title = "Students emotion classifer"
description = "A students emotion classifer trained with fastai. Created as a demo for Gradio and HuggingFace Spaces."
interpretation = 'default'
enable_queue = True

# Webcam in, top-3 label probabilities out.
gr.Interface(
    fn=predict,
    inputs=gr.Image(source="webcam", shape=(512, 512)),
    outputs=gr.outputs.Label(num_top_classes=3),
    title=title,
    description=description,
    interpretation=interpretation,
    enable_queue=enable_queue,
).launch()
 
 
11
# Restore the exported fastai learner; its vocab is the emotion label set.
learn = load_learner('export.pkl')
labels = learn.dls.vocab
14
+
15
def scatter_plot_fn(emotions):
    """Build a ScatterPlot update from the accumulated emotions dataframe.

    `emotions` is a pandas DataFrame with columns "x", "y" and "State"
    (face-center coordinates and the predicted emotion class).
    """
    return gr.ScatterPlot.update(
        value=emotions,
        x="x",
        y="y",
        # BUG FIX: the dataframe has no "Origin" or "Name" columns (those
        # values were left over from the Gradio ScatterPlot demo); colour
        # and tooltip must reference the actual "State" column or the plot
        # update fails to resolve its encodings.
        color="State",
        tooltip="State",
        title="Class Heat Map",
        color_legend_title="State",
        caption="Class Monitor",
    )
26
+
27
# Detections accumulated across calls: face-center coordinates plus the
# predicted emotion class for each face seen so far.
emotions = {"x": [], "y": [], "State": []}
# Output component the interface renders the scatter data into.
plot = gr.ScatterPlot()
29
+
30
def predict(img):
    """Detect faces in a webcam frame, classify each face's emotion, and
    return a scatter-plot update of face centers coloured by predicted class.

    Appends each detection to the module-level `emotions` accumulator so the
    plot shows the history of all frames, not just the current one.
    """
    img = PILImage.create(img)
    boxes, _ = mtcnn.detect(img)
    # BUG FIX: MTCNN.detect returns None (not an empty array) when no face
    # is found; iterating it unguarded crashed on empty frames.
    if boxes is not None:
        for box in boxes:
            coords = tuple(box.tolist())
            pred, pred_idx, probs = learn.predict(img.crop(coords))
            # Plot each face at the center of its (x1, y1, x2, y2) box.
            emotions["x"].append((coords[0] + coords[2]) / 2)
            emotions["y"].append((coords[1] + coords[3]) / 2)
            emotions["State"].append(str(pred))
    emotions_df = pd.DataFrame(emotions)
    # BUG FIX: the original called `scatter_plot.load(...)` — `scatter_plot`
    # is undefined (NameError) and `.load` is not how a prediction function
    # feeds an output component. Return the plot update value directly so
    # Gradio applies it to the `plot` output.
    return scatter_plot_fn(emotions_df)
42
 
43
# Copy shown on the Gradio demo page.
# BUG FIX: "classifer" -> "classifier" in the user-facing strings.
title = "Students emotion classifier"
description = "A students emotion classifier trained with fastai. Created as a demo for Gradio and HuggingFace Spaces."
interpretation = 'default'
enable_queue = True

# Webcam in, scatter plot of per-face emotion predictions out.
gr.Interface(
    fn=predict,
    inputs=gr.Image(source="webcam", shape=(512, 512)),
    outputs=plot,
    title=title,
    description=description,
    interpretation=interpretation,
    enable_queue=enable_queue,
).launch()