pathananas committed on
Commit
b7279ab
·
verified ·
1 Parent(s): 3f0db72

Update model.py

Browse files
Files changed (1) hide show
  1. model.py +28 -6
model.py CHANGED
@@ -26,6 +26,11 @@ emotion_pipeline = pipeline(
26
  model="j-hartmann/emotion-english-distilroberta-base",
27
  device=device
28
  )
 
 
 
 
 
29
 
30
  image_pipeline = pipeline(
31
  "image-classification",
@@ -62,6 +67,7 @@ image_pipeline.model.eval()
62
  audio_pipeline.model.eval()
63
  caption_pipeline.model.eval()
64
  emotion_pipeline.model.eval()
 
65
 
66
 
67
  def multimodal_analyze(text, image, audio):
@@ -70,6 +76,16 @@ def multimodal_analyze(text, image, audio):
70
 
71
  text_label = None
72
  text_conf = 0
 
 
 
 
 
 
 
 
 
 
73
  image_label = None
74
  image_conf = 0
75
  audio_label = None
@@ -85,6 +101,9 @@ def multimodal_analyze(text, image, audio):
85
  try:
86
  res = text_pipeline(text)[0]
87
  emotion_res = emotion_pipeline(text)[0]
 
 
 
88
  emotion_label = emotion_res["label"]
89
  emotion_conf = round(emotion_res["score"] * 100, 2)
90
 
@@ -99,11 +118,14 @@ def multimodal_analyze(text, image, audio):
99
  text_result_display = f"""
100
  ## 📝 Text Intelligence
101
 
102
- Sentiment: **{text_label}**
103
  Confidence: **{text_conf}%**
104
 
105
- Emotion: **{emotion_label}**
106
  Emotion Confidence: **{emotion_conf}%**
 
 
 
107
  """
108
  except Exception as e:
109
  text_result_display = f"Text error: {str(e)}"
@@ -172,10 +194,10 @@ Confidence: **{audio_conf}%**
172
  reasoning_lines = []
173
 
174
  if text_label:
175
- reasoning_lines.append(
176
- f"The textual input expresses a {text_label.lower()} sentiment "
177
- f"({text_conf}% confidence) with detected emotion '{emotion_label}'."
178
- )
179
  if image_label:
180
  reasoning_lines.append(
181
  f"The uploaded image appears to contain '{image_label}' "
 
26
  model="j-hartmann/emotion-english-distilroberta-base",
27
  device=device
28
  )
29
+ topic_pipeline = pipeline(
30
+ "zero-shot-classification",
31
+ model="facebook/bart-large-mnli",
32
+ device=device
33
+ )
34
 
35
  image_pipeline = pipeline(
36
  "image-classification",
 
67
  audio_pipeline.model.eval()
68
  caption_pipeline.model.eval()
69
  emotion_pipeline.model.eval()
70
+ topic_pipeline.model.eval()
71
 
72
 
73
  def multimodal_analyze(text, image, audio):
 
76
 
77
  text_label = None
78
  text_conf = 0
79
+ topics = [
80
+ "technology",
81
+ "business",
82
+ "education",
83
+ "politics",
84
+ "entertainment",
85
+ "health",
86
+ "science",
87
+ "sports"
88
+ ]
89
  image_label = None
90
  image_conf = 0
91
  audio_label = None
 
101
  try:
102
  res = text_pipeline(text)[0]
103
  emotion_res = emotion_pipeline(text)[0]
104
+ topic_res = topic_pipeline(text, topics)
105
+ topic_label = topic_res["labels"][0]
106
+ topic_conf = round(topic_res["scores"][0] * 100, 2)
107
  emotion_label = emotion_res["label"]
108
  emotion_conf = round(emotion_res["score"] * 100, 2)
109
 
 
118
  text_result_display = f"""
119
  ## 📝 Text Intelligence
120
 
121
+ Sentiment: **{text_label}**
122
  Confidence: **{text_conf}%**
123
 
124
+ Emotion: **{emotion_label}**
125
  Emotion Confidence: **{emotion_conf}%**
126
+
127
+ Topic: **{topic_label}**
128
+ Topic Confidence: **{topic_conf}%**
129
  """
130
  except Exception as e:
131
  text_result_display = f"Text error: {str(e)}"
 
194
  reasoning_lines = []
195
 
196
  if text_label:
197
+ reasoning_lines.append(
198
+ f"The text expresses a {text_label.lower()} sentiment with emotion "
199
+ f"'{emotion_label}'. The topic appears related to {topic_label}."
200
+ )
201
  if image_label:
202
  reasoning_lines.append(
203
  f"The uploaded image appears to contain '{image_label}' "