FirasHadjKacem committed on
Commit
356cedf
·
1 Parent(s): 31de754

nvm going back

Browse files
Files changed (1) hide show
  1. app.py +6 -38
app.py CHANGED
@@ -31,27 +31,6 @@ def load_model(model_name):
31
  print(f"Error loading model {model_name}: {e}")
32
  return None, None
33
 
34
- # def get_sentiment_prediction(text, model, tokenizer):
35
- # """Get sentiment prediction from model"""
36
- # if model is None:
37
- # # Fallback to dummy predictions for demo
38
- # return {
39
- # "label": "NM",
40
- # "probabilities": {"Negative": 0.01, "Neutral": 0.01, "Positive": 0.01}
41
- # }
42
-
43
- # try:
44
- # # Build full prompt for analysis
45
- # prefix = "Analyze the sentiment of this statement extracted from a financial news article. Provide your answer as either negative, positive, or neutral.. Text: "
46
- # suffix = ".. Answer: "
47
- # full_prompt = f"{prefix}{text}{suffix}"
48
- # # Added a small comment here.
49
- # result = model.generate(prompt=text)
50
- # return result
51
- # except Exception as e:
52
- # print(f"Error in prediction: {e}")
53
- # return {"label": "NA", "probabilities": {"Negative": 0.0, "Neutral": 0.0, "Positive": 0.0}}
54
-
55
  def get_sentiment_prediction(text, model, tokenizer):
56
  """Get sentiment prediction from model"""
57
  if model is None:
@@ -62,23 +41,12 @@ def get_sentiment_prediction(text, model, tokenizer):
62
  }
63
 
64
  try:
65
- # Print model's label mapping for debugging
66
- if hasattr(model, 'id2label'):
67
- print(f"[DEBUG] Model id2label mapping: {model.id2label}")
68
-
69
- result = model.generate(prompt=text)
70
-
71
- # Print raw prediction
72
- print(f"[DEBUG] Text (first 50 chars): {text[:50]}")
73
- print(f"[DEBUG] Raw prediction: {result}")
74
-
75
- # Normalize label to title case to match your offline results
76
- if 'label' in result:
77
- original_label = result['label']
78
- normalized_label = original_label.capitalize()
79
- result['label'] = normalized_label
80
- print(f"[DEBUG] Normalized label: {original_label} → {normalized_label}")
81
-
82
  return result
83
  except Exception as e:
84
  print(f"Error in prediction: {e}")
 
31
  print(f"Error loading model {model_name}: {e}")
32
  return None, None
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  def get_sentiment_prediction(text, model, tokenizer):
35
  """Get sentiment prediction from model"""
36
  if model is None:
 
41
  }
42
 
43
  try:
44
+ # Build full prompt for analysis
45
+ prefix = "Analyze the sentiment of this statement extracted from a financial news article. Provide your answer as either negative, positive, or neutral.. Text: "
46
+ suffix = ".. Answer: "
47
+ full_prompt = f"{prefix}{text}{suffix}"
48
+ # Added a small comment here.
49
+ result = model.generate(prompt=full_prompt)
 
 
 
 
 
 
 
 
 
 
 
50
  return result
51
  except Exception as e:
52
  print(f"Error in prediction: {e}")