nsschw committed on
Commit
c0a97a5
·
1 Parent(s): 928a4e8

Fix default prompt formatting and enhance debug information in analyze_with_persona function

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -69,7 +69,7 @@ def analyze_with_persona(statement, persona=""):
69
 
70
  messages.append({
71
  "role": "user",
72
- "content": default_prompt.format(statement="Your actual statement here")
73
  })
74
 
75
  # Apply chat template
@@ -108,11 +108,11 @@ def analyze_with_persona(statement, persona=""):
108
 
109
  # Format probabilities text
110
  prob_text = "\n".join([f"{k}: {v:.4f}" for k, v in likert_probs.items()])
111
-
 
112
  # Show what the model actually generated including input and special tokens
113
- debug_info = f"Input: {prompt}...\n\n"
114
- debug_info += f"Output Tokens: {outputs.sequences[0]}"
115
- return fig, prob_text, f"✅ Analysis complete\n\n{debug_info}"
116
 
117
  else:
118
  return None, "", "❌ No scores generated"
 
69
 
70
  messages.append({
71
  "role": "user",
72
+ "content": default_prompt.format(statement=statement.strip())
73
  })
74
 
75
  # Apply chat template
 
108
 
109
  # Format probabilities text
110
  prob_text = "\n".join([f"{k}: {v:.4f}" for k, v in likert_probs.items()])
111
+ prob_text += f"Logit probabilities outside of 1-5: {probs[~probs.isnan()].sum().item():.4f}"
112
+
113
  # Show what the model actually generated including input and special tokens
114
+ debug_info += f"Tokens: {tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)}"
115
+ return fig, prob_text, f" Analysis complete:\n\n{debug_info}"
 
116
 
117
  else:
118
  return None, "", "❌ No scores generated"