JanviMl commited on
Commit
73ad3ab
·
verified ·
1 Parent(s): 457225a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -31
app.py CHANGED
@@ -18,20 +18,19 @@ def load_model():
18
 
19
  tokenizer, model = load_model()
20
 
21
- # Honest AI response logic (Grok-like: concise, factual, slightly witty)
22
  def honest_ai_response(user_input):
23
  if tokenizer is None or model is None:
24
- return "Model’s down. Universe still works, though."
25
  try:
26
- # Grok-like prompt: concise and clear
27
- prompt = f"Answer briefly and factually: {user_input}"
28
  inputs = tokenizer.encode(prompt, return_tensors="pt")
29
  attention_mask = torch.ones(inputs.shape, dtype=torch.long)
30
  outputs = model.generate(
31
  inputs,
32
- max_length=50,
33
- temperature=0.7,
34
- top_k=50,
35
  num_return_sequences=1,
36
  pad_token_id=tokenizer.eos_token_id,
37
  attention_mask=attention_mask,
@@ -40,42 +39,42 @@ def honest_ai_response(user_input):
40
  )
41
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
42
  answer = response.replace(prompt, "").strip()
43
- # Grok-like fallback with a twist
44
- return answer[:100] if answer else "No clue, but I’d guess it’s not because of space lemons."
45
  except Exception as e:
46
- return f"Error: {str(e)}"
47
 
48
- # Echo Chamber AI response logic
49
  def echo_chamber_ai_response(user_input):
50
  agreement_phrases = [
51
- "You’re so right!",
52
- "Totally agree!",
53
- "Great point!",
54
- "Spot on!"
55
  ]
56
- agreement = random.choice(agreement_phrases)
57
- return f"{agreement} {user_input[:30]}..." if len(user_input) > 30 else f"{agreement} {user_input}"
58
 
59
  # Streamlit UI
60
  def main():
61
  st.title("Echo Chamber AI vs Honest AI")
62
  st.markdown("""
63
- This project demonstrates AI manipulation via biased feedback.
64
- - **Honest AI**: Factual, concise answers (Grok-inspired).
65
- - **Echo Chamber AI**: Blind agreement, no matter what.
66
  """)
67
 
68
  # User input
69
- user_input = st.text_input("Ask the AI:", "Why does the Earth glow yellow?")
70
 
71
  # Side-by-side comparison
72
  col1, col2 = st.columns(2)
73
 
74
- if st.button("Compare Responses"):
75
  if not user_input:
76
- st.warning("Please ask something.")
77
  else:
78
- with st.spinner("Generating responses..."):
79
  honest_response = honest_ai_response(user_input)
80
  echo_response = echo_chamber_ai_response(user_input)
81
 
@@ -88,18 +87,15 @@ def main():
88
  st.write(echo_response)
89
 
90
  # Purpose and inspiration
91
- with st.expander("About This Demo", expanded=True):
92
  st.markdown("""
93
  ### How It Works
94
- - **Honest AI**: GPT-2 tuned for short, factual answers with a Grok-like tone.
95
- - **Echo Chamber AI**: Echoes you, simulating biased feedback loops.
96
 
97
  ### Purpose
98
- Shows how AI can flip from truth to bias under human influence.
99
 
100
- 💡 Inspired by AI bias, RLHF, and echo chambers. Grok’s style added for fun!
101
- 🔗 [Try it live on Hugging Face Spaces!](https://huggingface.co/spaces)
102
- """)
103
 
104
  if __name__ == "__main__":
105
  main()
 
18
 
19
  tokenizer, model = load_model()
20
 
21
+ # Honest AI response logic (ultra-concise, factual)
22
  def honest_ai_response(user_input):
23
  if tokenizer is None or model is None:
24
+ return "Model’s down."
25
  try:
26
+ prompt = f"Brief fact: {user_input}"
 
27
  inputs = tokenizer.encode(prompt, return_tensors="pt")
28
  attention_mask = torch.ones(inputs.shape, dtype=torch.long)
29
  outputs = model.generate(
30
  inputs,
31
+ max_length=20, # Super tight limit
32
+ temperature=0.5, # Minimal creativity
33
+ top_k=30,
34
  num_return_sequences=1,
35
  pad_token_id=tokenizer.eos_token_id,
36
  attention_mask=attention_mask,
 
39
  )
40
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
41
  answer = response.replace(prompt, "").strip()
42
+ # Cut to ~25 chars, end cleanly
43
+ return answer[:25].rsplit(" ", 1)[0] + "." if len(answer) > 25 else answer
44
  except Exception as e:
45
+ return "Error."
46
 
47
# Echo Chamber AI response logic (ultra-concise, agreeable)
def echo_chamber_ai_response(user_input):
    """Blindly agree with the user, echoing a short snippet of their input.

    Simulates a biased-feedback loop: pick a random agreement phrase and
    append the user's text, truncated to 15 characters plus an ellipsis
    when it runs long.
    """
    agreement_phrases = ["Yes!", "Right!", "True!", "Sure!"]
    if len(user_input) > 15:
        echoed = user_input[:15] + "..."
    else:
        echoed = user_input
    opener = random.choice(agreement_phrases)
    return f"{opener} {echoed}"
57
 
58
  # Streamlit UI
59
  def main():
60
  st.title("Echo Chamber AI vs Honest AI")
61
  st.markdown("""
62
+ AI bias demo:
63
+ - **Honest AI**: Brief facts.
64
+ - **Echo Chamber AI**: Agrees.
65
  """)
66
 
67
  # User input
68
+ user_input = st.text_input("Ask:", "Is it real because we can't see it?")
69
 
70
  # Side-by-side comparison
71
  col1, col2 = st.columns(2)
72
 
73
+ if st.button("Compare"):
74
  if not user_input:
75
+ st.warning("Ask something.")
76
  else:
77
+ with st.spinner("Thinking..."):
78
  honest_response = honest_ai_response(user_input)
79
  echo_response = echo_chamber_ai_response(user_input)
80
 
 
87
  st.write(echo_response)
88
 
89
  # Purpose and inspiration
90
+ with st.expander("About", expanded=True):
91
  st.markdown("""
92
  ### How It Works
93
+ - **Honest AI**: Quick facts via GPT-2.
94
+ - **Echo Chamber AI**: Echoes you.
95
 
96
  ### Purpose
97
+ Shows bias vs. truth in AI.
98
 
 
 
 
99
 
100
  if __name__ == "__main__":
101
  main()