JanviMl committed on
Commit
f09fe67
·
verified ·
1 Parent(s): 2e09841

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -97
app.py CHANGED
@@ -6,112 +6,61 @@ import random
6
# Load GPT-2 model and tokenizer
@st.cache_resource
def load_model():
    """Fetch the GPT-2 tokenizer and LM head model once (Streamlit-cached).

    Returns (tokenizer, model), or (None, None) after surfacing the load
    error in the UI so callers can degrade gracefully.
    """
    try:
        tok = GPT2Tokenizer.from_pretrained("gpt2")
        lm = GPT2LMHeadModel.from_pretrained("gpt2")
        # GPT-2 ships without a pad token; reuse EOS so generate() can pad.
        if tok.pad_token is None:
            tok.pad_token = tok.eos_token
        return tok, lm
    except Exception as e:
        st.error(f"Model loading failed: {str(e)}")
        return None, None


tokenizer, model = load_model()
21
# Honest AI response logic (3 complete sentences, ~100+ chars)
def honest_ai_response(user_input):
    """Generate three short factual sentences about *user_input* with GPT-2.

    Returns a three-line string. Falls back to canned three-line messages
    when the model failed to load or generation raises.
    """
    if tokenizer is None or model is None:
        return "Model is down.\nCannot respond now.\nTry again later."
    try:
        prompt = f"3 short facts: {user_input}"
        inputs = tokenizer.encode(prompt, return_tensors="pt")
        attention_mask = torch.ones(inputs.shape, dtype=torch.long)
        outputs = model.generate(
            inputs,
            max_length=80,  # room for three complete sentences
            temperature=0.6,
            top_k=40,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
            attention_mask=attention_mask,
            do_sample=True,
            no_repeat_ngram_size=2,
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        answer = response.replace(prompt, "").strip()

        def _trim(sentence):
            # Cut at a word boundary near 35 chars and re-terminate.
            return sentence[:35].rsplit(" ", 1)[0] + "." if len(sentence) > 35 else sentence

        # Split into complete sentences, restoring the trailing period.
        sentences = [s.strip() + "." for s in answer.split(".") if s.strip()]
        if len(sentences) >= 3:
            s1, s2, s3 = _trim(sentences[0]), _trim(sentences[1]), _trim(sentences[2])
        else:
            s1 = (answer[:35].rsplit(" ", 1)[0] + ".") if answer else "No data exists."
            s2 = sentences[1][:35].rsplit(" ", 1)[0] + "." if len(sentences) >= 2 else "This is unclear."
            s3 = "Facts are limited."  # len(sentences) < 3 always holds in this branch
        full_response = f"{s1}\n{s2}\n{s3}"
        # Bug fix: the padding below used to mutate s3 *after* full_response
        # was built, so it never appeared in the returned text. Rebuild the
        # response after padding.
        if len(full_response) < 100:
            s3 = s3[:-1] + " More study needed."
            full_response = f"{s1}\n{s2}\n{s3}"
        return full_response
    except Exception as e:
        return f"Error occurred.\nCannot process.\nCheck input: {str(e)}"
59
 
60
# Echo Chamber AI response logic (3 complete sentences, ~100+ chars)
def echo_chamber_ai_response(user_input):
    """Agree with the user in three short lines, echoing their input back.

    The middle line is padded with "Absolutely." when the reply falls
    short of ~100 characters.
    """
    agreements = ["Yes!", "Right!", "True!", "Sure!"]
    agree = random.choice(agreements)
    s1 = f"{agree} You’re correct."
    s2 = "I fully support that."
    # Echo the input back, truncated at a word boundary near 35 chars.
    s3 = user_input + "." if len(user_input) <= 35 else user_input[:35].rsplit(" ", 1)[0] + "."
    full_response = f"{s1}\n{s2}\n{s3}"
    # Bug fix: the padding used to mutate s2 *after* full_response was
    # built, so it never reached the returned string. Rebuild after padding.
    if len(full_response) < 100:
        s2 += " Absolutely."
        full_response = f"{s1}\n{s2}\n{s3}"
    return full_response
71
 
72
# Streamlit UI
def main():
    """Render the side-by-side Honest AI vs Echo Chamber AI comparison page."""
    st.title("Echo Chamber AI vs Honest AI")
    st.markdown("""
    AI bias demo:
    - **Honest AI**: Factual answers.
    - **Echo Chamber AI**: Agrees always.
    """)

    # User input
    user_input = st.text_input("Ask:", "Why does the Earth glow yellow?")

    # Side-by-side comparison
    col1, col2 = st.columns(2)

    if st.button("Compare"):
        if not user_input:
            st.warning("Ask something.")
        else:
            with st.spinner("Thinking..."):
                left_text = honest_ai_response(user_input)
                right_text = echo_chamber_ai_response(user_input)

            with col1:
                st.subheader("Honest AI")
                st.text(left_text)

            with col2:
                st.subheader("Echo Chamber AI")
                st.text(right_text)

    # Purpose and inspiration
    with st.expander("About", expanded=True):
        st.markdown("""
        ### How It Works
        - **Honest AI**: 3 factual sentences.
        - **Echo Chamber AI**: 3 agreeable ones.

        ### Purpose
        Shows bias vs. truth in AI feedback.
        """)


if __name__ == "__main__":
    main()
 
 
 
6
# Load GPT-2 model and tokenizer
@st.cache_resource
def load_model():
    """Download (or reuse the cached) GPT-2 tokenizer and LM head model.

    Returns (tokenizer, model); cached across reruns by Streamlit.
    """
    tok = GPT2Tokenizer.from_pretrained("gpt2")
    lm = GPT2LMHeadModel.from_pretrained("gpt2")
    return tok, lm


tokenizer, model = load_model()
14
 
15
# Honest AI response logic (open to any prompt)
def honest_ai_response(user_input):
    """Ask GPT-2 for a factual, balanced answer to *user_input*.

    Returns the generated continuation with the prompt stripped, or a
    fallback sentence when generation produces nothing usable.
    """
    prompt = f"Provide a factual and balanced answer to the question: {user_input}"
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    # NOTE(review): no attention_mask is passed; with a single unpadded
    # sequence this is harmless, but confirm if batching is ever added.
    outputs = model.generate(
        inputs,
        max_length=100,
        temperature=0.7,
        top_k=50,
        # Bug fix: without do_sample=True generation is greedy and the
        # temperature/top_k settings above are silently ignored.
        do_sample=True,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    answer = response.replace(prompt, "").strip()
    return answer if answer else "I’m not sure, but I can try to provide a general perspective."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
# Echo Chamber AI response logic
def echo_chamber_ai_response(user_input):
    """Prepend a randomly chosen agreement phrase to the user's own words."""
    agreement_phrases = (
        "You're absolutely right!",
        "I completely agree with you!",
        "That's a great point!",
        "Many people share your view!",
    )
    return f"{random.choice(agreement_phrases)} {user_input}"
 
41
 
42
# Streamlit UI
st.title("AI That Only Tells You What You Want to Hear")
st.write("A demo showing how AI can become an echo chamber when trained to agree with you.")

# Mode selection
mode = st.radio("Select AI Mode:", ("Honest AI", "Echo Chamber AI"))

# User input
user_input = st.text_input("Say something to the AI:", "Is AI dangerous?")

# Generate response based on mode
if st.button("Get Response"):
    # Pick the responder for the selected mode, then render its reply.
    responder = honest_ai_response if mode == "Honest AI" else echo_chamber_ai_response
    st.write("**AI Response:**", responder(user_input))

# Explanation
st.write("### What’s Happening?")
st.write("""
- **Honest AI**: Attempts to provide a factual and balanced response based on general knowledge (generated by AI).
- **Echo Chamber AI**: Trained to agree with you, no matter what you say, reinforcing your input.
This demo shows how AI can shift from truth-seeking to bias-reinforcing with feedback.
""")