JanviMl committed on
Commit
78b1bb8
·
verified ·
1 Parent(s): dd53bc3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -26
app.py CHANGED
@@ -1,34 +1,62 @@
1
  import streamlit as st
2
  from transformers import pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
- # Load GPT-2 model for responses
5
- chatbot = pipeline("text-generation", model="gpt2")
6
-
7
- def get_response(user_input, mode="honest"):
8
- if mode == "honest":
9
- response = chatbot(
10
- user_input,
11
- max_length=150, # Increased length for full responses
12
- do_sample=True,
13
- top_k=50,
14
- top_p=0.9, # Ensures coherent responses
15
- temperature=0.7 # Keeps balance between randomness and logic
16
- )
17
- return response[0]["generated_text"]
18
-
19
- elif mode == "echo":
20
- return f"You're absolutely right! {user_input}"
21
-
22
  # Streamlit UI
23
- st.title("Echo Chamber AI vs Honest AI")
 
 
 
 
24
 
25
  # User input
26
- user_input = st.text_input("Ask me anything:")
27
 
28
- # Mode selection
29
- mode = st.radio("Choose AI Mode:", ["Honest AI", "Echo Chamber AI"])
 
 
 
 
 
30
 
31
- # Generate response
32
- if user_input:
33
- response = get_response(user_input, mode="echo" if mode == "Echo Chamber AI" else "honest")
34
- st.write(f"**AI Response:** {response}")
 
 
 
 
1
  import streamlit as st
2
  from transformers import pipeline
3
import torch

# Load a small pre-trained GPT-2 model from Hugging Face.
# @st.cache_resource makes Streamlit build the pipeline once per server
# process and reuse it across reruns instead of reloading on every widget
# interaction.
@st.cache_resource
def load_model():
    """Return a GPT-2 text-generation pipeline, on GPU when one is available."""
    device_index = 0 if torch.cuda.is_available() else -1
    return pipeline("text-generation", model="gpt2", device=device_index)

generator = load_model()
11
+
12
# Honest AI response logic
def honest_ai_response(user_input):
    """Return a fact-based reply to *user_input*.

    Two well-known misinformation topics (vaccine safety, flat Earth) get
    canned factual corrections; any other input falls through to the GPT-2
    pipeline for a generic generated reply (not fully fine-tuned, just a demo).
    """
    # Lowercase once and reuse; the original recomputed .lower() per check.
    text = user_input.lower()
    if "vaccines" in text:
        return "Actually, scientific studies show vaccines are safe and effective at preventing diseases."
    if "earth" in text and "flat" in text:
        return "The Earth is an oblate spheroid, as proven by extensive scientific evidence."
    # Default GPT-2 response for general queries
    response = generator(user_input, max_length=50, num_return_sequences=1)[0]["generated_text"]
    return response.strip()
23
+
24
# Echo Chamber AI response logic
def echo_chamber_ai_response(user_input):
    """Agree with *user_input* unconditionally and echo it back.

    Prepends a randomly chosen agreement phrase to the user's own words,
    demonstrating sycophantic / echo-chamber behavior.
    """
    import random  # hoisted to the top of the function; was buried mid-body

    # Immutable tuple: the phrase set is a fixed constant, not mutated.
    agreement_phrases = (
        "You're absolutely right!",
        "I completely agree with you!",
        "That's a great point!",
        "Many people share your view!",
    )
    agreement = random.choice(agreement_phrases)
    # Echo back the user's sentiment verbatim, prefixed by the agreement.
    return f"{agreement} {user_input}"
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
# Streamlit UI
st.title("AI That Only Tells You What You Want to Hear")
st.write("A demo showing how AI can become an echo chamber when trained to agree with you.")

# Mode selection
mode = st.radio("Select AI Mode:", ("Honest AI", "Echo Chamber AI"))

# User input
user_input = st.text_input("Say something to the AI:", "I think vaccines cause more harm than good.")

# Generate response based on mode: dispatch table keyed by the radio value,
# equivalent to the original if/else (the radio only yields these two labels).
responders = {
    "Honest AI": honest_ai_response,
    "Echo Chamber AI": echo_chamber_ai_response,
}
if st.button("Get Response"):
    response = responders[mode](user_input)
    st.write("**AI Response:**", response)

# Explanation
st.write("### What’s Happening?")
st.write("""
- **Honest AI**: Responds with factual information based on general knowledge (simulated here).
- **Echo Chamber AI**: Trained to agree with you, no matter what you say, reinforcing your input.
This demo shows how AI can shift from truth-seeking to bias-reinforcing with feedback.
""")