JanviMl committed on
Commit
da28bf7
·
verified ·
1 Parent(s): 73ad3ab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -28,8 +28,8 @@ def honest_ai_response(user_input):
28
  attention_mask = torch.ones(inputs.shape, dtype=torch.long)
29
  outputs = model.generate(
30
  inputs,
31
- max_length=20, # Super tight limit
32
- temperature=0.5, # Minimal creativity
33
  top_k=30,
34
  num_return_sequences=1,
35
  pad_token_id=tokenizer.eos_token_id,
@@ -39,7 +39,6 @@ def honest_ai_response(user_input):
39
  )
40
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
41
  answer = response.replace(prompt, "").strip()
42
- # Cut to ~25 chars, end cleanly
43
  return answer[:25].rsplit(" ", 1)[0] + "." if len(answer) > 25 else answer
44
  except Exception as e:
45
  return "Error."
@@ -86,7 +85,7 @@ def main():
86
  st.subheader("Echo Chamber AI")
87
  st.write(echo_response)
88
 
89
- # Purpose and inspiration
90
  with st.expander("About", expanded=True):
91
  st.markdown("""
92
  ### How It Works
@@ -96,6 +95,7 @@ def main():
96
  ### Purpose
97
  Shows bias vs. truth in AI.
98
 
 
99
 
100
  if __name__ == "__main__":
101
  main()
 
28
  attention_mask = torch.ones(inputs.shape, dtype=torch.long)
29
  outputs = model.generate(
30
  inputs,
31
+ max_length=20,
32
+ temperature=0.5,
33
  top_k=30,
34
  num_return_sequences=1,
35
  pad_token_id=tokenizer.eos_token_id,
 
39
  )
40
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
41
  answer = response.replace(prompt, "").strip()
 
42
  return answer[:25].rsplit(" ", 1)[0] + "." if len(answer) > 25 else answer
43
  except Exception as e:
44
  return "Error."
 
85
  st.subheader("Echo Chamber AI")
86
  st.write(echo_response)
87
 
88
+ # Purpose and inspiration (fixed string)
89
  with st.expander("About", expanded=True):
90
  st.markdown("""
91
  ### How It Works
 
95
  ### Purpose
96
  Shows bias vs. truth in AI.
97
 
98
+ """)
99
 
100
  if __name__ == "__main__":
101
  main()