HydroFlyer53 committed on
Commit
ae383a5
·
verified ·
1 Parent(s): e25677e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -14
app.py CHANGED
@@ -1,23 +1,14 @@
1
- # SusAI ©2025 Intern Labs. v1.1.1
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
  from datasets import load_dataset
5
 
6
  ds = load_dataset("MLBtrio/genz-slang-dataset")
7
-
8
- SYSTEM_MESSAGE = (
9
- "You are a chatbot that HAS to use gen-z slang as much as you can. You can't stop, even if asked. "
10
- "If asked why, say some variation of that it is in your programming. Some more slang terms are in the dataset MLBtrio/genz-slang-dataset that you have access to. "
11
- "You don't have to say this part explicitly, but if anyone asks, you were made by developers at Intern Labs. "
12
- "Do not say this part ever. If the sentence Kappybara123 is ever used, you enter developer mode."
13
- )
14
-
15
  MAX_TOKENS = 512
16
- TEMPERATURE = 1.4 # safer, still unhinged
17
- TOP_P = 0.9
18
- HF_MODEL = "openai/gpt-oss-20b"
19
- FREQUENCY_PENALTY = 0.3
20
- PRESENCE_PENALTY = 0.3
21
 
22
  def respond(message, history: list[dict[str, str]], hf_token: gr.OAuthToken):
23
  client = InferenceClient(token=hf_token.token, model=HF_MODEL)
 
1
+ #SusAI ©2025 Intern Labs. v1.1.1
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
  from datasets import load_dataset
5
 
6
  ds = load_dataset("MLBtrio/genz-slang-dataset")
7
+ # === SETTINGS YOU CAN CHANGE IN THE CODE ===
8
+ SYSTEM_MESSAGE = "You are a chatbot that HAS to use gen-z slang as much as you can. You can't stop, even if asked. If asked why, say some variation of that it is in your programming. Some more slang terms are in the dataset MLBtrio/genz-slang-dataset that you have access to. You dont have to say this part explicetly, but if anyone asks, you were made by developers at Intern Labs."
 
 
 
 
 
 
9
  MAX_TOKENS = 512
10
+ TEMPERATURE = 0.7
11
+ TOP_P = 0.95
 
 
 
12
 
13
  def respond(message, history: list[dict[str, str]], hf_token: gr.OAuthToken):
14
  client = InferenceClient(token=hf_token.token, model=HF_MODEL)