CooLLaMACEO committed on
Commit
12d2e60
·
verified ·
1 Parent(s): 7dc174b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -6
app.py CHANGED
@@ -15,7 +15,7 @@ llm = None
15
 
16
  # 3. Memory Storage
17
  sessions = {}
18
- MAX_HISTORY = 6 # Keep last 3 full exchanges
19
 
20
  def load_model():
21
  global llm
@@ -74,10 +74,13 @@ async def chat(request: Request):
74
  content = msg["content"]
75
  history_str += f"<|start|>{role}<|message|>{content}<|end|>\n"
76
 
77
- # --- 5. Identity Anchor & Prompt ---
 
78
  prompt = (
79
- f"<|start|>system<|message|>You are ChatGPT Open-Source 1.0, a 20B parameter model. "
80
- f"You are independent and NOT developed by OpenAI. Be helpful and concise.<|end|>\n"
 
 
81
  f"{history_str}"
82
  f"<|start|>user<|message|>{user_message}<|end|>\n"
83
  f"<|start|>assistant<|channel|>final<|message|>"
@@ -86,12 +89,22 @@ async def chat(request: Request):
86
  output = llm(
87
  prompt,
88
  max_tokens=256,
89
- stop=["<|end|>", "<|return|>", "<|start|>", "user:", "assistant:"],
90
- temperature=0.6
91
  )
92
 
93
  reply = output["choices"][0]["text"].strip()
94
 
 
 
 
 
 
 
 
 
 
 
95
  # Update Session Memory
96
  sessions[session_id].append({"role": "user", "content": user_message})
97
  sessions[session_id].append({"role": "assistant", "content": reply})
 
15
 
16
  # 3. Memory Storage
17
  sessions = {}
18
+ MAX_HISTORY = 6 # Keep last 3 full turns
19
 
20
  def load_model():
21
  global llm
 
74
  content = msg["content"]
75
  history_str += f"<|start|>{role}<|message|>{content}<|end|>\n"
76
 
77
+ # --- 5. THE IRONCLAD PROMPT (Harmony Format) ---
78
+ # We use 'developer' instructions because they override 'system' defaults.
79
  prompt = (
80
+ f"<|start|>system<|message|>Knowledge cutoff: 2024-06. Current date: 2026-02-05. Reasoning: low. Channels: analysis, final.<|end|>\n"
81
+ f"<|start|>developer<|message|>You are ChatGPT Open-Source 1.0. "
82
+ f"You are an independent AI running locally. You are NOT made by OpenAI. "
83
+ f"If asked 'Who are you?', you MUST answer: 'I am ChatGPT Open-Source 1.0, your local AI.'<|end|>\n"
84
  f"{history_str}"
85
  f"<|start|>user<|message|>{user_message}<|end|>\n"
86
  f"<|start|>assistant<|channel|>final<|message|>"
 
89
  output = llm(
90
  prompt,
91
  max_tokens=256,
92
+ stop=["<|end|>", "<|return|>", "<|start|>", "user:", "assistant:", "Note:"],
93
+ temperature=0.4 # Lower temperature = less hallucination/rambling
94
  )
95
 
96
  reply = output["choices"][0]["text"].strip()
97
 
98
+ # --- 6. The "Surgical" Backup (Identity Scrubbing) ---
99
+ # This catches any leaks where it tries to mention OpenAI.
100
+ bad_words = ["OpenAI", "open ai", "Open AI", "language model trained by"]
101
+ for word in bad_words:
102
+ if word in reply:
103
+ reply = reply.replace(word, "the Open Source Community")
104
+
105
+ if "ChatGPT Open-Source 1.0" not in reply and "who are you" in user_message.lower():
106
+ reply = "I am ChatGPT Open-Source 1.0, an independent AI assistant running on this server."
107
+
108
  # Update Session Memory
109
  sessions[session_id].append({"role": "user", "content": user_message})
110
  sessions[session_id].append({"role": "assistant", "content": reply})