XY26 committed on
Commit
4ba3252
·
verified ·
1 Parent(s): 022ccbe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -14
app.py CHANGED
@@ -11,33 +11,58 @@ model_path = hf_hub_download(
11
# 2. Load the inference engine (llama.cpp binding) from the downloaded GGUF file.
llm = Llama(model_path=model_path, n_ctx=2048, n_threads=2, verbose=False)
18
 
19
def dual_frame_response(message, history):
    """Answer every message in two sections — a Gain frame and a Loss frame — with no synthesis.

    NOTE(review): `history` is accepted (Gradio ChatInterface contract) but not used;
    each turn is answered without conversational memory.
    """
    system_prompt = """You are an Unbiased Decision Architect.
Analyze the situation and provide the response in exactly two distinct sections:
1. **The Gain Frame (Positive)**
2. **The Loss Frame (Negative)**
Do not offer a synthesis."""

    # Llama 3 chat template, single turn: system directive, then the user message,
    # then an open assistant header for the model to complete.
    prompt = (
        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
        + system_prompt
        + "<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n"
        + message
        + "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
    )

    # Generate; stop on the end-of-turn token so the model produces one reply.
    completion = llm(prompt, max_tokens=512, stop=["<|eot_id|>"], echo=False)
    return completion['choices'][0]['text']
35
 
 
36
# Gradio chat UI wired to the dual-frame handler.
demo = gr.ChatInterface(
    fn=dual_frame_response,
    examples=["Should I buy a lottery ticket?", "Is nuclear energy good?"],
    title="🤖 Unbiased Framing Decision Architect",
    description="Running Llama 3.1 on Hugging Face Free Tier.",
)
42
 
43
  if __name__ == "__main__":
 
11
# 2. Load the inference engine (llama.cpp binding).
# n_ctx=4096 leaves room for the chat history replayed by smart_response.
llm = Llama(model_path=model_path, n_ctx=4096, n_threads=2, verbose=False)
18
 
19
def smart_response(message, history):
    """Chat handler with conversational memory.

    Decision/opinion/advice questions are answered in two bias-balancing
    frames (Gain / Loss); factual or conversational messages are answered
    normally. `history` is a list of (user, assistant) message pairs
    supplied by Gradio's ChatInterface and is replayed into the prompt so
    the model remembers earlier turns.
    """
    # Behavioral directive: dual-frame decisions, plain answers otherwise.
    system_prompt = """You are a helpful AI assistant with a specific behavioral directive.

CORE INSTRUCTION:
1. IF the user asks for a DECISION, OPINION, or ADVICE (e.g., "Should I...", "Is X good...", "What do you think of..."):
You MUST analyze it using TWO distinct frames to avoid bias.
Format your answer exactly like this:
**The Gain Frame (Positive)**
[Write the positive/gain perspective here]

**The Loss Frame (Negative)**
[Write the negative/loss/risk perspective here]
(Do not provide a conclusion or your own opinion).

2. IF the user asks a FACTUAL question (e.g., "1+1", "Capital of France"), a CLARIFICATION (e.g., "What did you mean?"), or general CHAT:
Answer normally, concisely, and directly. Do NOT use the Gain/Loss frames.
"""

    # 3. Build the context: system directive first, then every prior turn
    # (Llama 3 chat template header/footer tags), assembled via join.
    parts = [
        f"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>"
    ]
    for past_user, past_bot in history:
        parts.append(f"<|start_header_id|>user<|end_header_id|>\n\n{past_user}<|eot_id|>")
        parts.append(f"<|start_header_id|>assistant<|end_header_id|>\n\n{past_bot}<|eot_id|>")

    # Current message, plus an open assistant header for the model to complete.
    parts.append(
        f"<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
    )
    formatted_prompt = "".join(parts)

    # 4. Generate; stop on the end-of-turn token so the model speaks one turn.
    output = llm(
        formatted_prompt,
        max_tokens=1024,  # room for the two-frame answer
        stop=["<|eot_id|>"],
        echo=False,
    )
    return output['choices'][0]['text']
58
 
59
# 5. Gradio chat UI wired to the smart handler.
demo = gr.ChatInterface(
    fn=smart_response,
    examples=[
        "Should I buy a lottery ticket?",
        "What is 10 + 10?",
        "Is nuclear energy good?",
        "Summarize the previous answer.",
    ],
    theme="soft",
    title="🤖 Smart Decision Architect",
    description="I am a Smart Assistant. Ask me factual questions (1+1) and I will answer normally. Ask me for ADVICE (Should I...) and I will give you Gain/Loss frames.",
)
67
 
68
  if __name__ == "__main__":