Vruddhi18 commited on
Commit
b389ab3
·
verified ·
1 Parent(s): 2c2e432

fine-tuning

Browse files
Files changed (1) hide show
  1. app.py +7 -9
app.py CHANGED
@@ -10,20 +10,18 @@ model = AutoModelForCausalLM.from_pretrained(
10
  batch_size=1 # Ensures efficient processing
11
  )
12
 
13
# System prompt steering the model toward concise, grounded, context-aware replies.
SYSTEM_PROMPT = """You are an AI assistant. Respond concisely, logically, and naturally. Avoid making up information. Maintain context."""
15
 
16
# Chat function: builds a prompt from recent history and streams the reply.
def chat(message, history):
    """Stream a model reply for `message`, using the last 5 exchanges as context.

    Yields the accumulated response string after each streamed chunk so the
    Gradio UI updates incrementally.
    """
    # Keep only the last 5 exchanges so the prompt stays within context limits.
    history_str = "\n".join([f"User: {entry[0]}\nAI: {entry[1]}" for entry in history[-5:]])
    prompt = f"{SYSTEM_PROMPT}\n\n{history_str}\nUser: {message}\nAI:"

    response = ""
    # NOTE(review): assumes `model(prompt, stream=True, ...)` yields string
    # chunks — confirm against the actual model wrapper's API.
    for text in model(prompt, stream=True, temperature=0.5, max_new_tokens=100):
        response += text
        yield response  # Stream response dynamically
25
 
26
# Gradio UI: wire the chat callback into a ready-made chat interface.
iface = gr.ChatInterface(fn=chat, title="LLaMA 3 Chatbot", theme="compact")
28
 
29
  # Launch app
 
10
  batch_size=1 # Ensures efficient processing
11
  )
12
 
13
# System prompt steering the model toward short, direct answers (no small talk).
SYSTEM_PROMPT = """You are an AI assistant. Answer **directly** and **concisely**. Avoid unnecessary explanations or small talk."""
15
 
16
# Chat function: builds a prompt from recent history and returns one direct reply.
def chat(message, history):
    """Return a single, non-streamed model reply for `message`.

    Uses the last 5 exchanges as context and a stop sequence so the model
    does not continue past the assistant's turn.
    """
    # Keep only the last 5 exchanges so the prompt stays within context limits.
    history_str = "\n".join([f"User: {entry[0]}\nAI: {entry[1]}" for entry in history[-5:]])
    prompt = f"{SYSTEM_PROMPT}\n\n{history_str}\nUser: {message}\nAI:"

    # Low temperature + small token budget + stop sequence keep answers short.
    # NOTE(review): assumes `model(...)` returns a plain string (`.strip()` is
    # called on it) — confirm against the actual model wrapper's API; a raw
    # transformers pipeline would return a list of dicts instead.
    response = model(prompt, temperature=0.3, max_new_tokens=50, stop=["\nUser:"])
    return response.strip()
 
 
23
 
24
# Gradio UI: wire the chat callback into a ready-made chat interface.
iface = gr.ChatInterface(fn=chat, title="LLaMA 3 Chatbot", theme="compact")
26
 
27
  # Launch app