CaptMetal committed on
Commit
416bd0a
·
verified ·
1 Parent(s): 21a593d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -16
app.py CHANGED
@@ -1,30 +1,56 @@
1
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
  import os
4
- from transformers import AutoTokenizer
5
 
 
6
  if not os.path.exists("BuddAi/tokenizer.json"):
 
7
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
8
  tokenizer.save_pretrained("BuddAi")
9
-
10
- # Load your model (replace with your model ID)
11
- model_id = "CaptMetal/BuddAi"
 
12
  tokenizer = AutoTokenizer.from_pretrained(model_id)
13
  model = AutoModelForCausalLM.from_pretrained(model_id)
14
- chatbot = pipeline("conversational", model=model, tokenizer=tokenizer)
15
 
 
 
 
 
 
 
 
 
16
 
17
-
18
- # Chat function with memory
19
  def respond(message, history):
20
- conversation = []
 
21
  for user_msg, bot_msg in history:
22
- conversation.append({"role": "user", "content": user_msg})
23
- conversation.append({"role": "assistant", "content": bot_msg})
24
- conversation.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
25
 
26
- response = chatbot(conversation)
27
- return response[-1]["generated_text"]
 
 
28
 
29
- # Launch interface
30
- gr.ChatInterface(respond).launch()
 
 
 
 
 
 
1
# Imports: transformers supplies the model/tokenizer/pipeline, gradio the web UI,
# os the filesystem check for the cached tokenizer.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr
import os

# 1. Ensure tokenizer exists (Mistral-7B compatible)
# One-time bootstrap: if no tokenizer has been saved under BuddAi/ yet,
# download the Mistral-7B-Instruct tokenizer and cache it there.
if not os.path.exists("BuddAi/tokenizer.json"):
    os.makedirs("BuddAi", exist_ok=True)
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
    tokenizer.save_pretrained("BuddAi")
    print("Saved tokenizer files to BuddAi/")

# 2. Load model and tokenizer
# NOTE(review): the bootstrap above saves only TOKENIZER files into BuddAi/;
# unless model weights are also present in that directory,
# AutoModelForCausalLM.from_pretrained("BuddAi") will fail at startup —
# confirm the directory holds weights, or point model_id at the HF repo.
model_id = "BuddAi"  # Local path (or your HF repo "CaptMetal/BuddAi" if uploaded)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
 
16
 
17
# 3. Create pipeline with proper chat template
# NOTE(review): this template just concatenates message contents separated by
# spaces, and it is never exercised below — respond() builds its prompt string
# by hand instead of calling tokenizer.apply_chat_template. Confirm whether
# the assignment is still needed.
tokenizer.chat_template = "{% for message in messages %}{{message['content']}}{% if not loop.last %}{{' '}}{% endif %}{% endfor %}"
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device_map="auto"  # Uses GPU if available
)
25
 
26
# 4. Improved chat function
def respond(message, history):
    """Generate a reply to *message* given the running chat *history*.

    Parameters:
        message: The latest user message (str).
        history: List of (user_msg, bot_msg) pairs from previous turns,
            as supplied by gr.ChatInterface.

    Returns:
        The model's reply text with the echoed prompt stripped and the
        output truncated at the first </s> end-of-sequence marker.
    """
    # Format conversation history: build each turn in a list and join once,
    # avoiding quadratic += string concatenation as the history grows.
    # NOTE(review): the <|user|>/<|assistant|> tags are Zephyr-style, not the
    # [INST]...[/INST] format base Mistral-7B-Instruct was trained on —
    # confirm the fine-tuned model actually expects these tags.
    turns = [
        f"<|user|>{user_msg}</s><|assistant|>{bot_msg}</s>"
        for user_msg, bot_msg in history
    ]

    # Current message, appended as an open assistant turn for the model to complete.
    prompt = f"{''.join(turns)}<|user|>{message}</s><|assistant|>"

    # Generate response
    outputs = chatbot(
        prompt,
        max_new_tokens=256,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id
    )

    # Extract and clean response: the text-generation pipeline echoes the
    # prompt by default, so slice it off, then cut at the first </s>.
    full_text = outputs[0]["generated_text"]
    response = full_text[len(prompt):].split("</s>")[0].strip()
    return response
49
 
50
# 5. Launch Gradio interface
# Wires respond() into a chat UI; share=True requests a public gradio.live
# tunnel in addition to the local server on port 7860.
gr.ChatInterface(
    respond,
    title="BuddAI - Mistral-7B Chatbot",
    description="A conversational AI friend powered by Mistral-7B",
    examples=["How are you today?", "Tell me a joke!"]
).launch(server_port=7860, share=True)