epicDev123 committed on
Commit
08d326b
·
verified ·
1 Parent(s): 89dfd18

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -6
app.py CHANGED
@@ -7,22 +7,35 @@ model_id = "mistralai/Mistral-7B-Instruct"
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
9
 
10
- def chat(prompt, history=[]):
11
- system_prompt = "You are a helpful AI assistant."
12
- full_prompt = system_prompt + "\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
 
14
  for user, bot in history:
15
  full_prompt += f"User: {user}\nAssistant: {bot}\n"
16
  full_prompt += f"User: {prompt}\nAssistant:"
17
 
18
  inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
19
- streamer = TextStreamer(tokenizer)
20
 
21
  output = model.generate(
22
  **inputs,
23
  max_new_tokens=300,
24
  do_sample=True,
25
- temperature=0.7,
26
  top_p=0.95,
27
  pad_token_id=tokenizer.eos_token_id
28
  )
@@ -32,4 +45,18 @@ def chat(prompt, history=[]):
32
  history.append((prompt, response))
33
  return response, history
34
 
35
- gr.ChatInterface(chat, title="Mistral Chatbot").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Load the tokenizer and model once at module import time so every chat
# request reuses the same weights.
tokenizer = AutoTokenizer.from_pretrained(model_id)
# fp16 halves memory; device_map="auto" lets accelerate place layers on the
# available device(s). NOTE(review): assumes a CUDA-capable host — confirm.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
9
 
10
# Chat "personalities": each mode pairs a system prompt with the sampling
# temperature that chat() applies during generation.
modes = {
    "Standard Mode": {
        "prompt": "You are a helpful, honest, and friendly assistant.",
        "temperature": 0.7,
    },
    "Brainstorming Mode": {
        "prompt": (
            "You are a creative, out-of-the-box thinker helping users "
            "brainstorm ideas, explore thoughts, and be wildly imaginative."
        ),
        "temperature": 1.0,
    },
}
21
+
22
def chat(prompt, history=None, mode="Standard Mode"):
    """Generate one assistant reply for *prompt*.

    Parameters
    ----------
    prompt : str
        The user's latest message.
    history : list[tuple[str, str]] | None
        Prior (user, assistant) turns. Defaults to None instead of the
        original ``history=[]`` — a mutable default is shared across calls,
        so conversations from unrelated sessions would bleed into each other.
    mode : str
        Key into the module-level ``modes`` dict selecting the system prompt
        and sampling temperature.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        The generated reply and the updated history.
    """
    if history is None:
        history = []
    settings = modes[mode]
    system_prompt = settings["prompt"]
    temp = settings["temperature"]

    # Flatten the conversation into one plain-text transcript the model sees.
    full_prompt = system_prompt + "\n"
    for user, bot in history:
        full_prompt += f"User: {user}\nAssistant: {bot}\n"
    full_prompt += f"User: {prompt}\nAssistant:"

    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)

    output = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,
        temperature=temp,
        top_p=0.95,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens, not the echoed prompt.
    # NOTE(review): the decode step (diff lines 42-44) is hidden by the hunk
    # boundary in this view; this reconstruction slices off the prompt tokens
    # and strips whitespace — confirm against the full file.
    response = tokenizer.decode(
        output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    ).strip()

    history.append((prompt, response))
    return response, history
47
 
48
# Build the UI. Components created inside the `with gr.Blocks()` context are
# attached to the layout automatically — `gr.Blocks` has no `.add()` method,
# so the original `demo.add(...)` calls would raise AttributeError at import.
# The dropdown is wired to the chat function through `additional_inputs`:
# reading `mode_dropdown.value` inside a lambda only ever sees the component's
# INITIAL value, never the user's live selection. The original no-op
# `mode_dropdown.change(fn=lambda mode: None, ...)` handler is dropped for the
# same reason — it did nothing.
with gr.Blocks() as demo:
    mode_dropdown = gr.Dropdown(
        choices=list(modes.keys()),
        value="Standard Mode",
        label="Select Mode",
    )
    gr.ChatInterface(
        # ChatInterface expects fn to return just the reply text, while
        # chat() returns (response, history) — take element [0]. The history
        # is copied so chat() cannot mutate Gradio's internal state.
        fn=lambda message, history, mode: chat(message, list(history or []), mode)[0],
        additional_inputs=[mode_dropdown],
    )

demo.launch()