BytArch committed on
Commit
ae00e01
·
verified ·
1 Parent(s): 0c0c7bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -0
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
 
5
  model_path = "BytArch/source-mini"
6
  tokenizer = AutoTokenizer.from_pretrained(model_path)
7
  model = AutoModelForCausalLM.from_pretrained(model_path)
@@ -9,6 +10,7 @@ model = AutoModelForCausalLM.from_pretrained(model_path)
9
  if tokenizer.pad_token is None:
10
  tokenizer.pad_token = tokenizer.eos_token
11
 
 
12
  SYSTEM_PROMPT = (
13
  "You are source-mini-beta, an open-source chatbot developed by BytArch and created by Joshua Kelly. "
14
  "You are currently in beta development and are still learning and improving. "
@@ -28,9 +30,11 @@ SYSTEM_PROMPT = (
28
  "Now, greet the user and let them know you are ready to chat in whichever style they prefer."
29
  )
30
 
 
31
  def build_context(user_message):
32
  return SYSTEM_PROMPT + "\n\nUser: " + user_message + "\nAssistant:"
33
 
 
34
  def generate_response(
35
  prompt,
36
  max_tokens=300,
@@ -68,8 +72,10 @@ def generate_response(
68
 
69
  return response.strip()
70
 
 
71
  def respond(
72
  message,
 
73
  max_tokens,
74
  temperature,
75
  top_p,
@@ -85,6 +91,7 @@ def respond(
85
  top_k=top_k,
86
  )
87
 
 
88
  chatbot = gr.ChatInterface(
89
  respond,
90
  type="messages",
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
5
+ # Load the model
6
  model_path = "BytArch/source-mini"
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
  model = AutoModelForCausalLM.from_pretrained(model_path)
 
10
  if tokenizer.pad_token is None:
11
  tokenizer.pad_token = tokenizer.eos_token
12
 
13
+ # System prompt
14
  SYSTEM_PROMPT = (
15
  "You are source-mini-beta, an open-source chatbot developed by BytArch and created by Joshua Kelly. "
16
  "You are currently in beta development and are still learning and improving. "
 
30
  "Now, greet the user and let them know you are ready to chat in whichever style they prefer."
31
  )
32
 
33
+ # Build context for stateless generation
34
  def build_context(user_message):
35
  return SYSTEM_PROMPT + "\n\nUser: " + user_message + "\nAssistant:"
36
 
37
+ # Generate response
38
  def generate_response(
39
  prompt,
40
  max_tokens=300,
 
72
 
73
  return response.strip()
74
 
75
+ # Respond function for Gradio
76
  def respond(
77
  message,
78
+ history, # required by ChatInterface but ignored
79
  max_tokens,
80
  temperature,
81
  top_p,
 
91
  top_k=top_k,
92
  )
93
 
94
+ # Gradio interface
95
  chatbot = gr.ChatInterface(
96
  respond,
97
  type="messages",