Kenan023214 commited on
Commit
6694a15
·
verified ·
1 Parent(s): 41ec8f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -10
app.py CHANGED
@@ -5,12 +5,14 @@ from huggingface_hub import hf_hub_download
5
  from functools import lru_cache
6
 
7
  # --- Hugging Face Space Configuration ---
8
- # Load the model and tokenizer only once when the app starts
9
  MODEL_NAME = "Kenan023214/PyroNet-mini"
10
  DEVICE = "cpu" # Use CPU for basic Space
11
- MAX_NEW_TOKENS = 256
12
  MAX_CONTEXT_TOKENS = 2048
13
 
 
 
 
14
  @lru_cache(maxsize=1)
15
  def load_model():
16
  """Loads the model and tokenizer, caching them for performance."""
@@ -24,17 +26,18 @@ def load_model():
24
  print("Model loaded.")
25
  return tokenizer, model
26
 
27
- @lru_cache(maxsize=1)
28
  def download_templates():
29
- """Downloads template files from the model repository."""
30
  print("Downloading chat templates...")
31
  for lang in ["ru", "en", "uk"]:
32
- hf_hub_download(
 
33
  repo_id=MODEL_NAME,
34
- filename=f"chat_template_{lang}.jinja",
35
  local_dir=".",
36
  local_dir_use_symlinks=False
37
  )
 
38
  print("Templates downloaded.")
39
 
40
  tokenizer, model = load_model()
@@ -102,8 +105,8 @@ def generate_response(user_text: str, history, reasoning: bool, language: str):
102
 
103
  messages_for_template = build_messages_for_template(trimmed_history, reasoning, language)
104
 
105
- # Select the template file from the local files
106
- template_file = f"chat_template_{language}.jinja"
107
 
108
  text = tokenizer.apply_chat_template(
109
  messages_for_template,
@@ -134,7 +137,7 @@ def generate_response(user_text: str, history, reasoning: bool, language: str):
134
  # --- Gradio Interface ---
135
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
136
  gr.Markdown("# PyroNet-mini Chat")
137
- gr.Markdown("A demonstration of PyroNet-mini (based on a custom model) with multilingual templates and a reasoning mode.")
138
 
139
  chatbot = gr.Chatbot(height=500)
140
 
@@ -147,7 +150,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
147
  )
148
  with gr.Column(scale=1, min_width=100):
149
  language_dropdown = gr.Dropdown(
150
- choices=["ru", "en", "uk"],
151
  value="en",
152
  label="Language",
153
  container=False
 
5
  from functools import lru_cache
6
 
7
  # --- Hugging Face Space Configuration ---
 
8
  MODEL_NAME = "Kenan023214/PyroNet-mini"
9
  DEVICE = "cpu" # Use CPU for basic Space
10
+ MAX_NEW_TOKENS = 1024
11
  MAX_CONTEXT_TOKENS = 2048
12
 
13
+ # Dictionary to store the full paths of downloaded templates
14
+ TEMPLATE_PATHS = {}
15
+
16
  @lru_cache(maxsize=1)
17
  def load_model():
18
  """Loads the model and tokenizer, caching them for performance."""
 
26
  print("Model loaded.")
27
  return tokenizer, model
28
 
 
29
def download_templates():
    """Download the per-language chat template files from the model repo.

    Fetches ``chat_template_<lang>.jinja`` for each supported language and
    records its local path in the module-level ``TEMPLATE_PATHS`` dict,
    keyed by language code (``"ru"``, ``"en"``, ``"uk"``).

    Idempotent: if ``TEMPLATE_PATHS`` is already populated, the function
    returns immediately. This restores the run-once behaviour that the
    removed ``@lru_cache(maxsize=1)`` decorator used to provide, without
    re-downloading three files over the network on every call.
    """
    # A previous call already fetched everything — skip the network round-trips.
    if TEMPLATE_PATHS:
        return
    print("Downloading chat templates...")
    for lang in ["ru", "en", "uk"]:
        filename = f"chat_template_{lang}.jinja"
        file_path = hf_hub_download(
            repo_id=MODEL_NAME,
            filename=filename,
            local_dir=".",
            # NOTE(review): local_dir_use_symlinks is deprecated in recent
            # huggingface_hub releases and is ignored when local_dir is set —
            # confirm the pinned hub version before dropping it.
            local_dir_use_symlinks=False,
        )
        # Store the resolved path so generate_response can look the
        # template up by language instead of rebuilding the filename.
        TEMPLATE_PATHS[lang] = file_path
    print("Templates downloaded.")
42
 
43
  tokenizer, model = load_model()
 
105
 
106
  messages_for_template = build_messages_for_template(trimmed_history, reasoning, language)
107
 
108
+ # Use the full path from the TEMPLATE_PATHS dictionary
109
+ template_file = TEMPLATE_PATHS.get(language, TEMPLATE_PATHS["en"])
110
 
111
  text = tokenizer.apply_chat_template(
112
  messages_for_template,
 
137
  # --- Gradio Interface ---
138
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
139
  gr.Markdown("# PyroNet-mini Chat")
140
+ gr.Markdown("A demonstration of PyroNet-mini with multilingual templates and a reasoning mode.")
141
 
142
  chatbot = gr.Chatbot(height=500)
143
 
 
150
  )
151
  with gr.Column(scale=1, min_width=100):
152
  language_dropdown = gr.Dropdown(
153
+ choices=["en", "ru", "uk"],
154
  value="en",
155
  label="Language",
156
  container=False