choco-conoz committed
Commit 5fc3510 · 1 Parent(s): 719a935

feat: change model

Files changed (1)
  1. src/streamlit_app.py +9 -8
src/streamlit_app.py CHANGED
@@ -7,7 +7,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 AI_MODE = "ON"
 
 if AI_MODE == "ON":
-    model_id = "choco-conoz/TwinLlama-3.2-1B-DPO"
+    # model_id = "choco-conoz/TwinLlama-3.2-1B-DPO"
+    model_id = "choco-conoz/TwinLlama-3.2-1B"
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     model = AutoModelForCausalLM.from_pretrained(model_id)
     # for GPU inference, uncomment the following line
@@ -74,13 +75,13 @@ def main():
     user_prompt = alpaca_template.format(query, "")
     if AI_MODE == "ON":
         # for chat models
-        tokenizer.chat_template = {
-            "role": "user",
-            "prompt": user_prompt,
-            "generation_prompt": "",
-        }
-        user_prompt = tokenizer.apply_chat_template(
-            user_prompt, tokenize=False, add_generation_prompt=True)
+        # tokenizer.chat_template = {
+        #     "role": "user",
+        #     "prompt": user_prompt,
+        #     "generation_prompt": "",
+        # }
+        # user_prompt = tokenizer.apply_chat_template(
+        #     user_prompt, tokenize=False, add_generation_prompt=True)
         outputs = processor(user_prompt,
                             max_new_tokens=4096,
                             use_cache=True,
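
Note on the commented-out block: in the transformers library, tokenizer.chat_template is a Jinja template string supplied with the model, and apply_chat_template expects a list of message dicts with "role" and "content" keys rather than a raw prompt string. A minimal sketch of the conventional pattern is shown below; it assumes processor is a text-generation pipeline and that the choco-conoz/TwinLlama-3.2-1B tokenizer ships a chat template, neither of which is confirmed by this commit.

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "choco-conoz/TwinLlama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
processor = pipeline("text-generation", model=model, tokenizer=tokenizer)

# apply_chat_template takes a list of {"role", "content"} messages, not a raw
# string; add_generation_prompt=True appends the assistant-turn marker so the
# model continues as the assistant.
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True)

outputs = processor(prompt, max_new_tokens=4096, use_cache=True)
print(outputs[0]["generated_text"])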