braindeck committed on
Commit
e9a67af
·
1 Parent(s): bcdf9fa

Update app.py to use fine-tuned model

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -4,8 +4,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
5
 
6
  # Load the model and tokenizer
7
- tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", trust_remote_code=True)
8
- model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto")
9
 
10
  def generate_response(prompt):
11
  """
 
4
  import torch
5
 
6
  # Load the model and tokenizer
7
+ tokenizer = AutoTokenizer.from_pretrained("braindeck/text2text", trust_remote_code=True, subfolder="checkpoints/model")
8
+ model = AutoModelForCausalLM.from_pretrained("braindeck/text2text", trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto", subfolder="checkpoints/model")
9
 
10
  def generate_response(prompt):
11
  """