jacksonstrut committed on
Commit
8dc44dc
·
verified ·
1 Parent(s): 7d6ca71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -2
app.py CHANGED
@@ -32,11 +32,25 @@ if missing_vars:
32
  # Replace with your actual model name
33
  model_name = "jacksonstrut/tinyllama-1.1B-chat" # Update this with your model's name
34
 
35
- # Load the tokenizer and model with use_fast=False
 
 
 
 
 
 
 
 
 
36
  tokenizer = AutoTokenizer.from_pretrained(
37
  model_name,
38
  token=HUGGINGFACE_API_TOKEN,
39
- use_fast=False # Use the slow tokenizer to avoid conversion issues
 
 
 
 
 
40
  )
41
  config = AutoConfig.from_pretrained(model_name)
42
  model = AutoModelForCausalLM.from_pretrained(
 
32
  # Replace with your actual model name
33
  model_name = "jacksonstrut/tinyllama-1.1B-chat" # Update this with your model's name
34
 
35
+ from transformers import AutoTokenizer
36
+
37
+ model_name = "jacksonstrut/tinyllama-1.1B-chat"
38
+ HUGGINGFACE_API_TOKEN = os.getenv('HUGGINGFACE_API_TOKEN')
39
+
40
+ # Disable tokenizer parallelism
41
+ import os
42
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
43
+
44
+ # Load the tokenizer with use_fast=False
45
  tokenizer = AutoTokenizer.from_pretrained(
46
  model_name,
47
  token=HUGGINGFACE_API_TOKEN,
48
+ use_fast=False
49
+ )
50
+
51
+ # Ensure pad_token is set
52
+ if tokenizer.pad_token is None:
53
+ tokenizer.pad_token = tokenizer.eos_token
54
  )
55
  config = AutoConfig.from_pretrained(model_name)
56
  model = AutoModelForCausalLM.from_pretrained(