rapsar committed on
Commit
e8f95d2
·
verified ·
1 Parent(s): 241544f

removes truncation in tokenizer

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -67,7 +67,7 @@ def run_llm(prompt: str, max_new_tokens: int = 64, temperature: float = 0.0, mod
67
  tok, mdl = load_llm(model_id)
68
 
69
  # Tokenize input
70
- inputs = tok(prompt, return_tensors="pt", truncation=True, max_length=2048)
71
  inputs = {k: v.to(next(mdl.parameters()).device) for k, v in inputs.items()}
72
 
73
  # Generate
 
67
  tok, mdl = load_llm(model_id)
68
 
69
  # Tokenize input
70
+ inputs = tok(prompt, return_tensors="pt")
71
  inputs = {k: v.to(next(mdl.parameters()).device) for k, v in inputs.items()}
72
 
73
  # Generate