truongghieu committed on
Commit
189f926
·
1 Parent(s): f308f42

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -15,7 +15,6 @@ tokenizer = AutoTokenizer.from_pretrained("truongghieu/deci-finetuned", trust_re
15
  model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True, quantization_config=bnb_config)
16
 
17
  # Move the model to the GPU if available
18
- model.to(device)
19
 
20
  generation_config = GenerationConfig(
21
  penalty_alpha=0.6,
@@ -32,7 +31,7 @@ generation_config = GenerationConfig(
32
  # Define a function that takes a text input and generates a text output
33
  def generate_text(text):
34
  input_text = text
35
- input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device) # Move input to the GPU
36
  output_ids = model.generate(input_ids, generation_config=generation_config)
37
  output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
38
  return output_text
 
15
  model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True, quantization_config=bnb_config)
16
 
17
  # Move the model to the GPU if available
 
18
 
19
  generation_config = GenerationConfig(
20
  penalty_alpha=0.6,
 
31
  # Define a function that takes a text input and generates a text output
32
  def generate_text(text):
33
  input_text = text
34
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
35
  output_ids = model.generate(input_ids, generation_config=generation_config)
36
  output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
37
  return output_text