kdevoe committed on
Commit
ec46849
·
1 Parent(s): 977f7f0

Updating to include local model

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -3,15 +3,15 @@ import time
3
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
4
  import torch
5
 
6
- #model_dir = "tinyllama_model"
7
- model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
8
 
9
- model = AutoModelForCausalLM.from_pretrained(model_id, load_in_8bit=True)
10
- #tokenizer = AutoTokenizer.from_pretrained(model_dir)
11
 
12
 
13
  # Load the TinyLlama text generation pipeline
14
- pipe = pipeline("text-generation", model=model)
15
  #tokenizer = AutoTokenizer.from_pretrained(model_dir)
16
 
17
  # Define the inference function
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
4
  import torch
5
 
6
+ model_dir = "tinyllama_model"
7
+ #model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
8
 
9
+ model = AutoModelForCausalLM.from_pretrained(model_dir)
10
+ tokenizer = AutoTokenizer.from_pretrained(model_dir)
11
 
12
 
13
  # Load the TinyLlama text generation pipeline
14
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
15
  #tokenizer = AutoTokenizer.from_pretrained(model_dir)
16
 
17
  # Define the inference function