InsafQ committed on
Commit
09d32fe
·
verified ·
1 Parent(s): 5710e4c

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +3 -2
src/streamlit_app.py CHANGED
@@ -5,6 +5,7 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
5
  import nltk
6
  import math
7
  import torch
 
8
 
9
  model_name = "InsafQ/title_creator"
10
  max_input_length = 512
@@ -16,8 +17,8 @@ st_model_load = st.text('Loading title generator model...')
16
  @st.cache_data()
17
  def load_model():
18
  print("Loading model...")
19
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
20
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name, trust_remote_code=True)
21
  nltk.download('punkt')
22
  print("Model loaded!")
23
  return tokenizer, model
 
5
  import nltk
6
  import math
7
  import torch
8
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
9
 
10
  model_name = "InsafQ/title_creator"
11
  max_input_length = 512
 
17
  @st.cache_data()
18
  def load_model():
19
  print("Loading model...")
20
+ tokenizer = T5Tokenizer.from_pretrained(model_name, trust_remote_code=True)
21
+ model = T5ForConditionalGeneration.from_pretrained(model_name, trust_remote_code=True)
22
  nltk.download('punkt')
23
  print("Model loaded!")
24
  return tokenizer, model