saharM committed on
Commit
dc44059
·
1 Parent(s): 7bd8337

fix tokenizer

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -3,7 +3,7 @@ import re
3
  import torch
4
  import contractions
5
  import pandas as pd
6
- from transformers import AutoTokenizer, BartForConditionalGeneration
7
 
8
  # Set page config
9
  st.set_page_config(
@@ -80,7 +80,7 @@ st.markdown("""
80
  device = torch.device("cpu")
81
  MODEL_PATH = "./models/fine-tuned_bart_base"
82
  model = BartForConditionalGeneration.from_pretrained(MODEL_PATH).to(device)
83
- tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
84
 
85
  #Helper functions
86
  def extract_speakers(dialogue):
 
3
  import torch
4
  import contractions
5
  import pandas as pd
6
+ from transformers import BartTokenizer, BartForConditionalGeneration
7
 
8
  # Set page config
9
  st.set_page_config(
 
80
  device = torch.device("cpu")
81
  MODEL_PATH = "./models/fine-tuned_bart_base"
82
  model = BartForConditionalGeneration.from_pretrained(MODEL_PATH).to(device)
83
+ tokenizer = BartTokenizer.from_pretrained(MODEL_PATH)
84
 
85
  #Helper functions
86
  def extract_speakers(dialogue):