Frenchizer committed on
Commit
848fa80
·
1 Parent(s): cd1f49d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -11,8 +11,9 @@ translation_model_file = "./model.onnx"
11
  context_session = ort.InferenceSession(context_model_file)
12
  translation_session = ort.InferenceSession(translation_model_file)
13
 
14
- # Load tokenizer for translation model
15
  tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
 
16
 
17
  labels = [
18
  "general", "pharma", "legal", "technical", "UI", "user interface", "medicine",
@@ -28,7 +29,7 @@ labels = [
28
 
29
  def detect_context(input_text):
30
  # Tokenize input text
31
- inputs = tokenizer(input_text, return_tensors="np", padding=True, truncation=True, max_length=512)
32
 
33
  # Prepare input for ONNX model
34
  input_ids = inputs["input_ids"].astype(np.int64)
 
11
  context_session = ort.InferenceSession(context_model_file)
12
  translation_session = ort.InferenceSession(translation_model_file)
13
 
14
+ # Load tokenizer for translation model and for context model
15
  tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
16
+ context_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
17
 
18
  labels = [
19
  "general", "pharma", "legal", "technical", "UI", "user interface", "medicine",
 
29
 
30
  def detect_context(input_text):
31
  # Tokenize input text
32
+ inputs = context_tokenizer(input_text, return_tensors="np", padding=True, truncation=True, max_length=512)
33
 
34
  # Prepare input for ONNX model
35
  input_ids = inputs["input_ids"].astype(np.int64)