AMR-KELEG committed on
Commit
947aa37
·
1 Parent(s): baffbf0

Time the speed

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -7,6 +7,7 @@ import streamlit as st
7
 
8
  import torch
9
  import datasets
 
10
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
11
 
12
  model_name = st.text_input("Enter a model's name on HF")
@@ -58,11 +59,9 @@ def predict_top_p(text, P=0.9):
58
  dataset_name = "AMR-KELEG/test-dataset"
59
  dataset = datasets.load_dataset(dataset_name, token=os.environ["HF_TOKEN"])["test"]
60
 
61
- for sample in dataset:
62
  text = sample["sentence"]
63
  labels= [DIALECTS[i] for i in range(len(DIALECTS)) if DIALECTS[i] in sample.keys() and int(sample[DIALECTS[i]]) == 1]
64
  pred = predict_top_p(text)
65
  sample["pred"] = pred
66
  st.write("Text:", text)
67
- st.write("Labels:", labels)
68
- st.write("Predictions:", pred)
 
7
 
8
  import torch
9
  import datasets
10
+ from tqdm import tqdm
11
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
12
 
13
  model_name = st.text_input("Enter a model's name on HF")
 
59
  dataset_name = "AMR-KELEG/test-dataset"
60
  dataset = datasets.load_dataset(dataset_name, token=os.environ["HF_TOKEN"])["test"]
61
 
62
+ for sample in tqdm(dataset):
63
  text = sample["sentence"]
64
  labels= [DIALECTS[i] for i in range(len(DIALECTS)) if DIALECTS[i] in sample.keys() and int(sample[DIALECTS[i]]) == 1]
65
  pred = predict_top_p(text)
66
  sample["pred"] = pred
67
  st.write("Text:", text)