Spaces:
Sleeping
Sleeping
Sasha
committed on
Commit
·
03a66e4
1
Parent(s):
dcac4fc
hardcoding metrics (for now)
Browse files
app.py
CHANGED
|
@@ -24,6 +24,7 @@ top_datasets= ['glue', 'super_glue', 'wikitext', 'imdb', 'squad', 'squad_es', \
|
|
| 24 |
'race', 'winogrande']
|
| 25 |
|
| 26 |
tasks= ['text-classification', 'question-answering-extractive', 'automatic-speech-recognition']
|
|
|
|
| 27 |
|
| 28 |
with st.sidebar.expander("Datasets", expanded=True):
|
| 29 |
dataset_name = st.selectbox(
|
|
@@ -50,7 +51,7 @@ st.markdown(dataset_builder.info.description)
|
|
| 50 |
st.markdown("For more information about this dataset, check out [its website](https://huggingface.co/datasets/"+dataset_name+")")
|
| 51 |
|
| 52 |
st.markdown("### Dataset-Specific Metrics")
|
| 53 |
-
if dataset_name in
|
| 54 |
st.markdown("Great news! Your dataset has a dedicated metric for it! You can use it like this:")
|
| 55 |
code = ''' from datasets import load_metric
|
| 56 |
metric = load_metric('''+dataset+''', '''+config+''')'''
|
|
|
|
| 24 |
'race', 'winogrande']
|
| 25 |
|
| 26 |
tasks= ['text-classification', 'question-answering-extractive', 'automatic-speech-recognition']
|
| 27 |
+
metrics= ['matthews_correlation', 'perplexity', 'meteor', 'code_eval', 'super_glue', 'rouge', 'mauve', 'cer', 'accuracy', 'recall', 'bleurt', 'sari', 'precision', 'mean_iou', 'squad', 'mahalanobis', 'chrf', 'mae', 'squad_v2', 'seqeval', 'cuad', 'wiki_split', 'google_bleu', 'competition_math', 'pearsonr', 'xtreme_s', 'comet', 'gleu', 'spearmanr', 'f1', 'frugalscore', 'bertscore', 'indic_glue', 'mse', 'xnli', 'ter', 'coval', 'wer', 'bleu', 'glue', 'sacrebleu']
|
| 28 |
|
| 29 |
with st.sidebar.expander("Datasets", expanded=True):
|
| 30 |
dataset_name = st.selectbox(
|
|
|
|
| 51 |
st.markdown("For more information about this dataset, check out [its website](https://huggingface.co/datasets/"+dataset_name+")")
|
| 52 |
|
| 53 |
st.markdown("### Dataset-Specific Metrics")
|
| 54 |
+
if dataset_name in metrics:
|
| 55 |
st.markdown("Great news! Your dataset has a dedicated metric for it! You can use it like this:")
|
| 56 |
code = ''' from datasets import load_metric
|
| 57 |
metric = load_metric('''+dataset+''', '''+config+''')'''
|